@tklengyel
Last active July 18, 2019 18:40
This file has been truncated.
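
The patch below mechanically reflows these Xen/ARM sources into the hypervisor's coding style: four-space indentation, braces on their own line, a space inside "for ( ... )" and "if ( ... )" conditions, labels unindented, and continuation lines aligned under the opening parenthesis. A minimal before/after sketch of that convention follows; the function and its body are hypothetical, purely for illustration, and not part of the patch itself.

/* Before: imported Linux-style layout, as found in the touched files. */
static int sum_array(const int *vals, int n) {
        int i, total = 0;
        for( i = 0; i < n; i++ ) {
                total += vals[i];
        }
        return total;
}

/* After: Xen style - 4-space indent, brace on its own line, spaces in the condition. */
static int sum_array(const int *vals, int n)
{
    int i, total = 0;

    for ( i = 0; i < n; i++ )
        total += vals[i];

    return total;
}
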
diff --git a/xen/arch/arm/acpi/boot.c b/xen/arch/arm/acpi/boot.c
index 9b29769a10..83fe847d67 100644
--- a/xen/arch/arm/acpi/boot.c
+++ b/xen/arch/arm/acpi/boot.c
@@ -128,7 +128,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor =
- container_of(header, struct acpi_madt_generic_interrupt, header);
+ container_of(header, struct acpi_madt_generic_interrupt, header);
if ( BAD_MADT_ENTRY(processor, end) )
return -EINVAL;
@@ -149,7 +149,7 @@ void __init acpi_smp_init_cpus(void)
* we need for SMP init
*/
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_gic_cpu_interface, 0);
+ acpi_parse_gic_cpu_interface, 0);
if ( count <= 0 )
{
@@ -185,7 +185,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
return 0;
printk("Unsupported FADT revision %d.%d, should be 6.0+, will disable ACPI\n",
- table->revision, fadt->minor_revision);
+ table->revision, fadt->minor_revision);
return -EINVAL;
}
@@ -248,7 +248,7 @@ int __init acpi_boot_table_init(void)
*/
if ( param_acpi_off || ( !param_acpi_force
&& device_tree_for_each_node(device_tree_flattened,
- dt_scan_depth1_nodes, NULL)))
+ dt_scan_depth1_nodes, NULL)))
goto disable;
/*
diff --git a/xen/arch/arm/acpi/domain_build.c b/xen/arch/arm/acpi/domain_build.c
index 1b1cfabb00..25a9fde32d 100644
--- a/xen/arch/arm/acpi/domain_build.c
+++ b/xen/arch/arm/acpi/domain_build.c
@@ -67,7 +67,7 @@ static int __init acpi_route_spis(struct domain *d)
* Route the IRQ to hardware domain and permit the access.
* The interrupt type will be set by set by the hardware domain.
*/
- for( i = NR_LOCAL_IRQS; i < vgic_num_irqs(d); i++ )
+ for ( i = NR_LOCAL_IRQS; i < vgic_num_irqs(d); i++ )
{
/*
* TODO: Exclude the SPIs SMMU uses which should not be routed to
@@ -176,7 +176,7 @@ static int __init create_acpi_dtb(struct kernel_info *kinfo,
return 0;
- err:
+err:
printk("Device tree generation failed (%d).\n", ret);
xfree(kinfo->fdt);
return -EINVAL;
@@ -189,7 +189,7 @@ static void __init acpi_map_other_tables(struct domain *d)
u64 addr, size;
/* Map all ACPI tables to Dom0 using 1:1 mappings. */
- for( i = 0; i < acpi_gbl_root_table_list.count; i++ )
+ for ( i = 0; i < acpi_gbl_root_table_list.count; i++ )
{
addr = acpi_gbl_root_table_list.tables[i].address;
size = acpi_gbl_root_table_list.tables[i].length;
@@ -200,9 +200,9 @@ static void __init acpi_map_other_tables(struct domain *d)
p2m_mmio_direct_c);
if ( res )
{
- panic(XENLOG_ERR "Unable to map ACPI region 0x%"PRIx64
- " - 0x%"PRIx64" in domain\n",
- addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1);
+ panic(XENLOG_ERR "Unable to map ACPI region 0x%"PRIx64
+ " - 0x%"PRIx64" in domain\n",
+ addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1);
}
}
}
@@ -249,7 +249,7 @@ static void __init acpi_xsdt_modify_entry(u64 entry[],
struct acpi_table_header *table;
u64 size = sizeof(struct acpi_table_header);
- for( i = 0; i < entry_count; i++ )
+ for ( i = 0; i < entry_count; i++ )
{
table = acpi_os_map_memory(entry[i], size);
if ( ACPI_COMPARE_NAME(table->signature, signature) )
@@ -381,7 +381,7 @@ static int __init acpi_create_madt(struct domain *d, struct membank tbl_add[])
}
gicd = container_of(header, struct acpi_madt_generic_distributor, header);
memcpy(base_ptr + table_size, gicd,
- sizeof(struct acpi_madt_generic_distributor));
+ sizeof(struct acpi_madt_generic_distributor));
table_size += sizeof(struct acpi_madt_generic_distributor);
/* Add other subtables. */
diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
index 52ed7edf69..eaae705ae3 100644
--- a/xen/arch/arm/alternative.c
+++ b/xen/arch/arm/alternative.c
@@ -40,7 +40,8 @@
extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
-struct alt_region {
+struct alt_region
+{
const struct alt_instr *begin;
const struct alt_instr *end;
};
@@ -229,16 +230,18 @@ void __init apply_alternatives_all(void)
ASSERT(system_state != SYS_STATE_active);
- /* better not try code patching on a live SMP system */
+ /* better not try code patching on a live SMP system */
ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);
/* stop_machine_run should never fail at this stage of the boot */
BUG_ON(ret);
}
-int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end)
+int apply_alternatives(const struct alt_instr *start,
+ const struct alt_instr *end)
{
- const struct alt_region region = {
+ const struct alt_region region =
+ {
.begin = start,
.end = end,
};
diff --git a/xen/arch/arm/arm32/asm-offsets.c b/xen/arch/arm/arm32/asm-offsets.c
index 2116ba5b95..f98124bf24 100644
--- a/xen/arch/arm/arm32/asm-offsets.c
+++ b/xen/arch/arm/arm32/asm-offsets.c
@@ -22,56 +22,56 @@
void __dummy__(void)
{
- OFFSET(UREGS_sp, struct cpu_user_regs, sp);
- OFFSET(UREGS_lr, struct cpu_user_regs, lr);
- OFFSET(UREGS_pc, struct cpu_user_regs, pc);
- OFFSET(UREGS_cpsr, struct cpu_user_regs, cpsr);
- OFFSET(UREGS_hsr, struct cpu_user_regs, hsr);
+ OFFSET(UREGS_sp, struct cpu_user_regs, sp);
+ OFFSET(UREGS_lr, struct cpu_user_regs, lr);
+ OFFSET(UREGS_pc, struct cpu_user_regs, pc);
+ OFFSET(UREGS_cpsr, struct cpu_user_regs, cpsr);
+ OFFSET(UREGS_hsr, struct cpu_user_regs, hsr);
- OFFSET(UREGS_LR_usr, struct cpu_user_regs, lr_usr);
- OFFSET(UREGS_SP_usr, struct cpu_user_regs, sp_usr);
+ OFFSET(UREGS_LR_usr, struct cpu_user_regs, lr_usr);
+ OFFSET(UREGS_SP_usr, struct cpu_user_regs, sp_usr);
- OFFSET(UREGS_SP_svc, struct cpu_user_regs, sp_svc);
- OFFSET(UREGS_LR_svc, struct cpu_user_regs, lr_svc);
- OFFSET(UREGS_SPSR_svc, struct cpu_user_regs, spsr_svc);
+ OFFSET(UREGS_SP_svc, struct cpu_user_regs, sp_svc);
+ OFFSET(UREGS_LR_svc, struct cpu_user_regs, lr_svc);
+ OFFSET(UREGS_SPSR_svc, struct cpu_user_regs, spsr_svc);
- OFFSET(UREGS_SP_abt, struct cpu_user_regs, sp_abt);
- OFFSET(UREGS_LR_abt, struct cpu_user_regs, lr_abt);
- OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
+ OFFSET(UREGS_SP_abt, struct cpu_user_regs, sp_abt);
+ OFFSET(UREGS_LR_abt, struct cpu_user_regs, lr_abt);
+ OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
- OFFSET(UREGS_SP_und, struct cpu_user_regs, sp_und);
- OFFSET(UREGS_LR_und, struct cpu_user_regs, lr_und);
- OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
+ OFFSET(UREGS_SP_und, struct cpu_user_regs, sp_und);
+ OFFSET(UREGS_LR_und, struct cpu_user_regs, lr_und);
+ OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
- OFFSET(UREGS_SP_irq, struct cpu_user_regs, sp_irq);
- OFFSET(UREGS_LR_irq, struct cpu_user_regs, lr_irq);
- OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
+ OFFSET(UREGS_SP_irq, struct cpu_user_regs, sp_irq);
+ OFFSET(UREGS_LR_irq, struct cpu_user_regs, lr_irq);
+ OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
- OFFSET(UREGS_SP_fiq, struct cpu_user_regs, sp_fiq);
- OFFSET(UREGS_LR_fiq, struct cpu_user_regs, lr_fiq);
- OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
+ OFFSET(UREGS_SP_fiq, struct cpu_user_regs, sp_fiq);
+ OFFSET(UREGS_LR_fiq, struct cpu_user_regs, lr_fiq);
+ OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
- OFFSET(UREGS_R8_fiq, struct cpu_user_regs, r8_fiq);
- OFFSET(UREGS_R9_fiq, struct cpu_user_regs, r9_fiq);
- OFFSET(UREGS_R10_fiq, struct cpu_user_regs, r10_fiq);
- OFFSET(UREGS_R11_fiq, struct cpu_user_regs, r11_fiq);
- OFFSET(UREGS_R12_fiq, struct cpu_user_regs, r12_fiq);
+ OFFSET(UREGS_R8_fiq, struct cpu_user_regs, r8_fiq);
+ OFFSET(UREGS_R9_fiq, struct cpu_user_regs, r9_fiq);
+ OFFSET(UREGS_R10_fiq, struct cpu_user_regs, r10_fiq);
+ OFFSET(UREGS_R11_fiq, struct cpu_user_regs, r11_fiq);
+ OFFSET(UREGS_R12_fiq, struct cpu_user_regs, r12_fiq);
- OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr);
- BLANK();
+ OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr);
+ BLANK();
- DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+ DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
- OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
+ OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
- BLANK();
- DEFINE(PROCINFO_sizeof, sizeof(struct proc_info_list));
- OFFSET(PROCINFO_cpu_val, struct proc_info_list, cpu_val);
- OFFSET(PROCINFO_cpu_mask, struct proc_info_list, cpu_mask);
- OFFSET(PROCINFO_cpu_init, struct proc_info_list, cpu_init);
+ BLANK();
+ DEFINE(PROCINFO_sizeof, sizeof(struct proc_info_list));
+ OFFSET(PROCINFO_cpu_val, struct proc_info_list, cpu_val);
+ OFFSET(PROCINFO_cpu_mask, struct proc_info_list, cpu_mask);
+ OFFSET(PROCINFO_cpu_init, struct proc_info_list, cpu_init);
- BLANK();
- OFFSET(INITINFO_stack, struct init_info, stack);
+ BLANK();
+ OFFSET(INITINFO_stack, struct init_info, stack);
}
diff --git a/xen/arch/arm/arm32/domctl.c b/xen/arch/arm/arm32/domctl.c
index fbf9d3bddc..984025e69d 100644
--- a/xen/arch/arm/arm32/domctl.c
+++ b/xen/arch/arm/arm32/domctl.c
@@ -12,7 +12,7 @@
#include <public/domctl.h>
long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d,
- XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
switch ( domctl->cmd )
{
diff --git a/xen/arch/arm/arm32/lib/assembler.h b/xen/arch/arm/arm32/lib/assembler.h
index 6de2638a36..966122a70f 100644
--- a/xen/arch/arm/arm32/lib/assembler.h
+++ b/xen/arch/arm/arm32/lib/assembler.h
@@ -87,78 +87,78 @@
* Enable and disable interrupts
*/
#if __LINUX_ARM_ARCH__ >= 6
- .macro disable_irq_notrace
- cpsid i
- .endm
+.macro disable_irq_notrace
+cpsid i
+.endm
- .macro enable_irq_notrace
- cpsie i
- .endm
+.macro enable_irq_notrace
+cpsie i
+.endm
#else
- .macro disable_irq_notrace
- msr cpsr_c, #PSR_I_BIT | SVC_MODE
- .endm
+.macro disable_irq_notrace
+msr cpsr_c, #PSR_I_BIT | SVC_MODE
+.endm
- .macro enable_irq_notrace
- msr cpsr_c, #SVC_MODE
- .endm
+.macro enable_irq_notrace
+msr cpsr_c, #SVC_MODE
+.endm
#endif
- .macro asm_trace_hardirqs_off
+.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
- stmdb sp!, {r0-r3, ip, lr}
- bl trace_hardirqs_off
- ldmia sp!, {r0-r3, ip, lr}
+stmdb sp!, {r0-r3, ip, lr}
+bl trace_hardirqs_off
+ldmia sp!, {r0-r3, ip, lr}
#endif
- .endm
+.endm
- .macro asm_trace_hardirqs_on_cond, cond
+.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
- /*
- * actually the registers should be pushed and pop'd conditionally, but
- * after bl the flags are certainly clobbered
- */
- stmdb sp!, {r0-r3, ip, lr}
- bl\cond trace_hardirqs_on
- ldmia sp!, {r0-r3, ip, lr}
+/*
+ * actually the registers should be pushed and pop'd conditionally, but
+ * after bl the flags are certainly clobbered
+ */
+stmdb sp!, {r0-r3, ip, lr}
+bl\cond trace_hardirqs_on
+ldmia sp!, {r0-r3, ip, lr}
#endif
- .endm
+.endm
- .macro asm_trace_hardirqs_on
- asm_trace_hardirqs_on_cond al
- .endm
+.macro asm_trace_hardirqs_on
+asm_trace_hardirqs_on_cond al
+.endm
- .macro disable_irq
- disable_irq_notrace
- asm_trace_hardirqs_off
- .endm
+.macro disable_irq
+disable_irq_notrace
+asm_trace_hardirqs_off
+.endm
- .macro enable_irq
- asm_trace_hardirqs_on
- enable_irq_notrace
- .endm
+.macro enable_irq
+asm_trace_hardirqs_on
+enable_irq_notrace
+.endm
/*
* Save the current IRQ state and disable IRQs. Note that this macro
* assumes FIQs are enabled, and that the processor is in SVC mode.
*/
- .macro save_and_disable_irqs, oldcpsr
- mrs \oldcpsr, cpsr
- disable_irq
- .endm
+.macro save_and_disable_irqs, oldcpsr
+mrs \oldcpsr, cpsr
+disable_irq
+.endm
/*
* Restore interrupt state previously stored in a register. We don't
* guarantee that this will preserve the flags.
*/
- .macro restore_irqs_notrace, oldcpsr
- msr cpsr_c, \oldcpsr
- .endm
+.macro restore_irqs_notrace, oldcpsr
+msr cpsr_c, \oldcpsr
+.endm
- .macro restore_irqs, oldcpsr
- tst \oldcpsr, #PSR_I_BIT
- asm_trace_hardirqs_on_cond eq
- restore_irqs_notrace \oldcpsr
- .endm
+.macro restore_irqs, oldcpsr
+tst \oldcpsr, #PSR_I_BIT
+asm_trace_hardirqs_on_cond eq
+restore_irqs_notrace \oldcpsr
+.endm
#define USER(x...) \
9999: x; \
@@ -198,47 +198,47 @@
/*
* Instruction barrier
*/
- .macro instr_sync
+.macro instr_sync
#if __LINUX_ARM_ARCH__ >= 7
- isb
+isb
#elif __LINUX_ARM_ARCH__ == 6
- mcr p15, 0, r0, c7, c5, 4
+mcr p15, 0, r0, c7, c5, 4
#endif
- .endm
+.endm
/*
* SMP data memory barrier
*/
- .macro smp_dmb mode
+.macro smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
- .ifeqs "\mode","arm"
- ALT_SMP(dmb)
- .else
- ALT_SMP(W(dmb))
- .endif
+.ifeqs "\mode", "arm"
+ALT_SMP(dmb)
+.else
+ALT_SMP(W(dmb))
+.endif
#elif __LINUX_ARM_ARCH__ == 6
- ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
+ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
- .ifeqs "\mode","arm"
- ALT_UP(nop)
- .else
- ALT_UP(W(nop))
- .endif
+.ifeqs "\mode", "arm"
+ALT_UP(nop)
+.else
+ALT_UP(W(nop))
+.endif
#endif
- .endm
+.endm
#ifdef CONFIG_THUMB2_KERNEL
- .macro setmode, mode, reg
- mov \reg, #\mode
- msr cpsr_c, \reg
- .endm
+.macro setmode, mode, reg
+mov \reg, #\mode
+msr cpsr_c, \reg
+.endm
#else
- .macro setmode, mode, reg
- msr cpsr_c, #\mode
- .endm
+.macro setmode, mode, reg
+msr cpsr_c, #\mode
+.endm
#endif
/*
@@ -246,80 +246,81 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
- .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
-9999:
- .if \inc == 1
- \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
- .elseif \inc == 4
- \instr\cond\()\t\().w \reg, [\ptr, #\off]
- .else
- .error "Unsupported inc macro argument"
- .endif
-
- .pushsection __ex_table,"a"
- .align 3
- .long 9999b, \abort
- .popsection
- .endm
-
- .macro usracc, instr, reg, ptr, inc, cond, rept, abort
- @ explicit IT instruction needed because of the label
- @ introduced by the USER macro
- .ifnc \cond,al
- .if \rept == 1
- itt \cond
- .elseif \rept == 2
- ittt \cond
- .else
- .error "Unsupported rept macro argument"
- .endif
- .endif
-
- @ Slightly optimised to avoid incrementing the pointer twice
- usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
- .if \rept == 2
- usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
- .endif
-
- add\cond \ptr, #\rept * \inc
- .endm
+.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
+ 9999:
+ .if \inc == 1
+ \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+ .elseif \inc == 4
+ \instr\cond\()\t\().w \reg, [\ptr, #\off]
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 9999b, \abort
+ .popsection
+ .endm
+
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort
+ @ explicit IT instruction needed because of the label
+ @ introduced by the USER macro
+ .ifnc \cond, al
+ .if \rept == 1
+ itt \cond
+ .elseif \rept == 2
+ ittt \cond
+ .else
+ .error "Unsupported rept macro argument"
+ .endif
+ .endif
+
+ @ Slightly optimised to avoid incrementing the pointer twice
+ usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
+ .if \rept == 2
+ usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
+ .endif
+
+ add\cond \ptr, #\rept* \inc
+ .endm
#else /* !CONFIG_THUMB2_KERNEL */
- .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
- .rept \rept
-9999:
- .if \inc == 1
- \instr\cond\()b\()\t \reg, [\ptr], #\inc
- .elseif \inc == 4
- \instr\cond\()\t \reg, [\ptr], #\inc
- .else
- .error "Unsupported inc macro argument"
- .endif
-
- .pushsection __ex_table,"a"
- .align 3
- .long 9999b, \abort
- .popsection
- .endr
- .endm
+.macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
+ .rept \rept
+ 9999:
+ .if \inc == 1
+ \instr\cond\()b\()\t \reg, [\ptr], #\inc
+ .elseif \inc == 4
+ \instr\cond\()\t \reg, [\ptr], #\inc
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 9999b, \abort
+ .popsection
+ .endr
+ .endm
#endif /* CONFIG_THUMB2_KERNEL */
- .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
- usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
- .endm
-
- .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
- usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
- .endm
-
-/* Utility macro for declaring string literals */
- .macro string name:req, string
- .type \name , #object
-\name:
- .asciz "\string"
- .size \name , . - \name
- .endm
+ .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+ usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+ usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ /* Utility macro for declaring string literals */
+ .macro string name:
+ req, string
+ .type \name, #object
+ \name:
+ .asciz "\string"
+ .size \name, . - \name
+ .endm
#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/xen/arch/arm/arm32/lib/bitops.c b/xen/arch/arm/arm32/lib/bitops.c
index 3dca769bf0..3b6fc576d0 100644
--- a/xen/arch/arm/arm32/lib/bitops.c
+++ b/xen/arch/arm/arm32/lib/bitops.c
@@ -136,11 +136,11 @@ static always_inline bool int_clear_mask16(uint16_t mask, volatile uint16_t *p,
do
{
asm volatile ("// int_clear_mask16\n"
- " ldrexh %2, %1\n"
- " bic %2, %2, %3\n"
- " strexh %0, %2, %1\n"
- : "=&r" (res), "+Qo" (*p), "=&r" (tmp)
- : "r" (mask));
+ " ldrexh %2, %1\n"
+ " bic %2, %2, %3\n"
+ " strexh %0, %2, %1\n"
+ : "=&r" (res), "+Qo" (*p), "=&r" (tmp)
+ : "r" (mask));
if ( !res )
break;
diff --git a/xen/arch/arm/arm32/livepatch.c b/xen/arch/arm/arm32/livepatch.c
index 41378a54ae..5554749cbe 100644
--- a/xen/arch/arm/arm32/livepatch.c
+++ b/xen/arch/arm/arm32/livepatch.c
@@ -116,7 +116,8 @@ static s32 get_addend(unsigned char type, void *dest)
{
s32 addend = 0;
- switch ( type ) {
+ switch ( type )
+ {
case R_ARM_NONE:
/* ignore */
break;
@@ -151,7 +152,8 @@ static s32 get_addend(unsigned char type, void *dest)
static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
{
- switch ( type ) {
+ switch ( type )
+ {
case R_ARM_NONE:
/* ignore */
break;
@@ -213,7 +215,7 @@ static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
break;
default:
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -262,7 +264,8 @@ int arch_livepatch_perform(struct livepatch_elf *elf,
}
else if ( symndx >= elf->nsym )
{
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative symbol wants symbol@%u which is past end!\n",
+ dprintk(XENLOG_ERR, LIVEPATCH
+ "%s: Relative symbol wants symbol@%u which is past end!\n",
elf->name, symndx);
return -EINVAL;
}
diff --git a/xen/arch/arm/arm32/proc-caxx.c b/xen/arch/arm/arm32/proc-caxx.c
index 9166a1d654..aec18c07f3 100644
--- a/xen/arch/arm/arm32/proc-caxx.c
+++ b/xen/arch/arm/arm32/proc-caxx.c
@@ -30,6 +30,7 @@ static void caxx_vcpu_initialise(struct vcpu *v)
v->arch.actlr &= ~ACTLR_SMP;
}
-const struct processor caxx_processor = {
+const struct processor caxx_processor =
+{
.vcpu_initialise = caxx_vcpu_initialise,
};
diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c
index 280ddb55bf..a88096ec71 100644
--- a/xen/arch/arm/arm64/asm-offsets.c
+++ b/xen/arch/arm/arm64/asm-offsets.c
@@ -22,40 +22,40 @@
void __dummy__(void)
{
- OFFSET(UREGS_X0, struct cpu_user_regs, x0);
- OFFSET(UREGS_X1, struct cpu_user_regs, x1);
- OFFSET(UREGS_LR, struct cpu_user_regs, lr);
+ OFFSET(UREGS_X0, struct cpu_user_regs, x0);
+ OFFSET(UREGS_X1, struct cpu_user_regs, x1);
+ OFFSET(UREGS_LR, struct cpu_user_regs, lr);
- OFFSET(UREGS_SP, struct cpu_user_regs, sp);
- OFFSET(UREGS_PC, struct cpu_user_regs, pc);
- OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
- OFFSET(UREGS_ESR_el2, struct cpu_user_regs, hsr);
+ OFFSET(UREGS_SP, struct cpu_user_regs, sp);
+ OFFSET(UREGS_PC, struct cpu_user_regs, pc);
+ OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
+ OFFSET(UREGS_ESR_el2, struct cpu_user_regs, hsr);
- OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
+ OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
- OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
- OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
- OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
- OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
+ OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
+ OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
+ OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
+ OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
- OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0);
- OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1);
- OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1);
+ OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0);
+ OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1);
+ OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1);
- OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, spsr_el1);
- BLANK();
+ OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, spsr_el1);
+ BLANK();
- DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
- OFFSET(CPUINFO_flags, struct cpu_info, flags);
+ DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+ OFFSET(CPUINFO_flags, struct cpu_info, flags);
- OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
+ OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
- BLANK();
- OFFSET(INITINFO_stack, struct init_info, stack);
+ BLANK();
+ OFFSET(INITINFO_stack, struct init_info, stack);
- BLANK();
- OFFSET(SMCCC_RES_a0, struct arm_smccc_res, a0);
- OFFSET(SMCCC_RES_a2, struct arm_smccc_res, a2);
+ BLANK();
+ OFFSET(SMCCC_RES_a0, struct arm_smccc_res, a0);
+ OFFSET(SMCCC_RES_a2, struct arm_smccc_res, a2);
}
/*
diff --git a/xen/arch/arm/arm64/domain.c b/xen/arch/arm/arm64/domain.c
index dd19098929..1025d7c157 100644
--- a/xen/arch/arm/arm64/domain.c
+++ b/xen/arch/arm/arm64/domain.c
@@ -29,13 +29,9 @@ void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp
ALLREGS;
if ( is_32bit_domain(vcpu->domain) )
- {
ALLREGS32;
- }
else
- {
ALLREGS64;
- }
#undef C
}
@@ -45,13 +41,9 @@ void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user
ALLREGS;
if ( is_32bit_domain(vcpu->domain) )
- {
ALLREGS32;
- }
else
- {
ALLREGS64;
- }
#undef C
}
diff --git a/xen/arch/arm/arm64/domctl.c b/xen/arch/arm/arm64/domctl.c
index ab8781fb91..e65a10e56d 100644
--- a/xen/arch/arm/arm64/domctl.c
+++ b/xen/arch/arm/arm64/domctl.c
@@ -27,7 +27,7 @@ static long switch_mode(struct domain *d, enum domain_type type)
if ( is_64bit_domain(d) )
for_each_vcpu(d, v)
- vcpu_switch_to_aarch64_mode(v);
+ vcpu_switch_to_aarch64_mode(v);
return 0;
}
diff --git a/xen/arch/arm/arm64/insn.c b/xen/arch/arm/arm64/insn.c
index 22f2bdebd5..d9430eab4a 100644
--- a/xen/arch/arm/arm64/insn.c
+++ b/xen/arch/arm/arm64/insn.c
@@ -31,64 +31,65 @@
bool aarch64_insn_is_branch_imm(u32 insn)
{
- return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
- aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
- aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
- aarch64_insn_is_bcond(insn));
+ return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn));
}
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
- u32 *maskp, int *shiftp)
+ u32 *maskp, int *shiftp)
{
- u32 mask;
- int shift;
-
- switch (type) {
- case AARCH64_INSN_IMM_26:
- mask = BIT(26, UL) - 1;
- shift = 0;
- break;
- case AARCH64_INSN_IMM_19:
- mask = BIT(19, UL) - 1;
- shift = 5;
- break;
- case AARCH64_INSN_IMM_16:
- mask = BIT(16, UL) - 1;
- shift = 5;
- break;
- case AARCH64_INSN_IMM_14:
- mask = BIT(14, UL) - 1;
- shift = 5;
- break;
- case AARCH64_INSN_IMM_12:
- mask = BIT(12, UL) - 1;
- shift = 10;
- break;
- case AARCH64_INSN_IMM_9:
- mask = BIT(9, UL) - 1;
- shift = 12;
- break;
- case AARCH64_INSN_IMM_7:
- mask = BIT(7, UL) - 1;
- shift = 15;
- break;
- case AARCH64_INSN_IMM_6:
- case AARCH64_INSN_IMM_S:
- mask = BIT(6, UL) - 1;
- shift = 10;
- break;
- case AARCH64_INSN_IMM_R:
- mask = BIT(6, UL) - 1;
- shift = 16;
- break;
- default:
- return -EINVAL;
- }
-
- *maskp = mask;
- *shiftp = shift;
-
- return 0;
+ u32 mask;
+ int shift;
+
+ switch (type)
+ {
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26, UL) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19, UL) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16, UL) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14, UL) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12, UL) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9, UL) - 1;
+ shift = 12;
+ break;
+ case AARCH64_INSN_IMM_7:
+ mask = BIT(7, UL) - 1;
+ shift = 15;
+ break;
+ case AARCH64_INSN_IMM_6:
+ case AARCH64_INSN_IMM_S:
+ mask = BIT(6, UL) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_R:
+ mask = BIT(6, UL) - 1;
+ shift = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *maskp = mask;
+ *shiftp = shift;
+
+ return 0;
}
#define ADR_IMM_HILOSPLIT 2
@@ -100,121 +101,128 @@ static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
- u32 immlo, immhi, mask;
- int shift;
-
- switch (type) {
- case AARCH64_INSN_IMM_ADR:
- shift = 0;
- immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
- immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
- insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
- mask = ADR_IMM_SIZE - 1;
- break;
- default:
- if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
- pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
- }
- }
-
- return (insn >> shift) & mask;
+ u32 immlo, immhi, mask;
+ int shift;
+
+ switch (type)
+ {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
+ immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
+ insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
+ mask = ADR_IMM_SIZE - 1;
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0)
+ {
+ pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+ }
+
+ return (insn >> shift) & mask;
}
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
- u32 insn, u64 imm)
+ u32 insn, u64 imm)
{
- u32 immlo, immhi, mask;
- int shift;
-
- if (insn == AARCH64_BREAK_FAULT)
- return AARCH64_BREAK_FAULT;
-
- switch (type) {
- case AARCH64_INSN_IMM_ADR:
- shift = 0;
- immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
- imm >>= ADR_IMM_HILOSPLIT;
- immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
- imm = immlo | immhi;
- mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
- (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
- break;
- default:
- if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
- pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
- type);
- return AARCH64_BREAK_FAULT;
- }
- }
-
- /* Update the immediate field. */
- insn &= ~(mask << shift);
- insn |= (imm & mask) << shift;
-
- return insn;
+ u32 immlo, immhi, mask;
+ int shift;
+
+ if (insn == AARCH64_BREAK_FAULT)
+ return AARCH64_BREAK_FAULT;
+
+ switch (type)
+ {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
+ imm >>= ADR_IMM_HILOSPLIT;
+ immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
+ imm = immlo | immhi;
+ mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
+ (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0)
+ {
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return AARCH64_BREAK_FAULT;
+ }
+ }
+
+ /* Update the immediate field. */
+ insn &= ~(mask << shift);
+ insn |= (imm & mask) << shift;
+
+ return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
- long range)
+ long range)
{
- long offset;
+ long offset;
- if ((pc & 0x3) || (addr & 0x3)) {
- pr_err("%s: A64 instructions must be word aligned\n", __func__);
- return range;
- }
+ if ((pc & 0x3) || (addr & 0x3))
+ {
+ pr_err("%s: A64 instructions must be word aligned\n", __func__);
+ return range;
+ }
- offset = ((long)addr - (long)pc);
+ offset = ((long)addr - (long)pc);
- if (offset < -range || offset >= range) {
- pr_err("%s: offset out of range\n", __func__);
- return range;
- }
+ if (offset < -range || offset >= range)
+ {
+ pr_err("%s: offset out of range\n", __func__);
+ return range;
+ }
- return offset;
+ return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
- enum aarch64_insn_branch_type type)
+ enum aarch64_insn_branch_type type)
{
- u32 insn;
- long offset;
-
- /*
- * B/BL support [-128M, 128M) offset
- * ARM64 virtual address arrangement guarantees all kernel and module
- * texts are within +/-128M.
- */
- offset = branch_imm_common(pc, addr, SZ_128M);
- if (offset >= SZ_128M)
- return AARCH64_BREAK_FAULT;
-
- switch (type) {
- case AARCH64_INSN_BRANCH_LINK:
- insn = aarch64_insn_get_bl_value();
- break;
- case AARCH64_INSN_BRANCH_NOLINK:
- insn = aarch64_insn_get_b_value();
- break;
- default:
- pr_err("%s: unknown branch encoding %d\n", __func__, type);
- return AARCH64_BREAK_FAULT;
- }
-
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
- offset >> 2);
+ u32 insn;
+ long offset;
+
+ /*
+ * B/BL support [-128M, 128M) offset
+ * ARM64 virtual address arrangement guarantees all kernel and module
+ * texts are within +/-128M.
+ */
+ offset = branch_imm_common(pc, addr, SZ_128M);
+ if (offset >= SZ_128M)
+ return AARCH64_BREAK_FAULT;
+
+ switch (type)
+ {
+ case AARCH64_INSN_BRANCH_LINK:
+ insn = aarch64_insn_get_bl_value();
+ break;
+ case AARCH64_INSN_BRANCH_NOLINK:
+ insn = aarch64_insn_get_b_value();
+ break;
+ default:
+ pr_err("%s: unknown branch encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
- return aarch64_insn_get_hint_value() | op;
+ return aarch64_insn_get_hint_value() | op;
}
u32 __kprobes aarch64_insn_gen_nop(void)
{
- return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
/*
@@ -224,26 +232,29 @@ u32 __kprobes aarch64_insn_gen_nop(void)
*/
s32 aarch64_get_branch_offset(u32 insn)
{
- s32 imm;
-
- if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
- imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
- return (imm << 6) >> 4;
- }
-
- if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
- aarch64_insn_is_bcond(insn)) {
- imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
- return (imm << 13) >> 11;
- }
-
- if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
- imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
- return (imm << 18) >> 16;
- }
-
- /* Unhandled instruction */
- BUG();
+ s32 imm;
+
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
+ {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+ return (imm << 6) >> 4;
+ }
+
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn))
+ {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
+ return (imm << 13) >> 11;
+ }
+
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
+ {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
+ return (imm << 18) >> 16;
+ }
+
+ /* Unhandled instruction */
+ BUG();
}
/*
@@ -252,21 +263,21 @@ s32 aarch64_get_branch_offset(u32 insn)
*/
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
- if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
- offset >> 2);
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
- if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
- aarch64_insn_is_bcond(insn))
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
- offset >> 2);
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
- if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
- offset >> 2);
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
+ offset >> 2);
- /* Unhandled instruction */
- BUG();
+ /* Unhandled instruction */
+ BUG();
}
/*
diff --git a/xen/arch/arm/arm64/lib/bitops.c b/xen/arch/arm/arm64/lib/bitops.c
index 27688e5418..9469e58434 100644
--- a/xen/arch/arm/arm64/lib/bitops.c
+++ b/xen/arch/arm/arm64/lib/bitops.c
@@ -126,11 +126,11 @@ static always_inline bool int_clear_mask16(uint16_t mask, volatile uint16_t *p,
do
{
asm volatile ("// int_clear_mask16\n"
- " ldxrh %w2, %1\n"
- " bic %w2, %w2, %w3\n"
- " stxrh %w0, %w2, %1\n"
- : "=&r" (res), "+Q" (*p), "=&r" (tmp)
- : "r" (mask));
+ " ldxrh %w2, %1\n"
+ " bic %w2, %w2, %w3\n"
+ " stxrh %w0, %w2, %1\n"
+ : "=&r" (res), "+Q" (*p), "=&r" (tmp)
+ : "r" (mask));
if ( !res )
break;
diff --git a/xen/arch/arm/arm64/lib/find_next_bit.c b/xen/arch/arm/arm64/lib/find_next_bit.c
index 17cb176266..0c305bbebf 100644
--- a/xen/arch/arm/arm64/lib/find_next_bit.c
+++ b/xen/arch/arm/arm64/lib/find_next_bit.c
@@ -19,42 +19,44 @@
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+ unsigned long offset)
{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset)
+ {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1))
+ {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
found_middle:
- return result + __ffs(tmp);
+ return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_next_bit);
#endif
@@ -65,42 +67,44 @@ EXPORT_SYMBOL(find_next_bit);
* Linus' asm-alpha/bitops.h.
*/
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+ unsigned long offset)
{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (BITS_PER_LONG - offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset)
+ {
+ tmp = *(p++);
+ tmp |= ~0UL >> (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1))
+ {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
found_middle:
- return result + ffz(tmp);
+ return result + ffz(tmp);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
@@ -111,24 +115,25 @@ EXPORT_SYMBOL(find_next_zero_bit);
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
- const unsigned long *p = addr;
- unsigned long result = 0;
- unsigned long tmp;
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
+ while (size & ~(BITS_PER_LONG-1))
+ {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
- tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
found:
- return result + __ffs(tmp);
+ return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_first_bit);
#endif
@@ -139,24 +144,25 @@ EXPORT_SYMBOL(find_first_bit);
*/
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
- const unsigned long *p = addr;
- unsigned long result = 0;
- unsigned long tmp;
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
- while (size & ~(BITS_PER_LONG-1)) {
- if (~(tmp = *(p++)))
- goto found;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
+ while (size & ~(BITS_PER_LONG-1))
+ {
+ if (~(tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
- tmp = (*p) | (~0UL << size);
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
+ tmp = (*p) | (~0UL << size);
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
found:
- return result + ffz(tmp);
+ return result + ffz(tmp);
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
@@ -164,12 +170,12 @@ EXPORT_SYMBOL(find_first_zero_bit);
#ifdef __BIG_ENDIAN
/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swabp(const unsigned long * x)
+static inline unsigned long ext2_swabp(const unsigned long *x)
{
#if BITS_PER_LONG == 64
- return (unsigned long) __swab64p((u64 *) x);
+ return (unsigned long) __swab64p((u64 *) x);
#elif BITS_PER_LONG == 32
- return (unsigned long) __swab32p((u32 *) x);
+ return (unsigned long) __swab32p((u32 *) x);
#else
#error BITS_PER_LONG not defined
#endif
@@ -179,9 +185,9 @@ static inline unsigned long ext2_swabp(const unsigned long * x)
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
- return (unsigned long) __swab64((u64) y);
+ return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
- return (unsigned long) __swab32((u32) y);
+ return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
@@ -189,93 +195,97 @@ static inline unsigned long ext2_swab(const unsigned long y)
#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
+ long size, unsigned long offset)
{
- const unsigned long *p = addr;
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
- unsigned long tmp;
+ const unsigned long *p = addr;
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
- if (offset >= size)
- return size;
- p += BITOP_WORD(offset);
- size -= result;
- offset &= (BITS_PER_LONG - 1UL);
- if (offset) {
- tmp = ext2_swabp(p++);
- tmp |= (~0UL >> (BITS_PER_LONG - offset));
- if (size < BITS_PER_LONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
+ if (offset >= size)
+ return size;
+ p += BITOP_WORD(offset);
+ size -= result;
+ offset &= (BITS_PER_LONG - 1UL);
+ if (offset)
+ {
+ tmp = ext2_swabp(p++);
+ tmp |= (~0UL >> (BITS_PER_LONG - offset));
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
- while (size & ~(BITS_PER_LONG - 1)) {
- if (~(tmp = *(p++)))
- goto found_middle_swap;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = ext2_swabp(p);
+ while (size & ~(BITS_PER_LONG - 1))
+ {
+ if (~(tmp = *(p++)))
+ goto found_middle_swap;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = ext2_swabp(p);
found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. Skip ffz */
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. Skip ffz */
found_middle:
- return result + ffz(tmp);
+ return result + ffz(tmp);
found_middle_swap:
- return result + ffz(ext2_swab(tmp));
+ return result + ffz(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
+ long size, unsigned long offset)
{
- const unsigned long *p = addr;
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
- unsigned long tmp;
+ const unsigned long *p = addr;
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
- if (offset >= size)
- return size;
- p += BITOP_WORD(offset);
- size -= result;
- offset &= (BITS_PER_LONG - 1UL);
- if (offset) {
- tmp = ext2_swabp(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
+ if (offset >= size)
+ return size;
+ p += BITOP_WORD(offset);
+ size -= result;
+ offset &= (BITS_PER_LONG - 1UL);
+ if (offset)
+ {
+ tmp = ext2_swabp(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
- while (size & ~(BITS_PER_LONG - 1)) {
- tmp = *(p++);
- if (tmp)
- goto found_middle_swap;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = ext2_swabp(p);
+ while (size & ~(BITS_PER_LONG - 1))
+ {
+ tmp = *(p++);
+ if (tmp)
+ goto found_middle_swap;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = ext2_swabp(p);
found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
found_middle:
- return result + __ffs(tmp);
+ return result + __ffs(tmp);
found_middle_swap:
- return result + __ffs(ext2_swab(tmp));
+ return result + __ffs(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
diff --git a/xen/arch/arm/arm64/livepatch.c b/xen/arch/arm/arm64/livepatch.c
index 5c75779284..66a8aedf9f 100644
--- a/xen/arch/arm/arm64/livepatch.c
+++ b/xen/arch/arm/arm64/livepatch.c
@@ -86,7 +86,8 @@ bool arch_livepatch_symbol_deny(const struct livepatch_elf *elf,
return false;
}
-enum aarch64_reloc_op {
+enum aarch64_reloc_op
+{
RELOC_OP_NONE,
RELOC_OP_ABS,
RELOC_OP_PREL,
@@ -111,7 +112,8 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
}
- dprintk(XENLOG_DEBUG, LIVEPATCH "do_reloc: unknown relocation operation %d\n", reloc_op);
+ dprintk(XENLOG_DEBUG, LIVEPATCH "do_reloc: unknown relocation operation %d\n",
+ reloc_op);
return 0;
}
@@ -125,13 +127,13 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
case 16:
*(s16 *)place = sval;
if ( sval < INT16_MIN || sval > UINT16_MAX )
- return -EOVERFLOW;
+ return -EOVERFLOW;
break;
case 32:
*(s32 *)place = sval;
if ( sval < INT32_MIN || sval > UINT32_MAX )
- return -EOVERFLOW;
+ return -EOVERFLOW;
break;
case 64:
@@ -139,14 +141,16 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
break;
default:
- dprintk(XENLOG_DEBUG, LIVEPATCH "Invalid length (%d) for data relocation\n", len);
+ dprintk(XENLOG_DEBUG, LIVEPATCH "Invalid length (%d) for data relocation\n",
+ len);
return 0;
}
return 0;
}
-enum aarch64_insn_movw_imm_type {
+enum aarch64_insn_movw_imm_type
+{
AARCH64_INSN_IMM_MOVNZ,
AARCH64_INSN_IMM_MOVKZ,
};
@@ -260,7 +264,8 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf,
}
else if ( symndx >= elf->nsym )
{
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation wants symbol@%u which is past end!\n",
+ dprintk(XENLOG_ERR, LIVEPATCH
+ "%s: Relative relocation wants symbol@%u which is past end!\n",
elf->name, symndx);
return -EINVAL;
}
@@ -275,7 +280,7 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf,
/* ARM64 operations at minimum are always 32-bit. */
if ( r->r_offset >= base->sec->sh_size ||
- (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size )
+ (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size )
goto bad_offset;
switch ( ELF64_R_TYPE(r->r_info) )
@@ -314,7 +319,7 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf,
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
- /* Fallthrough. */
+ /* Fallthrough. */
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 0,
@@ -323,7 +328,7 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf,
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
- /* Fallthrough. */
+ /* Fallthrough. */
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 16,
@@ -332,7 +337,7 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf,
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
- /* Fallthrough. */
+ /* Fallthrough. */
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 32,
@@ -409,14 +414,14 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf,
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
- /* Fallthrough. */
+ /* Fallthrough. */
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, dest, val, 12, 21,
AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_LDST8_ABS_LO12_NC:
- /* Fallthrough. */
+ /* Fallthrough. */
case R_AARCH64_ADD_ABS_LO12_NC:
overflow_check = false;
@@ -479,8 +484,9 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf,
}
return 0;
- bad_offset:
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation offset is past %s section!\n",
+bad_offset:
+ dprintk(XENLOG_ERR, LIVEPATCH
+ "%s: Relative relocation offset is past %s section!\n",
elf->name, base->name);
return -EINVAL;
}
diff --git a/xen/arch/arm/arm64/smpboot.c b/xen/arch/arm/arm64/smpboot.c
index 694fbf67e6..95fc65546d 100644
--- a/xen/arch/arm/arm64/smpboot.c
+++ b/xen/arch/arm/arm64/smpboot.c
@@ -9,8 +9,9 @@
#include <asm/psci.h>
#include <asm/acpi.h>
-struct smp_enable_ops {
- int (*prepare_cpu)(int);
+struct smp_enable_ops
+{
+ int (*prepare_cpu)(int);
};
static paddr_t cpu_release_addr[NR_CPUS];
diff --git a/xen/arch/arm/arm64/traps.c b/xen/arch/arm/arm64/traps.c
index babfc1d884..0e7a61f3e1 100644
--- a/xen/arch/arm/arm64/traps.c
+++ b/xen/arch/arm/arm64/traps.c
@@ -24,11 +24,12 @@
#include <public/xen.h>
-static const char *handler[]= {
- "Synchronous Abort",
- "IRQ",
- "FIQ",
- "Error"
+static const char *handler[]=
+{
+ "Synchronous Abort",
+ "IRQ",
+ "FIQ",
+ "Error"
};
void do_bad_mode(struct cpu_user_regs *regs, int reason)
diff --git a/xen/arch/arm/arm64/vsysreg.c b/xen/arch/arm/arm64/vsysreg.c
index 8a85507d9d..72139824bf 100644
--- a/xen/arch/arm/arm64/vsysreg.c
+++ b/xen/arch/arm/arm64/vsysreg.c
@@ -101,22 +101,22 @@ void do_sysreg(struct cpu_user_regs *regs,
p2m_set_way_flush(current);
break;
- /*
- * HCR_EL2.TVM
- *
- * ARMv8 (DDI 0487D.a): Table D1-38
- */
- GENERATE_CASE(SCTLR_EL1)
- GENERATE_CASE(TTBR0_EL1)
- GENERATE_CASE(TTBR1_EL1)
- GENERATE_CASE(TCR_EL1)
- GENERATE_CASE(ESR_EL1)
- GENERATE_CASE(FAR_EL1)
- GENERATE_CASE(AFSR0_EL1)
- GENERATE_CASE(AFSR1_EL1)
- GENERATE_CASE(MAIR_EL1)
- GENERATE_CASE(AMAIR_EL1)
- GENERATE_CASE(CONTEXTIDR_EL1)
+ /*
+ * HCR_EL2.TVM
+ *
+ * ARMv8 (DDI 0487D.a): Table D1-38
+ */
+ GENERATE_CASE(SCTLR_EL1)
+ GENERATE_CASE(TTBR0_EL1)
+ GENERATE_CASE(TTBR1_EL1)
+ GENERATE_CASE(TCR_EL1)
+ GENERATE_CASE(ESR_EL1)
+ GENERATE_CASE(FAR_EL1)
+ GENERATE_CASE(AFSR0_EL1)
+ GENERATE_CASE(AFSR1_EL1)
+ GENERATE_CASE(MAIR_EL1)
+ GENERATE_CASE(AMAIR_EL1)
+ GENERATE_CASE(CONTEXTIDR_EL1)
/*
* MDCR_EL2.TDRA
@@ -167,11 +167,11 @@ void do_sysreg(struct cpu_user_regs *regs,
* register as RAZ/WI above. So RO at both EL0 and EL1.
*/
return handle_ro_raz(regs, regidx, hsr.sysreg.read, hsr, 0);
- HSR_SYSREG_DBG_CASES(DBGBVR):
- HSR_SYSREG_DBG_CASES(DBGBCR):
- HSR_SYSREG_DBG_CASES(DBGWVR):
- HSR_SYSREG_DBG_CASES(DBGWCR):
- return handle_raz_wi(regs, regidx, hsr.sysreg.read, hsr, 1);
+ HSR_SYSREG_DBG_CASES(DBGBVR):
+ HSR_SYSREG_DBG_CASES(DBGBCR):
+ HSR_SYSREG_DBG_CASES(DBGWVR):
+ HSR_SYSREG_DBG_CASES(DBGWCR):
+ return handle_raz_wi(regs, regidx, hsr.sysreg.read, hsr, 1);
/*
* MDCR_EL2.TPM
@@ -275,22 +275,22 @@ void do_sysreg(struct cpu_user_regs *regs,
* And all other unknown registers.
*/
default:
- {
- const struct hsr_sysreg sysreg = hsr.sysreg;
+ {
+ const struct hsr_sysreg sysreg = hsr.sysreg;
- gdprintk(XENLOG_ERR,
- "%s %d, %d, c%d, c%d, %d %s x%d @ 0x%"PRIregister"\n",
- sysreg.read ? "mrs" : "msr",
- sysreg.op0, sysreg.op1,
- sysreg.crn, sysreg.crm,
- sysreg.op2,
- sysreg.read ? "=>" : "<=",
- sysreg.reg, regs->pc);
- gdprintk(XENLOG_ERR, "unhandled 64-bit sysreg access %#x\n",
- hsr.bits & HSR_SYSREG_REGS_MASK);
- inject_undef_exception(regs, hsr);
- return;
- }
+ gdprintk(XENLOG_ERR,
+ "%s %d, %d, c%d, c%d, %d %s x%d @ 0x%"PRIregister"\n",
+ sysreg.read ? "mrs" : "msr",
+ sysreg.op0, sysreg.op1,
+ sysreg.crn, sysreg.crm,
+ sysreg.op2,
+ sysreg.read ? "=>" : "<=",
+ sysreg.reg, regs->pc);
+ gdprintk(XENLOG_ERR, "unhandled 64-bit sysreg access %#x\n",
+ hsr.bits & HSR_SYSREG_REGS_MASK);
+ inject_undef_exception(regs, hsr);
+ return;
+ }
}
regs->pc += 4;
diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c
index 891b4b66ff..2babaffed5 100644
--- a/xen/arch/arm/bootfdt.c
+++ b/xen/arch/arm/bootfdt.c
@@ -28,7 +28,7 @@ static bool __init device_tree_node_matches(const void *fdt, int node,
/* Match both "match" and "match@..." patterns but not
"match-foo". */
return strncmp(name, match, match_len) == 0
- && (name[match_len] == '@' || name[match_len] == '\0');
+ && (name[match_len] == '@' || name[match_len] == '\0');
}
static bool __init device_tree_node_compatible(const void *fdt, int node,
@@ -44,7 +44,8 @@ static bool __init device_tree_node_compatible(const void *fdt, int node,
if ( prop == NULL )
return false;
- while ( len > 0 ) {
+ while ( len > 0 )
+ {
if ( !dt_compat_cmp(prop, match) )
return true;
l = strlen(prop) + 1;
@@ -71,7 +72,7 @@ static u32 __init device_tree_get_u32(const void *fdt, int node,
if ( !prop || prop->len < sizeof(u32) )
return dflt;
- return fdt32_to_cpu(*(uint32_t*)prop->data);
+ return fdt32_to_cpu(*(uint32_t *)prop->data);
}
/**
@@ -192,7 +193,7 @@ static void __init process_multiboot_node(const void *fdt, int node,
if ( len < dt_cells_to_size(address_cells + size_cells) )
panic("fdt: node `%s': `reg` property length is too short\n",
- name);
+ name);
cell = (const __be32 *)prop->data;
device_tree_get_reg(&cell, address_cells, size_cells, &start, &size);
@@ -223,11 +224,16 @@ static void __init process_multiboot_node(const void *fdt, int node,
{
switch ( kind_guess++ )
{
- case 0: kind = BOOTMOD_KERNEL; break;
- case 1: kind = BOOTMOD_RAMDISK; break;
- default: break;
+ case 0:
+ kind = BOOTMOD_KERNEL;
+ break;
+ case 1:
+ kind = BOOTMOD_RAMDISK;
+ break;
+ default:
+ break;
}
- if ( kind_guess > 1 && has_xsm_magic(start) )
+ if ( kind_guess > 1 && has_xsm_magic(start) )
kind = BOOTMOD_XSM;
}
@@ -278,7 +284,7 @@ static void __init process_chosen_node(const void *fdt, int node,
if ( start >= end )
{
printk("linux,initrd limits invalid: %"PRIpaddr" >= %"PRIpaddr"\n",
- start, end);
+ start, end);
return;
}
@@ -294,8 +300,9 @@ static int __init early_scan_node(const void *fdt,
{
if ( device_tree_node_matches(fdt, node, "memory") )
process_memory_node(fdt, node, name, address_cells, size_cells);
- else if ( depth <= 3 && (device_tree_node_compatible(fdt, node, "xen,multiboot-module" ) ||
- device_tree_node_compatible(fdt, node, "multiboot,module" )))
+ else if ( depth <= 3
+ && (device_tree_node_compatible(fdt, node, "xen,multiboot-module" ) ||
+ device_tree_node_compatible(fdt, node, "multiboot,module" )))
process_multiboot_node(fdt, node, name, address_cells, size_cells);
else if ( depth == 1 && device_tree_node_matches(fdt, node, "chosen") )
process_chosen_node(fdt, node, name, address_cells, size_cells);
@@ -312,15 +319,15 @@ static void __init early_print_info(void)
for ( i = 0; i < mi->nr_banks; i++ )
printk("RAM: %"PRIpaddr" - %"PRIpaddr"\n",
- mi->bank[i].start,
- mi->bank[i].start + mi->bank[i].size - 1);
+ mi->bank[i].start,
+ mi->bank[i].start + mi->bank[i].size - 1);
printk("\n");
for ( i = 0 ; i < mods->nr_mods; i++ )
printk("MODULE[%d]: %"PRIpaddr" - %"PRIpaddr" %-12s\n",
- i,
- mods->module[i].start,
- mods->module[i].start + mods->module[i].size,
- boot_module_kind_as_string(mods->module[i].kind));
+ i,
+ mods->module[i].start,
+ mods->module[i].start + mods->module[i].size,
+ boot_module_kind_as_string(mods->module[i].kind));
nr_rsvd = fdt_num_mem_rsv(device_tree_flattened);
for ( i = 0; i < nr_rsvd; i++ )
@@ -331,7 +338,7 @@ static void __init early_print_info(void)
/* fdt_get_mem_rsv returns length */
e += s;
printk(" RESVD[%d]: %"PRIpaddr" - %"PRIpaddr"\n",
- i, s, e);
+ i, s, e);
}
printk("\n");
for ( i = 0 ; i < cmds->nr_mods; i++ )
diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c
index 8904939aca..38e24c2ea3 100644
--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -68,9 +68,7 @@ static bool copy_hyp_vect_bpi(unsigned int slot, const char *hyp_vec_start,
dst_remapped += (vaddr_t)dst & ~PAGE_MASK;
for ( i = 0; i < VECTOR_TABLE_SIZE; i += 0x80 )
- {
memcpy(dst_remapped + i, hyp_vec_start, hyp_vec_end - hyp_vec_start);
- }
clean_dcache_va_range(dst_remapped, VECTOR_TABLE_SIZE);
invalidate_icache();
@@ -176,7 +174,7 @@ static int enable_smccc_arch_workaround_1(void *data)
if ( (int)res.a0 < 0 )
goto warn;
- return !install_bp_hardening_vec(entry,__smccc_workaround_1_smc_start,
+ return !install_bp_hardening_vec(entry, __smccc_workaround_1_smc_start,
__smccc_workaround_1_smc_end,
"call ARM_SMCCC_ARCH_WORKAROUND_1");
@@ -248,7 +246,8 @@ static int __init parse_spec_ctrl(const char *s)
const char *ss;
int rc = 0;
- do {
+ do
+ {
ss = strchr(s, ',');
if ( !ss )
ss = strchr(s, '\0');
@@ -387,7 +386,8 @@ is_affected_midr_range(const struct arm_cpu_capabilities *entry)
entry->midr_range_max);
}
-static const struct arm_cpu_capabilities arm_errata[] = {
+static const struct arm_cpu_capabilities arm_errata[] =
+{
{
/* Cortex-A15 r0p4 */
.desc = "ARM erratum 766422",
@@ -522,7 +522,8 @@ static int cpu_errata_callback(struct notifier_block *nfb,
return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}
-static struct notifier_block cpu_errata_nfb = {
+static struct notifier_block cpu_errata_nfb =
+{
.notifier_call = cpu_errata_callback,
};
diff --git a/xen/arch/arm/cpufeature.c b/xen/arch/arm/cpufeature.c
index 44126dbf07..2ea0edba15 100644
--- a/xen/arch/arm/cpufeature.c
+++ b/xen/arch/arm/cpufeature.c
@@ -99,44 +99,44 @@ int enable_nonboot_cpu_caps(const struct arm_cpu_capabilities *caps)
void identify_cpu(struct cpuinfo_arm *c)
{
- c->midr.bits = READ_SYSREG32(MIDR_EL1);
- c->mpidr.bits = READ_SYSREG(MPIDR_EL1);
+ c->midr.bits = READ_SYSREG32(MIDR_EL1);
+ c->mpidr.bits = READ_SYSREG(MPIDR_EL1);
#ifdef CONFIG_ARM_64
- c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1);
- c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1);
+ c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1);
+ c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1);
- c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1);
- c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1);
+ c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1);
+ c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1);
- c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1);
- c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1);
+ c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1);
+ c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1);
- c->mm64.bits[0] = READ_SYSREG64(ID_AA64MMFR0_EL1);
- c->mm64.bits[1] = READ_SYSREG64(ID_AA64MMFR1_EL1);
+ c->mm64.bits[0] = READ_SYSREG64(ID_AA64MMFR0_EL1);
+ c->mm64.bits[1] = READ_SYSREG64(ID_AA64MMFR1_EL1);
- c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1);
- c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1);
+ c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1);
+ c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1);
#endif
- c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1);
- c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1);
+ c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1);
+ c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1);
- c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1);
+ c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1);
- c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1);
+ c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1);
- c->mm32.bits[0] = READ_SYSREG32(ID_MMFR0_EL1);
- c->mm32.bits[1] = READ_SYSREG32(ID_MMFR1_EL1);
- c->mm32.bits[2] = READ_SYSREG32(ID_MMFR2_EL1);
- c->mm32.bits[3] = READ_SYSREG32(ID_MMFR3_EL1);
+ c->mm32.bits[0] = READ_SYSREG32(ID_MMFR0_EL1);
+ c->mm32.bits[1] = READ_SYSREG32(ID_MMFR1_EL1);
+ c->mm32.bits[2] = READ_SYSREG32(ID_MMFR2_EL1);
+ c->mm32.bits[3] = READ_SYSREG32(ID_MMFR3_EL1);
- c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1);
- c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1);
- c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1);
- c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1);
- c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1);
- c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1);
+ c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1);
+ c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1);
+ c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1);
+ c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1);
+ c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1);
+ c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1);
}
/*
diff --git a/xen/arch/arm/decode.c b/xen/arch/arm/decode.c
index 8b1e15d118..22fd59ceca 100644
--- a/xen/arch/arm/decode.c
+++ b/xen/arch/arm/decode.c
@@ -87,7 +87,7 @@ static int decode_thumb(register_t pc, struct hsr_dabt *dabt)
{
uint16_t instr;
- if ( raw_copy_from_guest(&instr, (void * __user)pc, sizeof (instr)) )
+ if ( raw_copy_from_guest(&instr, (void *__user)pc, sizeof (instr)) )
return -EFAULT;
switch ( instr >> 12 )
diff --git a/xen/arch/arm/device.c b/xen/arch/arm/device.c
index 70cd6c1a19..c84db95056 100644
--- a/xen/arch/arm/device.c
+++ b/xen/arch/arm/device.c
@@ -52,7 +52,8 @@ int __init device_init(struct dt_device_node *dev, enum device_class class,
return -EBADF;
}
-int __init acpi_device_init(enum device_class class, const void *data, int class_type)
+int __init acpi_device_init(enum device_class class, const void *data,
+ int class_type)
{
const struct acpi_device_desc *desc;
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 4f44d5c742..9fe8ec2235 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -435,12 +435,24 @@ unsigned long hypercall_create_continuation(
switch ( i )
{
- case 0: regs->x0 = arg; break;
- case 1: regs->x1 = arg; break;
- case 2: regs->x2 = arg; break;
- case 3: regs->x3 = arg; break;
- case 4: regs->x4 = arg; break;
- case 5: regs->x5 = arg; break;
+ case 0:
+ regs->x0 = arg;
+ break;
+ case 1:
+ regs->x1 = arg;
+ break;
+ case 2:
+ regs->x2 = arg;
+ break;
+ case 3:
+ regs->x3 = arg;
+ break;
+ case 4:
+ regs->x4 = arg;
+ break;
+ case 5:
+ regs->x5 = arg;
+ break;
}
}
@@ -458,12 +470,24 @@ unsigned long hypercall_create_continuation(
switch ( i )
{
- case 0: regs->r0 = arg; break;
- case 1: regs->r1 = arg; break;
- case 2: regs->r2 = arg; break;
- case 3: regs->r3 = arg; break;
- case 4: regs->r4 = arg; break;
- case 5: regs->r5 = arg; break;
+ case 0:
+ regs->r0 = arg;
+ break;
+ case 1:
+ regs->r1 = arg;
+ break;
+ case 2:
+ regs->r2 = arg;
+ break;
+ case 3:
+ regs->r3 = arg;
+ break;
+ case 4:
+ regs->r4 = arg;
+ break;
+ case 5:
+ regs->r5 = arg;
+ break;
}
}
@@ -938,7 +962,7 @@ static int relinquish_memory(struct domain *d, struct page_list_head *list)
}
}
- out:
+out:
spin_unlock_recursive(&d->page_alloc_lock);
return ret;
}
@@ -961,7 +985,7 @@ int domain_relinquish_resources(struct domain *d)
domain_vpl011_deinit(d);
d->arch.relmem = RELMEM_tee;
- /* Fallthrough */
+ /* Fallthrough */
case RELMEM_tee:
ret = tee_relinquish_resources(d);
@@ -969,7 +993,7 @@ int domain_relinquish_resources(struct domain *d)
return ret;
d->arch.relmem = RELMEM_xen;
- /* Fallthrough */
+ /* Fallthrough */
case RELMEM_xen:
ret = relinquish_memory(d, &d->xenpage_list);
@@ -977,7 +1001,7 @@ int domain_relinquish_resources(struct domain *d)
return ret;
d->arch.relmem = RELMEM_page;
- /* Fallthrough */
+ /* Fallthrough */
case RELMEM_page:
ret = relinquish_memory(d, &d->page_list);
@@ -985,7 +1009,7 @@ int domain_relinquish_resources(struct domain *d)
return ret;
d->arch.relmem = RELMEM_mapping;
- /* Fallthrough */
+ /* Fallthrough */
case RELMEM_mapping:
ret = relinquish_p2m_mapping(d);
@@ -993,7 +1017,7 @@ int domain_relinquish_resources(struct domain *d)
return ret;
d->arch.relmem = RELMEM_done;
- /* Fallthrough */
+ /* Fallthrough */
case RELMEM_done:
break;
@@ -1011,15 +1035,16 @@ void arch_dump_domain_info(struct domain *d)
}
-long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
+long do_arm_vcpu_op(int cmd, unsigned int vcpuid,
+ XEN_GUEST_HANDLE_PARAM(void) arg)
{
switch ( cmd )
{
- case VCPUOP_register_vcpu_info:
- case VCPUOP_register_runstate_memory_area:
- return do_vcpu_op(cmd, vcpuid, arg);
- default:
- return -EINVAL;
+ case VCPUOP_register_vcpu_info:
+ case VCPUOP_register_runstate_memory_area:
+ return do_vcpu_op(cmd, vcpuid, arg);
+ default:
+ return -EINVAL;
}
}
@@ -1037,7 +1062,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
void vcpu_mark_events_pending(struct vcpu *v)
{
bool already_pending = guest_test_and_set_bit(v->domain,
- 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+ 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
if ( already_pending )
return;
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index c1981836a6..16238adc9b 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -143,7 +143,7 @@ static bool __init insert_11_bank(struct domain *d,
return true;
}
- for( i = 0; i < kinfo->mem.nr_banks; i++ )
+ for ( i = 0; i < kinfo->mem.nr_banks; i++ )
{
struct membank *bank = &kinfo->mem.bank[i];
@@ -302,7 +302,7 @@ static void __init allocate_memory_11(struct domain *d,
printk(XENLOG_INFO "No bank has been allocated below 4GB.\n");
lowmem = false;
- got_bank0:
+got_bank0:
/*
* If we failed to allocate bank0 under 4GB, continue allocating
@@ -363,7 +363,7 @@ static void __init allocate_memory_11(struct domain *d,
" %ldMB unallocated\n",
(unsigned long)kinfo->unassigned_mem >> 20);
- for( i = 0; i < kinfo->mem.nr_banks; i++ )
+ for ( i = 0; i < kinfo->mem.nr_banks; i++ )
{
printk("BANK[%d] %#"PRIpaddr"-%#"PRIpaddr" (%ldMB)\n",
i,
@@ -375,9 +375,9 @@ static void __init allocate_memory_11(struct domain *d,
}
static bool __init allocate_bank_memory(struct domain *d,
- struct kernel_info *kinfo,
- gfn_t sgfn,
- unsigned long tot_size)
+ struct kernel_info *kinfo,
+ gfn_t sgfn,
+ unsigned long tot_size)
{
int res;
struct page_info *pg;
@@ -453,7 +453,7 @@ static void __init allocate_memory(struct domain *d, struct kernel_info *kinfo)
if ( kinfo->unassigned_mem )
goto fail;
- for( i = 0; i < kinfo->mem.nr_banks; i++ )
+ for ( i = 0; i < kinfo->mem.nr_banks; i++ )
{
printk(XENLOG_INFO "%pd BANK[%d] %#"PRIpaddr"-%#"PRIpaddr" (%ldMB)\n",
d,
@@ -814,7 +814,7 @@ static int __init make_cpus_node(const struct domain *d, void *fdt,
{
compatible = dt_get_property(npcpu, "compatible", &len);
clock_valid = dt_property_read_u32(npcpu, "clock-frequency",
- &clock_frequency);
+ &clock_frequency);
break;
}
}
@@ -1059,7 +1059,7 @@ int __init make_chosen_node(const struct kernel_info *kinfo)
bootargs = &kinfo->cmdline[0];
res = fdt_property(fdt, "bootargs", bootargs, strlen(bootargs) + 1);
if ( res )
- return res;
+ return res;
}
/*
@@ -1277,7 +1277,7 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev,
if ( rirq.controller != dt_interrupt_controller )
{
dt_dprintk("irq %u not connected to primary controller. Connected to %s\n",
- i, dt_node_full_name(rirq.controller));
+ i, dt_node_full_name(rirq.controller));
continue;
}
@@ -1546,7 +1546,8 @@ static int __init make_gicv3_domU_node(const struct domain *d, void *fdt)
__be32 reg[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) * 2];
__be32 *cells;
- res = fdt_begin_node(fdt, "interrupt-controller@"__stringify(GUEST_GICV3_GICD_BASE));
+ res = fdt_begin_node(fdt,
+ "interrupt-controller@"__stringify(GUEST_GICV3_GICD_BASE));
if ( res )
return res;
@@ -1771,7 +1772,7 @@ static int __init prepare_dtb_domU(struct domain *d, struct kernel_info *kinfo)
return 0;
- err:
+err:
printk("Device tree generation failed (%d).\n", ret);
xfree(kinfo->fdt);
@@ -1810,7 +1811,7 @@ static int __init prepare_dtb_hwdom(struct domain *d, struct kernel_info *kinfo)
return 0;
- err:
+err:
printk("Device tree generation failed (%d).\n", ret);
xfree(kinfo->fdt);
return -EINVAL;
@@ -2083,7 +2084,8 @@ void __init create_domUs(void)
dt_for_each_child_node(chosen, node)
{
struct domain *d;
- struct xen_domctl_createdomain d_cfg = {
+ struct xen_domctl_createdomain d_cfg =
+ {
.arch.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE,
.arch.nr_spis = 0,
.flags = XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap,
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 9da88b8c64..0a5dd3d6ae 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -22,7 +22,7 @@ void arch_get_domain_info(const struct domain *d,
info->flags |= XEN_DOMINF_hap;
}
-static int handle_vuart_init(struct domain *d,
+static int handle_vuart_init(struct domain *d,
struct xen_domctl_vuart_op *vuart_op)
{
int rc;
@@ -161,7 +161,7 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
if ( vuart_op->pad[i] )
return -EINVAL;
- switch( vuart_op->cmd )
+ switch ( vuart_op->cmd )
{
case XEN_DOMCTL_VUART_OP_INIT:
rc = handle_vuart_init(d, vuart_op);
diff --git a/xen/arch/arm/early_printk.c b/xen/arch/arm/early_printk.c
index 97466a12b1..f1c69697fc 100644
--- a/xen/arch/arm/early_printk.c
+++ b/xen/arch/arm/early_printk.c
@@ -19,7 +19,8 @@ void early_flush(void);
void early_puts(const char *s)
{
- while (*s != '\0') {
+ while (*s != '\0')
+ {
if (*s == '\n')
early_putch('\r');
early_putch(*s);
diff --git a/xen/arch/arm/efi/efi-boot.h b/xen/arch/arm/efi/efi-boot.h
index ca655ff003..8743d48f79 100644
--- a/xen/arch/arm/efi/efi-boot.h
+++ b/xen/arch/arm/efi/efi-boot.h
@@ -72,7 +72,7 @@ static int __init setup_chosen_node(void *fdt, int *addr_cells, int *size_cells)
return -1;
}
else if ( fdt32_to_cpu(prop->len) )
- return -1; /* Non-empty ranges property */
+ return -1; /* Non-empty ranges property */
return node;
}
@@ -101,7 +101,7 @@ static int __init fdt_set_reg(void *fdt, int node, int addr_cells,
dt_set_cell(&cellp, addr_cells, addr);
dt_set_cell(&cellp, size_cells, len);
- return(fdt_setprop(fdt, node, "reg", val, sizeof(*cellp) * (cellp - val)));
+ return (fdt_setprop(fdt, node, "reg", val, sizeof(*cellp) * (cellp - val)));
}
static void __init *lookup_fdt_config_table(EFI_SYSTEM_TABLE *sys_table)
@@ -140,9 +140,10 @@ static bool __init meminfo_add_bank(struct meminfo *mem,
return true;
}
-static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR *map,
- UINTN mmap_size,
- UINTN desc_size)
+static EFI_STATUS __init efi_process_memory_map_bootinfo(
+ EFI_MEMORY_DESCRIPTOR *map,
+ UINTN mmap_size,
+ UINTN desc_size)
{
int Index;
EFI_MEMORY_DESCRIPTOR *desc_ptr = map;
@@ -157,7 +158,7 @@ static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR *
if ( !meminfo_add_bank(&bootinfo.mem, desc_ptr) )
{
PrintStr(L"Warning: All " __stringify(NR_MEM_BANKS)
- " bootinfo mem banks exhausted.\r\n");
+ " bootinfo mem banks exhausted.\r\n");
break;
}
}
@@ -167,7 +168,7 @@ static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR *
if ( !meminfo_add_bank(&bootinfo.acpi, desc_ptr) )
{
PrintStr(L"Error: All " __stringify(NR_MEM_BANKS)
- " acpi meminfo mem banks exhausted.\r\n");
+ " acpi meminfo mem banks exhausted.\r\n");
return EFI_LOAD_ERROR;
}
}
@@ -184,11 +185,11 @@ static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR *
* and memory map information.
*/
EFI_STATUS __init fdt_add_uefi_nodes(EFI_SYSTEM_TABLE *sys_table,
- void *fdt,
- EFI_MEMORY_DESCRIPTOR *memory_map,
- UINTN map_size,
- UINTN desc_size,
- UINT32 desc_ver)
+ void *fdt,
+ EFI_MEMORY_DESCRIPTOR *memory_map,
+ UINTN map_size,
+ UINTN desc_size,
+ UINT32 desc_ver)
{
int node;
int status;
@@ -221,13 +222,13 @@ EFI_STATUS __init fdt_add_uefi_nodes(EFI_SYSTEM_TABLE *sys_table,
prev = node;
}
- /*
- * Delete all memory reserve map entries. When booting via UEFI,
- * kernel will use the UEFI memory map to find reserved regions.
- */
- num_rsv = fdt_num_mem_rsv(fdt);
- while ( num_rsv-- > 0 )
- fdt_del_mem_rsv(fdt, num_rsv);
+ /*
+ * Delete all memory reserve map entries. When booting via UEFI,
+ * kernel will use the UEFI memory map to find reserved regions.
+ */
+ num_rsv = fdt_num_mem_rsv(fdt);
+ while ( num_rsv-- > 0 )
+ fdt_del_mem_rsv(fdt, num_rsv);
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
@@ -372,7 +373,8 @@ static void __init efi_arch_post_exit_boot(void)
efi_xen_start(fdt, fdt_totalsize(fdt));
}
-static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle, char *section)
+static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle,
+ char *section)
{
union string name;
@@ -392,7 +394,8 @@ static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle, char *sec
blexit(L"Unable to create new FDT");
}
-static void __init efi_arch_cfg_file_late(EFI_FILE_HANDLE dir_handle, char *section)
+static void __init efi_arch_cfg_file_late(EFI_FILE_HANDLE dir_handle,
+ char *section)
{
}
@@ -430,7 +433,8 @@ static void __init efi_arch_handle_cmdline(CHAR16 *image_name,
if ( chosen < 0 )
blexit(L"Unable to find chosen node");
- status = efi_bs->AllocatePool(EfiBootServicesData, EFI_PAGE_SIZE, (void **)&buf);
+ status = efi_bs->AllocatePool(EfiBootServicesData, EFI_PAGE_SIZE,
+ (void **)&buf);
if ( EFI_ERROR(status) )
PrintErrMesg(L"Unable to allocate string buffer", status);
@@ -444,14 +448,14 @@ static void __init efi_arch_handle_cmdline(CHAR16 *image_name,
prop_len = 0;
prop_len += snprintf(buf + prop_len,
- EFI_PAGE_SIZE - prop_len, "%s", name.s);
+ EFI_PAGE_SIZE - prop_len, "%s", name.s);
if ( prop_len >= EFI_PAGE_SIZE )
blexit(L"FDT string overflow");
if ( cfgfile_options )
{
prop_len += snprintf(buf + prop_len,
- EFI_PAGE_SIZE - prop_len, " %s", cfgfile_options);
+ EFI_PAGE_SIZE - prop_len, " %s", cfgfile_options);
if ( prop_len >= EFI_PAGE_SIZE )
blexit(L"FDT string overflow");
}
@@ -467,7 +471,7 @@ static void __init efi_arch_handle_cmdline(CHAR16 *image_name,
if ( name.s )
{
prop_len += snprintf(buf + prop_len,
- EFI_PAGE_SIZE - prop_len, " %s", name.s);
+ EFI_PAGE_SIZE - prop_len, " %s", name.s);
if ( prop_len >= EFI_PAGE_SIZE )
blexit(L"FDT string overflow");
}
@@ -501,7 +505,7 @@ static void __init efi_arch_handle_module(struct file *file, const CHAR16 *name,
sizeof(ramdisk_compat)) < 0 )
blexit(L"Unable to set compatible property.");
if ( fdt_set_reg(fdt, node, addr_len, size_len, ramdisk.addr,
- ramdisk.size) < 0 )
+ ramdisk.size) < 0 )
blexit(L"Unable to set reg property.");
}
else if ( file == &xsm )
@@ -514,7 +518,7 @@ static void __init efi_arch_handle_module(struct file *file, const CHAR16 *name,
sizeof(xsm_compat)) < 0 )
blexit(L"Unable to set compatible property.");
if ( fdt_set_reg(fdt, node, addr_len, size_len, xsm.addr,
- xsm.size) < 0 )
+ xsm.size) < 0 )
blexit(L"Unable to set reg property.");
}
else if ( file == &kernel )
diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index 256988c665..4964fe88fa 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -91,7 +91,8 @@
#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
-struct v2m_data {
+struct v2m_data
+{
struct list_head entry;
/* Pointer to the DT node representing the v2m frame */
const struct dt_device_node *dt_node;
@@ -105,10 +106,11 @@ struct v2m_data {
static LIST_HEAD(gicv2m_info);
/* Global state */
-static struct {
- void __iomem * map_dbase; /* IO mapped Address of distributor registers */
- void __iomem * map_cbase; /* IO mapped Address of CPU interface registers */
- void __iomem * map_hbase; /* IO Address of virtual interface registers */
+static struct
+{
+ void __iomem *map_dbase; /* IO mapped Address of distributor registers */
+ void __iomem *map_cbase; /* IO mapped Address of CPU interface registers */
+ void __iomem *map_hbase; /* IO Address of virtual interface registers */
spinlock_t lock;
} gicv2;
@@ -316,8 +318,8 @@ static void gicv2_set_irq_type(struct irq_desc *desc, unsigned int type)
cfg & edgebit ? "Edge" : "Level",
actual & edgebit ? "Edge" : "Level");
desc->arch.type = actual & edgebit ?
- IRQ_TYPE_EDGE_RISING :
- IRQ_TYPE_LEVEL_HIGH;
+ IRQ_TYPE_EDGE_RISING :
+ IRQ_TYPE_LEVEL_HIGH;
}
spin_unlock(&gicv2.lock);
@@ -505,7 +507,7 @@ static void gicv2_update_lr(int lr, unsigned int virq, uint8_t priority,
lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT) |
((GIC_PRI_TO_GUEST(priority) & GICH_V2_LR_PRIORITY_MASK)
- << GICH_V2_LR_PRIORITY_SHIFT) |
+ << GICH_V2_LR_PRIORITY_SHIFT) |
((virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT));
if ( hw_irq != INVALID_IRQ )
@@ -526,7 +528,8 @@ static void gicv2_read_lr(int lr, struct gic_lr *lr_reg)
lrv = readl_gich(GICH_LR + lr * 4);
lr_reg->virq = (lrv >> GICH_V2_LR_VIRTUAL_SHIFT) & GICH_V2_LR_VIRTUAL_MASK;
- lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK;
+ lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) &
+ GICH_V2_LR_PRIORITY_MASK;
lr_reg->pending = lrv & GICH_V2_LR_PENDING;
lr_reg->active = lrv & GICH_V2_LR_ACTIVE;
lr_reg->hw_status = lrv & GICH_V2_LR_HW;
@@ -544,7 +547,7 @@ static void gicv2_read_lr(int lr, struct gic_lr *lr_reg)
* read it as it should be 0 by default.
*/
lr_reg->virt.source = (lrv >> GICH_V2_LR_CPUID_SHIFT)
- & GICH_V2_LR_CPUID_MASK;
+ & GICH_V2_LR_CPUID_MASK;
}
}
@@ -552,9 +555,10 @@ static void gicv2_write_lr(int lr, const struct gic_lr *lr_reg)
{
uint32_t lrv = 0;
- lrv = (((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT) |
- ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK)
- << GICH_V2_LR_PRIORITY_SHIFT) );
+ lrv = (((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT)
+ |
+ ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK)
+ << GICH_V2_LR_PRIORITY_SHIFT) );
if ( lr_reg->active )
lrv |= GICH_V2_LR_ACTIVE;
@@ -596,13 +600,13 @@ static void gicv2_hcr_status(uint32_t flag, bool status)
static unsigned int gicv2_read_vmcr_priority(void)
{
- return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT)
- & GICH_V2_VMCR_PRIORITY_MASK);
+ return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT)
+ & GICH_V2_VMCR_PRIORITY_MASK);
}
static unsigned int gicv2_read_apr(int apr_reg)
{
- return readl_gich(GICH_APR);
+ return readl_gich(GICH_APR);
}
static bool gicv2_read_pending_state(struct irq_desc *irqd)
@@ -669,7 +673,8 @@ static void gicv2_guest_irq_end(struct irq_desc *desc)
/* Deactivation happens in maintenance interrupt / via GICV */
}
-static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
+static void gicv2_irq_set_affinity(struct irq_desc *desc,
+ const cpumask_t *cpu_mask)
{
unsigned int mask;
@@ -895,7 +900,8 @@ static int gicv2_make_hwdom_dt_node(const struct domain *d,
}
/* XXX different for level vs edge */
-static hw_irq_controller gicv2_host_irq_type = {
+static hw_irq_controller gicv2_host_irq_type =
+{
.typename = "gic-v2",
.startup = gicv2_irq_startup,
.shutdown = gicv2_irq_shutdown,
@@ -906,7 +912,8 @@ static hw_irq_controller gicv2_host_irq_type = {
.set_affinity = gicv2_irq_set_affinity,
};
-static hw_irq_controller gicv2_guest_irq_type = {
+static hw_irq_controller gicv2_guest_irq_type =
+{
.typename = "gic-v2",
.startup = gicv2_irq_startup,
.shutdown = gicv2_irq_shutdown,
@@ -1070,7 +1077,8 @@ static void __init gicv2_dt_init(void)
{
printk(XENLOG_WARNING "GICv2: enable platform quirk: 64K stride\n");
vsize = csize = SZ_128K;
- } else
+ }
+ else
csize = SZ_8K;
}
@@ -1080,7 +1088,7 @@ static void __init gicv2_dt_init(void)
*/
if ( csize != vsize )
panic("GICv2: Sizes of GICC (%#"PRIpaddr") and GICV (%#"PRIpaddr") don't match\n",
- csize, vsize);
+ csize, vsize);
/*
* Check whether this GIC implements the v2m extension. If so,
@@ -1163,7 +1171,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
{
static int cpu_base_assigned = 0;
struct acpi_madt_generic_interrupt *processor =
- container_of(header, struct acpi_madt_generic_interrupt, header);
+ container_of(header, struct acpi_madt_generic_interrupt, header);
if ( BAD_MADT_ENTRY(processor, end) )
return -EINVAL;
@@ -1204,7 +1212,7 @@ gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_generic_distributor *dist =
- container_of(header, struct acpi_madt_generic_distributor, header);
+ container_of(header, struct acpi_madt_generic_distributor, header);
if ( BAD_MADT_ENTRY(dist, end) )
return -EINVAL;
@@ -1264,13 +1272,13 @@ static int __init gicv2_init(void)
gicv2_acpi_init();
printk("GICv2 initialization:\n"
- " gic_dist_addr=%"PRIpaddr"\n"
- " gic_cpu_addr=%"PRIpaddr"\n"
- " gic_hyp_addr=%"PRIpaddr"\n"
- " gic_vcpu_addr=%"PRIpaddr"\n"
- " gic_maintenance_irq=%u\n",
- dbase, cbase, hbase, vbase,
- gicv2_info.maintenance_irq);
+ " gic_dist_addr=%"PRIpaddr"\n"
+ " gic_cpu_addr=%"PRIpaddr"\n"
+ " gic_hyp_addr=%"PRIpaddr"\n"
+ " gic_vcpu_addr=%"PRIpaddr"\n"
+ " gic_maintenance_irq=%u\n",
+ dbase, cbase, hbase, vbase,
+ gicv2_info.maintenance_irq);
if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) ||
(hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) )
@@ -1300,7 +1308,8 @@ static int __init gicv2_init(void)
printk(XENLOG_WARNING
"GICv2: Adjusting CPU interface base to %#"PRIx64"\n",
cbase + aliased_offset);
- } else if ( csize == SZ_128K )
+ }
+ else if ( csize == SZ_128K )
printk(XENLOG_WARNING
"GICv2: GICC size=%#"PRIx64" but not aliased\n",
csize);
@@ -1330,7 +1339,8 @@ static void gicv2_do_LPI(unsigned int lpi)
BUG();
}
-const static struct gic_hw_operations gicv2_ops = {
+const static struct gic_hw_operations gicv2_ops =
+{
.info = &gicv2_info,
.init = gicv2_init,
.secondary_init = gicv2_secondary_cpu_init,
@@ -1383,8 +1393,8 @@ static const struct dt_device_match gicv2_dt_match[] __initconst =
};
DT_DEVICE_START(gicv2, "GICv2", DEVICE_GIC)
- .dt_match = gicv2_dt_match,
- .init = gicv2_dt_preinit,
+.dt_match = gicv2_dt_match,
+.init = gicv2_dt_preinit,
DT_DEVICE_END
#ifdef CONFIG_ACPI
@@ -1398,8 +1408,8 @@ static int __init gicv2_acpi_preinit(const void *data)
}
ACPI_DEVICE_START(agicv2, "GICv2", DEVICE_GIC)
- .class_type = ACPI_MADT_GIC_VERSION_V2,
- .init = gicv2_acpi_preinit,
+.class_type = ACPI_MADT_GIC_VERSION_V2,
+.init = gicv2_acpi_preinit,
ACPI_DEVICE_END
#endif
/*
diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
index 9558bad96a..eaf7d23d94 100644
--- a/xen/arch/arm/gic-v3-its.c
+++ b/xen/arch/arm/gic-v3-its.c
@@ -49,7 +49,8 @@ LIST_HEAD(host_its_list);
* property of MSIs in general and we can easily get to the base address
* of the ITS and look that up.
*/
-struct its_device {
+struct its_device
+{
struct rb_node rbnode;
struct host_its *hw_its;
void *itt_addr;
@@ -89,7 +90,8 @@ static int its_send_command(struct host_its *hw_its, const void *its_cmd)
spin_lock(&hw_its->cmd_lock);
- do {
+ do
+ {
readp = readq_relaxed(hw_its->its_base + GITS_CREADR) & BUFPTR_MASK;
writep = readq_relaxed(hw_its->its_base + GITS_CWRITER) & BUFPTR_MASK;
@@ -144,7 +146,8 @@ static int gicv3_its_wait_commands(struct host_its *hw_its)
s_time_t deadline = NOW() + MILLISECS(100);
uint64_t readp, writep;
- do {
+ do
+ {
spin_lock(&hw_its->cmd_lock);
readp = readq_relaxed(hw_its->its_base + GITS_CREADR) & BUFPTR_MASK;
writep = readq_relaxed(hw_its->its_base + GITS_CWRITER) & BUFPTR_MASK;
@@ -437,7 +440,8 @@ static int gicv3_disable_its(struct host_its *hw_its)
writel_relaxed(reg & ~GITS_CTLR_ENABLE, hw_its->its_base + GITS_CTLR);
- do {
+ do
+ {
reg = readl_relaxed(hw_its->its_base + GITS_CTLR);
if ( reg & GITS_CTLR_QUIESCENT )
return 0;
@@ -672,8 +676,9 @@ int gicv3_its_map_guest_device(struct domain *d,
if ( valid )
{
- printk(XENLOG_G_WARNING "d%d tried to remap guest ITS device 0x%x to host device 0x%x\n",
- d->domain_id, guest_devid, host_devid);
+ printk(XENLOG_G_WARNING
+ "d%d tried to remap guest ITS device 0x%x to host device 0x%x\n",
+ d->domain_id, guest_devid, host_devid);
return -EBUSY;
}
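The ITS command-queue and CTLR waits above share one bounded-polling shape: re-read a status register in a do/while loop until the condition clears or a deadline of NOW() + MILLISECS(...) expires. A minimal standalone sketch of that shape, with the register read stubbed out and POSIX timing standing in for Xen's clock:

/*
 * Standalone sketch (not Xen code): poll until a busy flag clears or a
 * millisecond deadline passes, mirroring the do/while + deadline idiom
 * used above. device_busy() is a stub; clock_gettime() stands in for
 * Xen's NOW()/MILLISECS().
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool device_busy(void)
{
    static int polls_left = 3;          /* stub: device settles after 3 polls */
    return polls_left-- > 0;
}

static long elapsed_ms(const struct timespec *start)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return (now.tv_sec - start->tv_sec) * 1000L +
           (now.tv_nsec - start->tv_nsec) / 1000000L;
}

static int wait_until_idle(long timeout_ms)
{
    struct timespec start;

    clock_gettime(CLOCK_MONOTONIC, &start);

    do
    {
        if ( !device_busy() )
            return 0;                   /* condition met before the deadline */
    } while ( elapsed_ms(&start) < timeout_ms );

    return -1;                          /* timed out */
}

int main(void)
{
    printf("wait_until_idle() -> %d\n", wait_until_idle(100));
    return 0;
}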
diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
index 78b9521b21..87e2869605 100644
--- a/xen/arch/arm/gic-v3-lpi.c
+++ b/xen/arch/arm/gic-v3-lpi.c
@@ -43,9 +43,11 @@
* We read or write the "data" view of this union atomically, then can
* access the broken-down fields in our local copy.
*/
-union host_lpi {
+union host_lpi
+{
uint64_t data;
- struct {
+ struct
+ {
uint32_t virt_lpi;
uint16_t dom_id;
uint16_t pad;
@@ -55,7 +57,8 @@ union host_lpi {
#define LPI_PROPTABLE_NEEDS_FLUSHING (1U << 0)
/* Global state */
-static struct {
+static struct
+{
/* The global LPI property table, shared by all redistributors. */
uint8_t *lpi_property;
/*
@@ -79,7 +82,8 @@ static struct {
unsigned int flags;
} lpi_data;
-struct lpi_redist_data {
+struct lpi_redist_data
+{
paddr_t redist_addr;
unsigned int redist_id;
void *pending_table;
@@ -152,7 +156,7 @@ void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq)
vcpu_id = ACCESS_ONCE(p->lpi_vcpu_id);
if ( vcpu_id >= d->max_vcpus )
- return;
+ return;
vgic_inject_irq(d, d->vcpu[vcpu_id], virq, true);
}
@@ -225,7 +229,8 @@ void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
host_lpi -= LPI_OFFSET;
- hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE][host_lpi % HOST_LPIS_PER_PAGE];
+ hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE][host_lpi %
+ HOST_LPIS_PER_PAGE];
hlpi.virt_lpi = virt_lpi;
hlpi.dom_id = domain_id;
@@ -279,7 +284,7 @@ static int gicv3_lpi_allocate_pendtable(uint64_t *reg)
* Tell a redistributor about the (shared) property table, allocating one
* if not already done.
*/
-static int gicv3_lpi_set_proptable(void __iomem * rdist_base)
+static int gicv3_lpi_set_proptable(void __iomem *rdist_base)
{
uint64_t reg;
@@ -336,7 +341,7 @@ static int gicv3_lpi_set_proptable(void __iomem * rdist_base)
return 0;
}
-int gicv3_lpi_init_rdist(void __iomem * rdist_base)
+int gicv3_lpi_init_rdist(void __iomem *rdist_base)
{
uint32_t reg;
uint64_t table_reg;
@@ -389,7 +394,8 @@ int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits)
* Tell the user about it, the actual number is reported below.
*/
if ( max_lpi_bits < 14 || max_lpi_bits > 32 )
- printk(XENLOG_WARNING "WARNING: max_lpi_bits must be between 14 and 32, adjusting.\n");
+ printk(XENLOG_WARNING
+ "WARNING: max_lpi_bits must be between 14 and 32, adjusting.\n");
max_lpi_bits = max(max_lpi_bits, 14U);
lpi_data.max_host_lpi_ids = BIT(min(host_lpi_bits, max_lpi_bits), UL);
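The union host_lpi layout above is read and written through its 64-bit "data" view in a single access, with the broken-down fields then taken from a local copy. A small standalone sketch of that idiom, with made-up names and a plain assignment in place of the atomic accessors:

/*
 * Standalone sketch (not Xen code) of the union host_lpi idiom described
 * above: copy the 64-bit "data" view in one access, then decode the fields
 * from the local copy. Names are made up; the plain assignment stands in
 * for ACCESS_ONCE()/atomic accessors.
 */
#include <stdint.h>
#include <stdio.h>

union entry
{
    uint64_t data;                      /* whole-word view, read/written once */
    struct
    {
        uint32_t virt_id;
        uint16_t dom_id;
        uint16_t pad;
    };
};

int main(void)
{
    union entry shared = { .data = 0 };
    union entry local;

    shared.virt_id = 42;
    shared.dom_id = 7;

    local.data = shared.data;           /* one 64-bit read of the shared entry */

    printf("virt=%u dom=%u\n", (unsigned)local.virt_id, (unsigned)local.dom_id);
    return 0;
}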
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
index 0f6cbf6224..110883f5e7 100644
--- a/xen/arch/arm/gic-v3.c
+++ b/xen/arch/arm/gic-v3.c
@@ -45,7 +45,8 @@
#include <asm/sysregs.h>
/* Global state */
-static struct {
+static struct
+{
void __iomem *map_dbase; /* Mapped address of distributor registers */
struct rdist_region *rdist_regions;
uint32_t rdist_stride;
@@ -57,7 +58,7 @@ static struct {
static struct gic_info gicv3_info;
/* per-cpu re-distributor base */
-static DEFINE_PER_CPU(void __iomem*, rbase);
+static DEFINE_PER_CPU(void __iomem *, rbase);
#define GICD (gicv3.map_dbase)
#define GICD_RDIST_BASE (this_cpu(rbase))
@@ -103,10 +104,10 @@ static inline void gicv3_save_lrs(struct vcpu *v)
case 2:
v->arch.gic.v3.lr[1] = READ_SYSREG(ICH_LR1_EL2);
case 1:
- v->arch.gic.v3.lr[0] = READ_SYSREG(ICH_LR0_EL2);
- break;
+ v->arch.gic.v3.lr[0] = READ_SYSREG(ICH_LR0_EL2);
+ break;
default:
- BUG();
+ BUG();
}
}
@@ -153,7 +154,7 @@ static inline void gicv3_restore_lrs(const struct vcpu *v)
WRITE_SYSREG(v->arch.gic.v3.lr[0], ICH_LR0_EL2);
break;
default:
- BUG();
+ BUG();
}
}
@@ -161,22 +162,38 @@ static uint64_t gicv3_ich_read_lr(int lr)
{
switch ( lr )
{
- case 0: return READ_SYSREG(ICH_LR0_EL2);
- case 1: return READ_SYSREG(ICH_LR1_EL2);
- case 2: return READ_SYSREG(ICH_LR2_EL2);
- case 3: return READ_SYSREG(ICH_LR3_EL2);
- case 4: return READ_SYSREG(ICH_LR4_EL2);
- case 5: return READ_SYSREG(ICH_LR5_EL2);
- case 6: return READ_SYSREG(ICH_LR6_EL2);
- case 7: return READ_SYSREG(ICH_LR7_EL2);
- case 8: return READ_SYSREG(ICH_LR8_EL2);
- case 9: return READ_SYSREG(ICH_LR9_EL2);
- case 10: return READ_SYSREG(ICH_LR10_EL2);
- case 11: return READ_SYSREG(ICH_LR11_EL2);
- case 12: return READ_SYSREG(ICH_LR12_EL2);
- case 13: return READ_SYSREG(ICH_LR13_EL2);
- case 14: return READ_SYSREG(ICH_LR14_EL2);
- case 15: return READ_SYSREG(ICH_LR15_EL2);
+ case 0:
+ return READ_SYSREG(ICH_LR0_EL2);
+ case 1:
+ return READ_SYSREG(ICH_LR1_EL2);
+ case 2:
+ return READ_SYSREG(ICH_LR2_EL2);
+ case 3:
+ return READ_SYSREG(ICH_LR3_EL2);
+ case 4:
+ return READ_SYSREG(ICH_LR4_EL2);
+ case 5:
+ return READ_SYSREG(ICH_LR5_EL2);
+ case 6:
+ return READ_SYSREG(ICH_LR6_EL2);
+ case 7:
+ return READ_SYSREG(ICH_LR7_EL2);
+ case 8:
+ return READ_SYSREG(ICH_LR8_EL2);
+ case 9:
+ return READ_SYSREG(ICH_LR9_EL2);
+ case 10:
+ return READ_SYSREG(ICH_LR10_EL2);
+ case 11:
+ return READ_SYSREG(ICH_LR11_EL2);
+ case 12:
+ return READ_SYSREG(ICH_LR12_EL2);
+ case 13:
+ return READ_SYSREG(ICH_LR13_EL2);
+ case 14:
+ return READ_SYSREG(ICH_LR14_EL2);
+ case 15:
+ return READ_SYSREG(ICH_LR15_EL2);
default:
BUG();
}
@@ -262,7 +279,8 @@ static void gicv3_do_wait_for_rwp(void __iomem *base)
bool timeout = false;
s_time_t deadline = NOW() + MILLISECS(1000);
- do {
+ do
+ {
val = readl_relaxed(base + GICD_CTLR);
if ( !(val & GICD_CTLR_RWP) )
break;
@@ -292,9 +310,9 @@ static void gicv3_redist_wait_for_rwp(void)
static void gicv3_wait_for_rwp(int irq)
{
if ( irq < NR_LOCAL_IRQS )
- gicv3_redist_wait_for_rwp();
+ gicv3_redist_wait_for_rwp();
else
- gicv3_dist_wait_for_rwp();
+ gicv3_dist_wait_for_rwp();
}
static unsigned int gicv3_get_cpu_from_mask(const cpumask_t *cpumask)
@@ -317,11 +335,11 @@ static void restore_aprn_regs(const union gic_state_data *d)
case 7:
WRITE_SYSREG32(d->v3.apr0[2], ICH_AP0R2_EL2);
WRITE_SYSREG32(d->v3.apr1[2], ICH_AP1R2_EL2);
- /* Fall through */
+ /* Fall through */
case 6:
WRITE_SYSREG32(d->v3.apr0[1], ICH_AP0R1_EL2);
WRITE_SYSREG32(d->v3.apr1[1], ICH_AP1R1_EL2);
- /* Fall through */
+ /* Fall through */
case 5:
WRITE_SYSREG32(d->v3.apr0[0], ICH_AP0R0_EL2);
WRITE_SYSREG32(d->v3.apr1[0], ICH_AP1R0_EL2);
@@ -340,11 +358,11 @@ static void save_aprn_regs(union gic_state_data *d)
case 7:
d->v3.apr0[2] = READ_SYSREG32(ICH_AP0R2_EL2);
d->v3.apr1[2] = READ_SYSREG32(ICH_AP1R2_EL2);
- /* Fall through */
+ /* Fall through */
case 6:
d->v3.apr0[1] = READ_SYSREG32(ICH_AP0R1_EL2);
d->v3.apr1[1] = READ_SYSREG32(ICH_AP1R1_EL2);
- /* Fall through */
+ /* Fall through */
case 5:
d->v3.apr0[0] = READ_SYSREG32(ICH_AP0R0_EL2);
d->v3.apr1[0] = READ_SYSREG32(ICH_AP1R0_EL2);
@@ -525,11 +543,11 @@ static void gicv3_set_pending_state(struct irq_desc *irqd, bool pending)
static inline uint64_t gicv3_mpidr_to_affinity(int cpu)
{
- uint64_t mpidr = cpu_logical_map(cpu);
- return (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
- MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
- MPIDR_AFFINITY_LEVEL(mpidr, 0));
+ uint64_t mpidr = cpu_logical_map(cpu);
+ return (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
}
static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type)
@@ -568,8 +586,8 @@ static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type)
cfg & edgebit ? "Edge" : "Level",
actual & edgebit ? "Edge" : "Level");
desc->arch.type = actual & edgebit ?
- IRQ_TYPE_EDGE_RISING :
- IRQ_TYPE_LEVEL_HIGH;
+ IRQ_TYPE_EDGE_RISING :
+ IRQ_TYPE_LEVEL_HIGH;
}
spin_unlock(&gicv3.lock);
}
@@ -644,7 +662,7 @@ static void __init gicv3_dist_init(void)
/* Turn on the distributor */
writel_relaxed(GICD_CTL_ENABLE | GICD_CTLR_ARE_NS |
- GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, GICD + GICD_CTLR);
+ GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, GICD + GICD_CTLR);
/* Route all global IRQs to this CPU */
affinity = gicv3_mpidr_to_affinity(smp_processor_id());
@@ -666,7 +684,8 @@ static int gicv3_enable_redist(void)
val &= ~GICR_WAKER_ProcessorSleep;
writel_relaxed(val, GICD_RDIST_BASE + GICR_WAKER);
- do {
+ do
+ {
val = readl_relaxed(GICD_RDIST_BASE + GICR_WAKER);
if ( !(val & GICR_WAKER_ChildrenAsleep) )
break;
@@ -715,7 +734,7 @@ static int __init gicv3_populate_rdist(void)
* If we ever get a cluster of more than 16 CPUs, just scream.
*/
if ( (mpidr & 0xff) >= 16 )
- dprintk(XENLOG_WARNING, "GICv3:Cluster with more than 16's cpus\n");
+ dprintk(XENLOG_WARNING, "GICv3:Cluster with more than 16's cpus\n");
/*
* Convert affinity to a 32bit value that can be matched to GICR_TYPER
@@ -739,7 +758,8 @@ static int __init gicv3_populate_rdist(void)
break;
}
- do {
+ do
+ {
typer = readq_relaxed(ptr + GICR_TYPER);
if ( (typer >> 32) == aff )
@@ -777,7 +797,7 @@ static int __init gicv3_populate_rdist(void)
}
printk("GICv3: CPU%d: Found redistributor in region %d @%p\n",
- smp_processor_id(), i, ptr);
+ smp_processor_id(), i, ptr);
return 0;
}
@@ -796,7 +816,8 @@ static int __init gicv3_populate_rdist(void)
} while ( !(typer & GICR_TYPER_LAST) );
}
- dprintk(XENLOG_ERR, "GICv3: CPU%d: mpidr 0x%"PRIregister" has no re-distributor!\n",
+ dprintk(XENLOG_ERR,
+ "GICv3: CPU%d: mpidr 0x%"PRIregister" has no re-distributor!\n",
smp_processor_id(), cpu_logical_map(smp_processor_id()));
return -ENODEV;
@@ -829,13 +850,13 @@ static int gicv3_cpu_init(void)
GIC_PRI_IPI);
for (i = 0; i < NR_GIC_SGI; i += 4)
writel_relaxed(priority,
- GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4);
+ GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4);
priority = (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 |
GIC_PRI_IRQ);
for (i = NR_GIC_SGI; i < NR_GIC_LOCAL_IRQS; i += 4)
writel_relaxed(priority,
- GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4);
+ GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4);
/*
* The activate state is unknown at boot, so make sure all
@@ -887,7 +908,7 @@ static void gicv3_hyp_init(void)
vtr = READ_SYSREG32(ICH_VTR_EL2);
gicv3_info.nr_lrs = (vtr & ICH_VTR_NRLRGS) + 1;
gicv3.nr_priorities = ((vtr >> ICH_VTR_PRIBITS_SHIFT) &
- ICH_VTR_PRIBITS_MASK) + 1;
+ ICH_VTR_PRIBITS_MASK) + 1;
if ( !((gicv3.nr_priorities > 4) && (gicv3.nr_priorities < 8)) )
panic("GICv3: Invalid number of priority bits\n");
@@ -949,7 +970,8 @@ static u16 gicv3_compute_target_list(int *base_cpu, const struct cpumask *mask,
}
mpidr = cpu_logical_map(cpu);
- if ( cluster_id != (mpidr & ~MPIDR_AFF0_MASK) ) {
+ if ( cluster_id != (mpidr & ~MPIDR_AFF0_MASK) )
+ {
cpu--;
goto out;
}
@@ -1049,9 +1071,9 @@ static void gicv3_update_lr(int lr, unsigned int virq, uint8_t priority,
val |= (uint64_t)priority << ICH_LR_PRIORITY_SHIFT;
val |= ((uint64_t)virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT;
- if ( hw_irq != INVALID_IRQ )
- val |= ICH_LR_HW | (((uint64_t)hw_irq & ICH_LR_PHYSICAL_MASK)
- << ICH_LR_PHYSICAL_SHIFT);
+ if ( hw_irq != INVALID_IRQ )
+ val |= ICH_LR_HW | (((uint64_t)hw_irq & ICH_LR_PHYSICAL_MASK)
+ << ICH_LR_PHYSICAL_SHIFT);
gicv3_ich_write_lr(lr, val);
}
@@ -1087,7 +1109,7 @@ static void gicv3_read_lr(int lr, struct gic_lr *lr_reg)
* read it as it should be 0 by default.
*/
lr_reg->virt.source = (lrv >> ICH_LR_CPUID_SHIFT)
- & ICH_LR_CPUID_MASK;
+ & ICH_LR_CPUID_MASK;
}
}
}
@@ -1099,7 +1121,7 @@ static void gicv3_write_lr(int lr_reg, const struct gic_lr *lr)
lrv = ( ((u64)(lr->virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT) |
- ((u64)(lr->priority & ICH_LR_PRIORITY_MASK) << ICH_LR_PRIORITY_SHIFT) );
+ ((u64)(lr->priority & ICH_LR_PRIORITY_MASK) << ICH_LR_PRIORITY_SHIFT) );
if ( lr->active )
lrv |= ICH_LR_STATE_ACTIVE;
@@ -1152,7 +1174,7 @@ static void gicv3_hcr_status(uint32_t flag, bool status)
static unsigned int gicv3_read_vmcr_priority(void)
{
- return ((READ_SYSREG32(ICH_VMCR_EL2) >> ICH_VMCR_PRIORITY_SHIFT) &
+ return ((READ_SYSREG32(ICH_VMCR_EL2) >> ICH_VMCR_PRIORITY_SHIFT) &
ICH_VMCR_PRIORITY_MASK);
}
@@ -1306,7 +1328,8 @@ static int gicv3_make_hwdom_dt_node(const struct domain *d,
return gicv3_its_make_hwdom_dt_nodes(d, gic, fdt);
}
-static const hw_irq_controller gicv3_host_irq_type = {
+static const hw_irq_controller gicv3_host_irq_type =
+{
.typename = "gic-v3",
.startup = gicv3_irq_startup,
.shutdown = gicv3_irq_shutdown,
@@ -1317,7 +1340,8 @@ static const hw_irq_controller gicv3_host_irq_type = {
.set_affinity = gicv3_irq_set_affinity,
};
-static const hw_irq_controller gicv3_guest_irq_type = {
+static const hw_irq_controller gicv3_guest_irq_type =
+{
.typename = "gic-v3",
.startup = gicv3_irq_startup,
.shutdown = gicv3_irq_shutdown,
@@ -1382,7 +1406,7 @@ static void __init gicv3_dt_init(void)
gicv3_ioremap_distributor(dbase);
if ( !dt_property_read_u32(node, "#redistributor-regions",
- &gicv3.rdist_count) )
+ &gicv3.rdist_count) )
gicv3.rdist_count = 1;
rdist_regs = xzalloc_array(struct rdist_region, gicv3.rdist_count);
@@ -1556,7 +1580,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
{
static int cpu_base_assigned = 0;
struct acpi_madt_generic_interrupt *processor =
- container_of(header, struct acpi_madt_generic_interrupt, header);
+ container_of(header, struct acpi_madt_generic_interrupt, header);
if ( BAD_MADT_ENTRY(processor, end) )
return -EINVAL;
@@ -1594,7 +1618,7 @@ gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_generic_distributor *dist =
- container_of(header, struct acpi_madt_generic_distributor, header);
+ container_of(header, struct acpi_madt_generic_distributor, header);
if ( BAD_MADT_ENTRY(dist, end) )
return -EINVAL;
@@ -1680,7 +1704,8 @@ static void __init gicv3_acpi_init(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
gic_acpi_get_madt_redistributor_num, 0);
/* Count the total number of CPU interface entries */
- if ( count <= 0 ) {
+ if ( count <= 0 )
+ {
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_get_madt_cpu_num, 0);
if (count <= 0)
@@ -1771,14 +1796,14 @@ static int __init gicv3_init(void)
reg = readl_relaxed(GICD + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
if ( reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4 )
- panic("GICv3: no distributor detected\n");
+ panic("GICv3: no distributor detected\n");
for ( i = 0; i < gicv3.rdist_count; i++ )
{
/* map dbase & rdist regions */
gicv3.rdist_regions[i].map_base =
- ioremap_nocache(gicv3.rdist_regions[i].base,
- gicv3.rdist_regions[i].size);
+ ioremap_nocache(gicv3.rdist_regions[i].base,
+ gicv3.rdist_regions[i].size);
if ( !gicv3.rdist_regions[i].map_base )
panic("GICv3: Failed to ioremap rdist region for region %d\n", i);
@@ -1831,7 +1856,8 @@ out:
return res;
}
-static const struct gic_hw_operations gicv3_ops = {
+static const struct gic_hw_operations gicv3_ops =
+{
.info = &gicv3_info,
.init = gicv3_init,
.save_state = gicv3_save_state,
@@ -1864,7 +1890,8 @@ static const struct gic_hw_operations gicv3_ops = {
.do_LPI = gicv3_do_LPI,
};
-static int __init gicv3_dt_preinit(struct dt_device_node *node, const void *data)
+static int __init gicv3_dt_preinit(struct dt_device_node *node,
+ const void *data)
{
gicv3_info.hw_version = GIC_V3;
gicv3_info.node = node;
@@ -1881,8 +1908,8 @@ static const struct dt_device_match gicv3_dt_match[] __initconst =
};
DT_DEVICE_START(gicv3, "GICv3", DEVICE_GIC)
- .dt_match = gicv3_dt_match,
- .init = gicv3_dt_preinit,
+.dt_match = gicv3_dt_match,
+.init = gicv3_dt_preinit,
DT_DEVICE_END
#ifdef CONFIG_ACPI
@@ -1896,13 +1923,13 @@ static int __init gicv3_acpi_preinit(const void *data)
}
ACPI_DEVICE_START(agicv3, "GICv3", DEVICE_GIC)
- .class_type = ACPI_MADT_GIC_VERSION_V3,
- .init = gicv3_acpi_preinit,
+.class_type = ACPI_MADT_GIC_VERSION_V3,
+.init = gicv3_acpi_preinit,
ACPI_DEVICE_END
ACPI_DEVICE_START(agicv4, "GICv4", DEVICE_GIC)
- .class_type = ACPI_MADT_GIC_VERSION_V4,
- .init = gicv3_acpi_preinit,
+.class_type = ACPI_MADT_GIC_VERSION_V4,
+.init = gicv3_acpi_preinit,
ACPI_DEVICE_END
#endif
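The MADT parsers above recover the full entry from its embedded generic header pointer via container_of(). A minimal standalone sketch of that pattern built on offsetof(), with hypothetical structure names:

/*
 * Standalone sketch (not Xen code) of the container_of() pattern used by
 * the MADT parsers above: subtract the member's offset from the member
 * pointer to get back the enclosing structure. Structure names are
 * hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

#define my_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct subtable_header { int type; int length; };

struct madt_entry
{
    int flags;
    struct subtable_header header;
};

int main(void)
{
    struct madt_entry entry = { .flags = 0x1 };
    struct subtable_header *hdr = &entry.header;

    /* Recover the enclosing madt_entry from the header pointer. */
    struct madt_entry *back = my_container_of(hdr, struct madt_entry, header);

    printf("flags=%#x\n", back->flags);     /* prints flags=0x1 */
    return 0;
}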
diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
index 98c021f1a8..02a6c90f54 100644
--- a/xen/arch/arm/gic-vgic.c
+++ b/xen/arch/arm/gic-vgic.c
@@ -94,7 +94,8 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq)
}
#ifdef GIC_DEBUG
else
- gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into %pv, when it is still lr_pending\n",
+ gdprintk(XENLOG_DEBUG,
+ "trying to inject irq=%u into %pv, when it is still lr_pending\n",
virtual_irq, v);
#endif
}
@@ -134,7 +135,7 @@ static unsigned int gic_find_unused_lr(struct vcpu *v,
}
void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
- unsigned int priority)
+ unsigned int priority)
{
int i;
unsigned int nr_lrs = gic_get_nr_lrs();
@@ -150,7 +151,8 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
{
i = gic_find_unused_lr(v, p, 0);
- if (i < nr_lrs) {
+ if (i < nr_lrs)
+ {
set_bit(i, &this_cpu(lr_mask));
gic_set_lr(i, p, GICH_LR_PENDING);
return;
@@ -201,17 +203,20 @@ static void gic_update_one_lr(struct vcpu *v, int i)
gic_hw_ops->write_lr(i, &lr_val);
}
else
- gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into %pv: already active in LR%d\n",
+ gdprintk(XENLOG_WARNING,
+ "unable to inject hw irq=%d into %pv: already active in LR%d\n",
irq, v, i);
}
}
else if ( lr_val.pending )
{
- int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
+ int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED,
+ &p->status);
#ifdef GIC_DEBUG
if ( q )
- gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into %pv, when it is already pending in LR%d\n",
- irq, v, i);
+ gdprintk(XENLOG_DEBUG,
+ "trying to inject irq=%d into %pv, when it is already pending in LR%d\n",
+ irq, v, i);
#endif
}
else
@@ -230,7 +235,8 @@ static void gic_update_one_lr(struct vcpu *v, int i)
test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) &&
!test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
gic_raise_guest_irq(v, irq, p->priority);
- else {
+ else
+ {
list_del_init(&p->inflight);
/*
* Remove from inflight, then change physical affinity. It
@@ -266,7 +272,8 @@ void vgic_sync_from_lrs(struct vcpu *v)
spin_lock_irqsave(&v->arch.vgic.lock, flags);
while ((i = find_next_bit((const unsigned long *) &this_cpu(lr_mask),
- nr_lrs, i)) < nr_lrs ) {
+ nr_lrs, i)) < nr_lrs )
+ {
gic_update_one_lr(v, i);
i++;
}
@@ -339,7 +346,7 @@ void gic_clear_pending_irqs(struct vcpu *v)
v->arch.lr_mask = 0;
list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
- gic_remove_from_lr_pending(v, p);
+ gic_remove_from_lr_pending(v, p);
}
/**
@@ -407,10 +414,10 @@ void gic_dump_vgic_info(struct vcpu *v)
struct pending_irq *p;
list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
- printk("Inflight irq=%u lr=%u\n", p->irq, p->lr);
+ printk("Inflight irq=%u lr=%u\n", p->irq, p->lr);
list_for_each_entry( p, &v->arch.vgic.lr_pending, lr_queue )
- printk("Pending irq=%d\n", p->irq);
+ printk("Pending irq=%d\n", p->irq);
}
struct irq_desc *vgic_get_hw_irq_desc(struct domain *d, struct vcpu *v,
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 113655a789..c0aeb644ae 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -56,7 +56,7 @@ static void clear_cpu_lr_mask(void)
enum gic_version gic_hw_version(void)
{
- return gic_hw_ops->info->hw_version;
+ return gic_hw_ops->info->hw_version;
}
unsigned int gic_number_lines(void)
@@ -114,7 +114,8 @@ static void gic_set_irq_priority(struct irq_desc *desc, unsigned int priority)
void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority)
{
ASSERT(priority <= 0xff); /* Only 8 bits of priority */
- ASSERT(desc->irq < gic_number_lines());/* Can't route interrupts that don't exist */
+ ASSERT(desc->irq <
+ gic_number_lines());/* Can't route interrupts that don't exist */
ASSERT(test_bit(_IRQ_DISABLED, &desc->status));
ASSERT(spin_is_locked(&desc->lock));
@@ -313,9 +314,9 @@ void send_SGI_self(enum gic_sgi sgi)
void send_SGI_allbutself(enum gic_sgi sgi)
{
- ASSERT(sgi < 16); /* There are only 16 SGIs */
+ ASSERT(sgi < 16); /* There are only 16 SGIs */
- gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL);
+ gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL);
}
void smp_send_state_dump(unsigned int cpu)
@@ -380,7 +381,8 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
{
unsigned int irq;
- do {
+ do
+ {
/* Reading IRQ will ACK it */
irq = gic_hw_ops->read_irq();
@@ -395,9 +397,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
gic_hw_ops->do_LPI(irq);
}
else if ( unlikely(irq < 16) )
- {
do_sgi(regs, irq);
- }
else
{
local_irq_disable();
@@ -406,7 +406,8 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
} while (1);
}
-static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
+static void maintenance_interrupt(int irq, void *dev_id,
+ struct cpu_user_regs *regs)
{
/*
* This is a dummy interrupt handler.
@@ -482,7 +483,8 @@ static int cpu_gic_callback(struct notifier_block *nfb,
return NOTIFY_DONE;
}
-static struct notifier_block cpu_gic_nfb = {
+static struct notifier_block cpu_gic_nfb =
+{
.notifier_call = cpu_gic_callback,
};
diff --git a/xen/arch/arm/guest_atomics.c b/xen/arch/arm/guest_atomics.c
index 1b78a062f0..1d5b8b7054 100644
--- a/xen/arch/arm/guest_atomics.c
+++ b/xen/arch/arm/guest_atomics.c
@@ -67,7 +67,8 @@ static int cpu_guest_safe_atomic_callback(struct notifier_block *nfb,
return NOTIFY_DONE;
}
-static struct notifier_block cpu_guest_safe_atomic_nfb = {
+static struct notifier_block cpu_guest_safe_atomic_nfb =
+{
.notifier_call = cpu_guest_safe_atomic_callback,
};
diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
index c6d6e23bf5..eccad9ee30 100644
--- a/xen/arch/arm/guest_walk.c
+++ b/xen/arch/arm/guest_walk.c
@@ -188,7 +188,8 @@ static int get_ipa_output_size(struct domain *d, register_t tcr,
#ifdef CONFIG_ARM_64
register_t ips;
- static const unsigned int ipa_sizes[7] = {
+ static const unsigned int ipa_sizes[7] =
+ {
TCR_EL1_IPS_32_BIT_VAL,
TCR_EL1_IPS_36_BIT_VAL,
TCR_EL1_IPS_40_BIT_VAL,
@@ -222,14 +223,16 @@ static int get_ipa_output_size(struct domain *d, register_t tcr,
}
/* Normalized page granule size indices. */
-enum granule_size_index {
+enum granule_size_index
+{
GRANULE_SIZE_INDEX_4K,
GRANULE_SIZE_INDEX_16K,
GRANULE_SIZE_INDEX_64K
};
/* Represent whether TTBR0 or TTBR1 is active. */
-enum active_ttbr {
+enum active_ttbr
+{
TTBR0_ACTIVE,
TTBR1_ACTIVE
};
@@ -379,7 +382,8 @@ static bool guest_walk_ld(const struct vcpu *v,
third_table_offset_##gran(gva) \
}
- const paddr_t offsets[3][4] = {
+ const paddr_t offsets[3][4] =
+ {
OFFSETS(gva, 4K),
OFFSETS(gva, 16K),
OFFSETS(gva, 64K)
@@ -395,7 +399,8 @@ static bool guest_walk_ld(const struct vcpu *v,
third_size(gran) - 1 \
}
- static const paddr_t masks[3][4] = {
+ static const paddr_t masks[3][4] =
+ {
MASKS(4K),
MASKS(16K),
MASKS(64K)
@@ -403,7 +408,8 @@ static bool guest_walk_ld(const struct vcpu *v,
#undef MASKS
- static const unsigned int grainsizes[3] = {
+ static const unsigned int grainsizes[3] =
+ {
PAGE_SHIFT_4K,
PAGE_SHIFT_16K,
PAGE_SHIFT_64K
@@ -493,7 +499,8 @@ static bool guest_walk_ld(const struct vcpu *v,
* The starting level is the number of strides (grainsizes[gran] - 3)
* needed to consume the input address (ARM DDI 0487B.a J1-5924).
*/
- level = 4 - DIV_ROUND_UP((input_size - grainsizes[gran]), (grainsizes[gran] - 3));
+ level = 4 - DIV_ROUND_UP((input_size - grainsizes[gran]),
+ (grainsizes[gran] - 3));
/* Get the IPA output_size. */
ret = get_ipa_output_size(d, tcr, &output_size);
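The starting level computed in guest_walk_ld() above is 4 minus the number of (grainsize - 3)-bit strides needed to consume the input address beyond the page offset. A short standalone sketch running that formula for the three granules, with an assumed 48-bit input size:

/*
 * Standalone sketch (not Xen code) of the starting-level formula above:
 * each level resolves (grainsize - 3) address bits, so the walk starts at
 * 4 minus the number of strides left after the grainsize-bit page offset.
 * The 48-bit input size is an illustrative assumption.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    const unsigned int grainsizes[3] = { 12, 14, 16 };   /* 4K, 16K, 64K shifts */
    const char *names[3] = { "4K", "16K", "64K" };
    const unsigned int input_size = 48;

    for ( int g = 0; g < 3; g++ )
    {
        unsigned int level = 4 - DIV_ROUND_UP(input_size - grainsizes[g],
                                              grainsizes[g] - 3);
        printf("%s granule, %u-bit input: start at level %u\n",
               names[g], input_size, level);
    }
    return 0;
}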
diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 7a0f3e9d5f..c03b57a5ba 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -124,7 +124,8 @@ unsigned long raw_clear_guest(void *to, unsigned len)
COPY_to_guest | COPY_linear);
}
-unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len)
+unsigned long raw_copy_from_guest(void *to, const void __user *from,
+ unsigned len)
{
return copy_guest(to, (vaddr_t)from, len, GVA_INFO(current),
COPY_from_guest | COPY_linear);
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index 76b27c9168..c97539a0c7 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -58,16 +58,14 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
goto param_fail;
if ( op == HVMOP_set_param )
- {
d->arch.hvm.params[a.index] = a.value;
- }
else
{
a.value = d->arch.hvm.params[a.index];
rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
}
- param_fail:
+param_fail:
rcu_unlock_domain(d);
break;
}
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index ae7ef96981..e0cbf048d8 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -114,7 +114,8 @@ enum io_state try_handle_mmio(struct cpu_user_regs *regs,
struct vcpu *v = current;
const struct mmio_handler *handler = NULL;
const struct hsr_dabt dabt = hsr.dabt;
- mmio_info_t info = {
+ mmio_info_t info =
+ {
.gpa = gpa,
.dabt = dabt
};
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index c51cf333ce..905277fadb 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -53,7 +53,8 @@ static void end_none(struct irq_desc *irq)
gic_hw_ops->gic_host_irq_type->end(irq);
}
-hw_irq_controller no_irq_type = {
+hw_irq_controller no_irq_type =
+{
.typename = "none",
.startup = irq_startup_none,
.shutdown = irq_shutdown_none,
@@ -289,7 +290,7 @@ void release_irq(unsigned int irq, const void *dev_id)
desc = irq_to_desc(irq);
- spin_lock_irqsave(&desc->lock,flags);
+ spin_lock_irqsave(&desc->lock, flags);
action_ptr = &desc->action;
for ( ;; )
@@ -318,10 +319,13 @@ void release_irq(unsigned int irq, const void *dev_id)
clear_bit(_IRQ_GUEST, &desc->status);
}
- spin_unlock_irqrestore(&desc->lock,flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
/* Wait to make sure it's not being used on another CPU */
- do { smp_mb(); } while ( test_bit(_IRQ_INPROGRESS, &desc->status) );
+ do
+ {
+ smp_mb();
+ } while ( test_bit(_IRQ_INPROGRESS, &desc->status) );
if ( action->free_on_release )
xfree(action);
@@ -338,7 +342,8 @@ static int __setup_irq(struct irq_desc *desc, unsigned int irqflags,
* - if the IRQ is marked as shared
* - dev_id is not NULL when IRQF_SHARED is set
*/
- if ( desc->action != NULL && (!test_bit(_IRQF_SHARED, &desc->status) || !shared) )
+ if ( desc->action != NULL && (!test_bit(_IRQF_SHARED, &desc->status)
+ || !shared) )
return -EINVAL;
if ( shared && new->dev_id == NULL )
return -EINVAL;
@@ -424,7 +429,7 @@ bool irq_type_set_by_domain(const struct domain *d)
* For now only SPIs are assignable to the guest.
*/
int route_irq_to_guest(struct domain *d, unsigned int virq,
- unsigned int irq, const char * devname)
+ unsigned int irq, const char *devname)
{
struct irqaction *action;
struct irq_guest *info;
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 389bef2afa..57cf92121c 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -31,7 +31,8 @@
#define ZIMAGE64_MAGIC_V0 0x14000008
#define ZIMAGE64_MAGIC_V1 0x644d5241 /* "ARM\x64" */
-struct minimal_dtb_header {
+struct minimal_dtb_header
+{
uint32_t magic;
uint32_t total_size;
/* There are other fields but we don't use them yet. */
@@ -49,7 +50,8 @@ void __init copy_from_paddr(void *dst, paddr_t paddr, unsigned long len)
{
void *src = (void *)FIXMAP_ADDR(FIXMAP_MISC);
- while (len) {
+ while (len)
+ {
unsigned long l, s;
s = paddr & (PAGE_SIZE-1);
@@ -191,7 +193,8 @@ static void __init kernel_zimage_load(struct kernel_info *info)
static int __init kernel_uimage_probe(struct kernel_info *info,
paddr_t addr, paddr_t size)
{
- struct {
+ struct
+ {
__be32 magic; /* Image Header Magic Number */
__be32 hcrc; /* Image Header CRC Checksum */
__be32 time; /* Image Creation Timestamp */
@@ -285,7 +288,8 @@ static __init int kernel_decompress(struct bootmodule *mod)
return -ENOMEM;
}
mfn = page_to_mfn(pages);
- output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
+ output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR,
+ VMAP_DEFAULT);
rc = perform_gunzip(output, input, size);
clean_dcache_va_range(output, output_size);
@@ -320,7 +324,8 @@ static int __init kernel_zimage64_probe(struct kernel_info *info,
paddr_t addr, paddr_t size)
{
/* linux/Documentation/arm64/booting.txt */
- struct {
+ struct
+ {
uint32_t magic0;
uint32_t res0;
uint64_t text_offset; /* Image load offset */
@@ -397,7 +402,8 @@ static int __init kernel_zimage32_probe(struct kernel_info *info,
if ( addr + end - start + sizeof(dtb_hdr) <= size )
{
copy_from_paddr(&dtb_hdr, addr + end - start, sizeof(dtb_hdr));
- if (be32_to_cpu(dtb_hdr.magic) == DTB_MAGIC) {
+ if (be32_to_cpu(dtb_hdr.magic) == DTB_MAGIC)
+ {
end += be32_to_cpu(dtb_hdr.total_size);
if ( end > addr + size )
@@ -456,7 +462,7 @@ int __init kernel_probe(struct kernel_info *info,
val = dt_get_property(node, "reg", &len);
dt_get_range(&val, node, &kernel_addr, &size);
mod = boot_module_find_by_addr_and_kind(
- BOOTMOD_KERNEL, kernel_addr);
+ BOOTMOD_KERNEL, kernel_addr);
info->kernel_bootmodule = mod;
}
else if ( dt_device_is_compatible(node, "multiboot,ramdisk") )
@@ -467,7 +473,7 @@ int __init kernel_probe(struct kernel_info *info,
val = dt_get_property(node, "reg", &len);
dt_get_range(&val, node, &initrd_addr, &size);
info->initrd_bootmodule = boot_module_find_by_addr_and_kind(
- BOOTMOD_RAMDISK, initrd_addr);
+ BOOTMOD_RAMDISK, initrd_addr);
}
else
continue;
diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c
index 279d52cc6c..3ae6351b42 100644
--- a/xen/arch/arm/livepatch.c
+++ b/xen/arch/arm/livepatch.c
@@ -64,7 +64,7 @@ int arch_livepatch_verify_func(const struct livepatch_func *func)
{
/* If NOPing only do up to maximum amount we can put in the ->opaque. */
if ( !func->new_addr && (func->new_size > sizeof(func->opaque) ||
- func->new_size % ARCH_PATCH_INSN_SIZE) )
+ func->new_size % ARCH_PATCH_INSN_SIZE) )
return -EOPNOTSUPP;
if ( func->old_size < ARCH_PATCH_INSN_SIZE )
diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index 3e3620294c..e4bf135ae3 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -31,18 +31,19 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
void *i;
unsigned int index;
- static const xenmem_access_t memaccess[] = {
+ static const xenmem_access_t memaccess[] =
+ {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
- ACCESS(n),
- ACCESS(r),
- ACCESS(w),
- ACCESS(rw),
- ACCESS(x),
- ACCESS(rx),
- ACCESS(wx),
- ACCESS(rwx),
- ACCESS(rx2rw),
- ACCESS(n2rwx),
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+ ACCESS(rx2rw),
+ ACCESS(n2rwx),
#undef ACCESS
};
@@ -98,7 +99,7 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
* Only in these cases we do a software-based type check and fetch the page if
* we indeed found a conflicting mem_access setting.
*/
-struct page_info*
+struct page_info *
p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
const struct vcpu *v)
{
@@ -296,8 +297,8 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
if ( p2m->access_required )
{
gdprintk(XENLOG_INFO, "Memory access permissions failure, "
- "no vm_event listener VCPU %d, dom %d\n",
- v->vcpu_id, v->domain->domain_id);
+ "no vm_event listener VCPU %d, dom %d\n",
+ v->vcpu_id, v->domain->domain_id);
domain_crash(v->domain);
}
else
@@ -360,7 +361,8 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
unsigned int order;
long rc = 0;
- static const p2m_access_t memaccess[] = {
+ static const p2m_access_t memaccess[] =
+ {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
ACCESS(n),
ACCESS(r),
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 44258ad89c..7b9a8fcdb6 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -176,7 +176,8 @@ unsigned long total_pages;
extern char __init_begin[], __init_end[];
/* Checking VA memory layout alignment. */
-static inline void check_memory_layout_alignment_constraints(void) {
+static inline void check_memory_layout_alignment_constraints(void)
+{
/* 2MB aligned regions */
BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK);
BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK);
@@ -204,7 +205,8 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
{
static const char *level_strs[4] = { "0TH", "1ST", "2ND", "3RD" };
const mfn_t root_mfn = maddr_to_mfn(ttbr);
- const unsigned int offsets[4] = {
+ const unsigned int offsets[4] =
+ {
zeroeth_table_offset(addr),
first_table_offset(addr),
second_table_offset(addr),
@@ -283,8 +285,10 @@ void dump_hyp_walk(vaddr_t addr)
*/
static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr)
{
- lpae_t e = (lpae_t) {
- .pt = {
+ lpae_t e = (lpae_t)
+ {
+ .pt =
+ {
.valid = 1, /* Mappings are present */
.table = 0, /* Set to 1 for links and 4k maps */
.ai = attr,
@@ -296,7 +300,8 @@ static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr)
.contig = 0, /* Assume non-contiguous */
.xn = 1, /* No need to execute outside .text */
.avail = 0, /* Reference count for domheap mapping */
- }};
+ }
+ };
/*
* For EL2 stage-1 page table, up (aka AP[1]) is RES1 as the translation
* regime applies to only one exception level (see D4.4.4 and G4.6.1
@@ -536,7 +541,7 @@ static inline lpae_t pte_of_xenaddr(vaddr_t va)
return mfn_to_xen_entry(maddr_to_mfn(ma), MT_NORMAL);
}
-void * __init early_fdt_map(paddr_t fdt_paddr)
+void *__init early_fdt_map(paddr_t fdt_paddr)
{
/* We are using 2MB superpage for mapping the FDT */
paddr_t base_paddr = fdt_paddr & SECOND_MASK;
@@ -797,7 +802,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
{
xenheap_mfn_start = _mfn(base_mfn);
xenheap_virt_start = DIRECTMAP_VIRT_START +
- (base_mfn - mfn) * PAGE_SIZE;
+ (base_mfn - mfn) * PAGE_SIZE;
}
if ( base_mfn < mfn_x(xenheap_mfn_start) )
@@ -822,7 +827,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
/* mfn_to_virt is not valid on the 1st 1st mfn, since it
* is not within the xenheap. */
first = slot == xenheap_first_first_slot ?
- xenheap_first_first : mfn_to_virt(lpae_get_mfn(*p));
+ xenheap_first_first : mfn_to_virt(lpae_get_mfn(*p));
}
else if ( xenheap_first_first_slot == -1)
{
@@ -901,7 +906,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
memset(&frame_table[nr_pdxs], -1,
frametable_size - (nr_pdxs * sizeof(struct page_info)));
- frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info));
+    frametable_virt_end = FRAMETABLE_VIRT_START +
+                          (nr_pdxs * sizeof(struct page_info));
}
void *__init arch_vmap_virt_end(void)
@@ -1087,7 +1093,8 @@ static int xen_pt_update_entry(mfn_t root, unsigned long virt,
DECLARE_OFFSETS(offsets, (paddr_t)virt);
/* _PAGE_POPULATE and _PAGE_PRESENT should never be set together. */
- ASSERT((flags & (_PAGE_POPULATE|_PAGE_PRESENT)) != (_PAGE_POPULATE|_PAGE_PRESENT));
+ ASSERT((flags & (_PAGE_POPULATE|_PAGE_PRESENT)) !=
+ (_PAGE_POPULATE|_PAGE_PRESENT));
table = xen_map_table(root);
for ( level = HYP_PT_ROOT_LEVEL; level < target; level++ )
@@ -1471,7 +1478,8 @@ struct domain *page_get_owner_and_reference(struct page_info *page)
unsigned long x, y = page->count_info;
struct domain *owner;
- do {
+ do
+ {
x = y;
/*
* Count == 0: Page is not allocated, so we cannot take a reference.
@@ -1479,8 +1487,7 @@ struct domain *page_get_owner_and_reference(struct page_info *page)
*/
if ( unlikely(((x + 1) & PGC_count_mask) <= 1) )
return NULL;
- }
- while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
+ } while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
owner = page_get_owner(page);
ASSERT(owner);
@@ -1492,17 +1499,15 @@ void put_page(struct page_info *page)
{
unsigned long nx, x, y = page->count_info;
- do {
+ do
+ {
ASSERT((y & PGC_count_mask) != 0);
x = y;
nx = x - 1;
- }
- while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
+ } while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
if ( unlikely((nx & PGC_count_mask) == 0) )
- {
free_domheap_page(page);
- }
}
int get_page(struct page_info *page, struct domain *domain)
diff --git a/xen/arch/arm/monitor.c b/xen/arch/arm/monitor.c
index 8c4a396e3c..75a76fc00a 100644
--- a/xen/arch/arm/monitor.c
+++ b/xen/arch/arm/monitor.c
@@ -59,7 +59,8 @@ int arch_monitor_domctl_event(struct domain *d,
int monitor_smc(void)
{
- vm_event_request_t req = {
+ vm_event_request_t req =
+ {
.reason = VM_EVENT_REASON_PRIVILEGED_CALL
};
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index e28ea1c85a..c0ca9abbb2 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -38,9 +38,9 @@ unsigned int __read_mostly p2m_ipa_bits;
/* Helpers to lookup the properties of each level */
static const paddr_t level_masks[] =
- { ZEROETH_MASK, FIRST_MASK, SECOND_MASK, THIRD_MASK };
+{ ZEROETH_MASK, FIRST_MASK, SECOND_MASK, THIRD_MASK };
static const uint8_t level_orders[] =
- { ZEROETH_ORDER, FIRST_ORDER, SECOND_ORDER, THIRD_ORDER };
+{ ZEROETH_ORDER, FIRST_ORDER, SECOND_ORDER, THIRD_ORDER };
static mfn_t __read_mostly empty_root_mfn;
@@ -576,7 +576,8 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a)
* sh, xn and write bit will be defined in the following switches
* based on mattr and t.
*/
- lpae_t e = (lpae_t) {
+ lpae_t e = (lpae_t)
+ {
.p2m.af = 1,
.p2m.read = 1,
.p2m.table = 1,
@@ -1118,7 +1119,7 @@ int p2m_set_entry(struct p2m_domain *p2m,
sgfn = gfn_add(sgfn, (1 << order));
if ( !mfn_eq(smfn, INVALID_MFN) )
- smfn = mfn_add(smfn, (1 << order));
+ smfn = mfn_add(smfn, (1 << order));
nr -= (1 << order);
}
@@ -1532,7 +1533,7 @@ int p2m_init(struct domain *d)
* reached the memory
*/
p2m->clean_pte = iommu_enabled &&
- !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
+ !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
rc = p2m_alloc_table(d);
@@ -1545,7 +1546,7 @@ int p2m_init(struct domain *d)
BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0])* 8)) < INVALID_VCPU_ID);
for_each_possible_cpu(cpu)
- p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID;
+ p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID;
/*
* Besides getting a domain when we only have the p2m in hand,
@@ -1606,7 +1607,9 @@ int relinquish_p2m_mapping(struct domain *d)
p2m_invalid, p2m_access_rwx);
if ( unlikely(rc) )
{
- printk(XENLOG_G_ERR "Unable to remove mapping gfn=%#"PRI_gfn" order=%u from the p2m of domain %d\n", gfn_x(start), order, d->domain_id);
+ printk(XENLOG_G_ERR
+ "Unable to remove mapping gfn=%#"PRI_gfn" order=%u from the p2m of domain %d\n",
+ gfn_x(start), order, d->domain_id);
break;
}
}
@@ -1650,15 +1653,15 @@ int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
while ( gfn_x(start) < gfn_x(end) )
{
- /*
- * Cleaning the cache for the P2M may take a long time. So we
- * need to be able to preempt. We will arbitrarily preempt every
- * time count reach 512 or above.
- *
- * The count will be incremented by:
- * - 1 on region skipped
- * - 10 for each page requiring a flush
- */
+ /*
+ * Cleaning the cache for the P2M may take a long time. So we
+ * need to be able to preempt. We will arbitrarily preempt every
+         * time count reaches 512 or above.
+ *
+ * The count will be incremented by:
+ * - 1 on region skipped
+ * - 10 for each page requiring a flush
+ */
if ( count >= 512 )
{
if ( softirq_pending(smp_processor_id()) )
@@ -1963,12 +1966,14 @@ void __init setup_virt_paging(void)
val |= VTCR_T0SZ(0x18); /* 40 bit IPA */
val |= VTCR_SL0(0x1); /* P2M starts at first level */
#else /* CONFIG_ARM_64 */
- const struct {
+ const struct
+ {
unsigned int pabits; /* Physical Address Size */
unsigned int t0sz; /* Desired T0SZ, minimum in comment */
unsigned int root_order; /* Page order of the root of the p2m */
unsigned int sl0; /* Desired SL0, maximum in comment */
- } pa_range_info[] = {
+ } pa_range_info[] =
+ {
/* T0SZ minimum and SL0 maximum from ARM DDI 0487A.b Table D4-5 */
/* PA size, t0sz(min), root-order, sl0(max) */
[0] = { 32, 32/*32*/, 0, 1 },
@@ -2070,7 +2075,8 @@ static int cpu_virt_paging_callback(struct notifier_block *nfb,
return NOTIFY_DONE;
}
-static struct notifier_block cpu_virt_paging_nfb = {
+static struct notifier_block cpu_virt_paging_nfb =
+{
.notifier_call = cpu_virt_paging_callback,
};
diff --git a/xen/arch/arm/percpu.c b/xen/arch/arm/percpu.c
index 25442c48fe..c3f68ac9d1 100644
--- a/xen/arch/arm/percpu.c
+++ b/xen/arch/arm/percpu.c
@@ -27,7 +27,8 @@ static int init_percpu_area(unsigned int cpu)
return 0;
}
-struct free_info {
+struct free_info
+{
unsigned int cpu;
struct rcu_head rcu;
};
@@ -71,7 +72,8 @@ static int cpu_percpu_callback(
return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}
-static struct notifier_block cpu_percpu_nfb = {
+static struct notifier_block cpu_percpu_nfb =
+{
.notifier_call = cpu_percpu_callback,
.priority = 100 /* highest priority */
};
diff --git a/xen/arch/arm/platform_hypercall.c b/xen/arch/arm/platform_hypercall.c
index 5aab856ce7..f1f862e735 100644
--- a/xen/arch/arm/platform_hypercall.c
+++ b/xen/arch/arm/platform_hypercall.c
@@ -45,7 +45,7 @@ long do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
while ( !spin_trylock(&xenpf_lock) )
if ( hypercall_preempt_check() )
return hypercall_create_continuation(
- __HYPERVISOR_platform_op, "h", u_xenpf_op);
+ __HYPERVISOR_platform_op, "h", u_xenpf_op);
switch ( op->cmd )
{
diff --git a/xen/arch/arm/platforms/brcm.c b/xen/arch/arm/platforms/brcm.c
index d481b2c60f..6c0aebd15c 100644
--- a/xen/arch/arm/platforms/brcm.c
+++ b/xen/arch/arm/platforms/brcm.c
@@ -23,7 +23,8 @@
#include <asm/io.h>
#include <xen/delay.h>
-struct brcm_plat_regs {
+struct brcm_plat_regs
+{
uint32_t hif_mask;
uint32_t hif_cpu_reset_config;
uint32_t hif_boot_continuation;
@@ -105,13 +106,13 @@ static __init int brcm_populate_plat_regs(void)
regs.hif_boot_continuation = reg_base;
dprintk(XENLOG_INFO, "hif_cpu_reset_config : %08xh\n",
- regs.hif_cpu_reset_config);
+ regs.hif_cpu_reset_config);
dprintk(XENLOG_INFO, "cpu0_pwr_zone_ctrl : %08xh\n",
- regs.cpu0_pwr_zone_ctrl);
+ regs.cpu0_pwr_zone_ctrl);
dprintk(XENLOG_INFO, "hif_boot_continuation : %08xh\n",
- regs.hif_boot_continuation);
+ regs.hif_boot_continuation);
dprintk(XENLOG_INFO, "scratch_reg : %08xh\n",
- regs.scratch_reg);
+ regs.scratch_reg);
return 0;
}
@@ -133,7 +134,7 @@ static int brcm_cpu_power_on(int cpu)
if ( !pwr_ctl )
{
dprintk(XENLOG_ERR, "%s: Unable to map \"cpu0_pwr_zone_ctrl\"\n",
- __func__);
+ __func__);
return -EFAULT;
}
@@ -228,7 +229,7 @@ static int brcm_cpu_up(int cpu)
if ( rc )
return rc;
- return brcm_cpu_release(cpu);
+ return brcm_cpu_release(cpu);
}
static int __init brcm_smp_init(void)
@@ -278,10 +279,10 @@ static const char *const brcm_dt_compat[] __initconst =
};
PLATFORM_START(brcm, "Broadcom B15")
- .compatible = brcm_dt_compat,
- .init = brcm_init,
- .smp_init = brcm_smp_init,
- .cpu_up = brcm_cpu_up,
+.compatible = brcm_dt_compat,
+.init = brcm_init,
+.smp_init = brcm_smp_init,
+.cpu_up = brcm_cpu_up,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/exynos5.c b/xen/arch/arm/platforms/exynos5.c
index 6560507092..41fd9c651e 100644
--- a/xen/arch/arm/platforms/exynos5.c
+++ b/xen/arch/arm/platforms/exynos5.c
@@ -288,35 +288,35 @@ static const struct dt_device_match exynos5_blacklist_dev[] __initconst =
{ /* sentinel */ },
};
-static const char * const exynos5250_dt_compat[] __initconst =
+static const char *const exynos5250_dt_compat[] __initconst =
{
"samsung,exynos5250",
NULL
};
-static const char * const exynos5_dt_compat[] __initconst =
+static const char *const exynos5_dt_compat[] __initconst =
{
"samsung,exynos5410",
NULL
};
PLATFORM_START(exynos5250, "SAMSUNG EXYNOS5250")
- .compatible = exynos5250_dt_compat,
- .init_time = exynos5_init_time,
- .specific_mapping = exynos5250_specific_mapping,
- .smp_init = exynos5_smp_init,
- .cpu_up = cpu_up_send_sgi,
- .reset = exynos5_reset,
- .blacklist_dev = exynos5_blacklist_dev,
+.compatible = exynos5250_dt_compat,
+.init_time = exynos5_init_time,
+.specific_mapping = exynos5250_specific_mapping,
+.smp_init = exynos5_smp_init,
+.cpu_up = cpu_up_send_sgi,
+.reset = exynos5_reset,
+.blacklist_dev = exynos5_blacklist_dev,
PLATFORM_END
PLATFORM_START(exynos5, "SAMSUNG EXYNOS5")
- .compatible = exynos5_dt_compat,
- .init_time = exynos5_init_time,
- .smp_init = exynos5_smp_init,
- .cpu_up = exynos5_cpu_up,
- .reset = exynos5_reset,
- .blacklist_dev = exynos5_blacklist_dev,
+.compatible = exynos5_dt_compat,
+.init_time = exynos5_init_time,
+.smp_init = exynos5_smp_init,
+.cpu_up = exynos5_cpu_up,
+.reset = exynos5_reset,
+.blacklist_dev = exynos5_blacklist_dev,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/midway.c b/xen/arch/arm/platforms/midway.c
index b221279ec7..0e308222d1 100644
--- a/xen/arch/arm/platforms/midway.c
+++ b/xen/arch/arm/platforms/midway.c
@@ -42,15 +42,15 @@ static void midway_reset(void)
iounmap(pmu);
}
-static const char * const midway_dt_compat[] __initconst =
+static const char *const midway_dt_compat[] __initconst =
{
"calxeda,ecx-2000",
NULL
};
PLATFORM_START(midway, "CALXEDA MIDWAY")
- .compatible = midway_dt_compat,
- .reset = midway_reset,
+.compatible = midway_dt_compat,
+.reset = midway_reset,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/omap5.c b/xen/arch/arm/platforms/omap5.c
index aee24e4d28..a2e2ac2ea7 100644
--- a/xen/arch/arm/platforms/omap5.c
+++ b/xen/arch/arm/platforms/omap5.c
@@ -23,7 +23,8 @@
#include <xen/vmap.h>
#include <asm/io.h>
-static uint16_t num_den[8][2] = {
+static uint16_t num_den[8][2] =
+{
{ 0, 0 }, /* not used */
{ 26 * 64, 26 * 125 }, /* 12.0 Mhz */
{ 2 * 768, 2 * 1625 }, /* 13.0 Mhz */
@@ -58,7 +59,7 @@ static int omap5_init_time(void)
}
sys_clksel = readl(ckgen_prm_base + OMAP5_CM_CLKSEL_SYS) &
- ~SYS_CLKSEL_MASK;
+ ~SYS_CLKSEL_MASK;
iounmap(ckgen_prm_base);
@@ -139,31 +140,31 @@ static int __init omap5_smp_init(void)
return 0;
}
-static const char * const omap5_dt_compat[] __initconst =
+static const char *const omap5_dt_compat[] __initconst =
{
"ti,omap5",
NULL
};
-static const char * const dra7_dt_compat[] __initconst =
+static const char *const dra7_dt_compat[] __initconst =
{
"ti,dra7",
NULL
};
PLATFORM_START(omap5, "TI OMAP5")
- .compatible = omap5_dt_compat,
- .init_time = omap5_init_time,
- .specific_mapping = omap5_specific_mapping,
- .smp_init = omap5_smp_init,
- .cpu_up = cpu_up_send_sgi,
+.compatible = omap5_dt_compat,
+.init_time = omap5_init_time,
+.specific_mapping = omap5_specific_mapping,
+.smp_init = omap5_smp_init,
+.cpu_up = cpu_up_send_sgi,
PLATFORM_END
PLATFORM_START(dra7, "TI DRA7")
- .compatible = dra7_dt_compat,
- .init_time = omap5_init_time,
- .cpu_up = cpu_up_send_sgi,
- .smp_init = omap5_smp_init,
+.compatible = dra7_dt_compat,
+.init_time = omap5_init_time,
+.cpu_up = cpu_up_send_sgi,
+.smp_init = omap5_smp_init,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/rcar2.c b/xen/arch/arm/platforms/rcar2.c
index df0ac84709..0ee6c4564e 100644
--- a/xen/arch/arm/platforms/rcar2.c
+++ b/xen/arch/arm/platforms/rcar2.c
@@ -31,7 +31,7 @@ static int __init rcar2_smp_init(void)
/* map ICRAM */
pram = ioremap_nocache(RCAR2_RAM_ADDR, RCAR2_RAM_SIZE);
- if( !pram )
+ if ( !pram )
{
dprintk( XENLOG_ERR, "Unable to map RCAR2 ICRAM\n");
return -ENOMEM;
@@ -53,9 +53,9 @@ static const char *const rcar2_dt_compat[] __initconst =
};
PLATFORM_START(rcar2, "Renesas R-Car Gen2")
- .compatible = rcar2_dt_compat,
- .cpu_up = cpu_up_send_sgi,
- .smp_init = rcar2_smp_init,
+.compatible = rcar2_dt_compat,
+.cpu_up = cpu_up_send_sgi,
+.smp_init = rcar2_smp_init,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/seattle.c b/xen/arch/arm/platforms/seattle.c
index 64cc1868c2..efa0867a10 100644
--- a/xen/arch/arm/platforms/seattle.c
+++ b/xen/arch/arm/platforms/seattle.c
@@ -20,7 +20,7 @@
#include <asm/platform.h>
#include <asm/psci.h>
-static const char * const seattle_dt_compat[] __initconst =
+static const char *const seattle_dt_compat[] __initconst =
{
"amd,seattle",
NULL
@@ -42,9 +42,9 @@ static void seattle_system_off(void)
}
PLATFORM_START(seattle, "SEATTLE")
- .compatible = seattle_dt_compat,
- .reset = seattle_system_reset,
- .poweroff = seattle_system_off,
+.compatible = seattle_dt_compat,
+.reset = seattle_system_reset,
+.poweroff = seattle_system_off,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/sunxi.c b/xen/arch/arm/platforms/sunxi.c
index 55705b15b2..d6963647c3 100644
--- a/xen/arch/arm/platforms/sunxi.c
+++ b/xen/arch/arm/platforms/sunxi.c
@@ -40,7 +40,7 @@ static void __iomem *sunxi_map_watchdog(bool *new_wdt)
node = dt_find_compatible_node(NULL, NULL, "allwinner,sun6i-a31-wdt");
if ( node )
- _new_wdt = true;
+ _new_wdt = true;
else
node = dt_find_compatible_node(NULL, NULL, "allwinner,sun4i-a10-wdt");
@@ -103,7 +103,7 @@ static void sunxi_reset(void)
wfi();
}
-static const char * const sunxi_v7_dt_compat[] __initconst =
+static const char *const sunxi_v7_dt_compat[] __initconst =
{
"allwinner,sun6i-a31",
"allwinner,sun6i-a31s",
@@ -115,7 +115,7 @@ static const char * const sunxi_v7_dt_compat[] __initconst =
NULL
};
-static const char * const sunxi_v8_dt_compat[] __initconst =
+static const char *const sunxi_v8_dt_compat[] __initconst =
{
"allwinner,sun50i-a64",
"allwinner,sun50i-h5",
@@ -133,14 +133,14 @@ static const struct dt_device_match sunxi_blacklist_dev[] __initconst =
};
PLATFORM_START(sunxi_v7, "Allwinner ARMv7")
- .compatible = sunxi_v7_dt_compat,
- .blacklist_dev = sunxi_blacklist_dev,
- .reset = sunxi_reset,
+.compatible = sunxi_v7_dt_compat,
+.blacklist_dev = sunxi_blacklist_dev,
+.reset = sunxi_reset,
PLATFORM_END
PLATFORM_START(sunxi_v8, "Allwinner ARMv8")
- .compatible = sunxi_v8_dt_compat,
- .blacklist_dev = sunxi_blacklist_dev,
+.compatible = sunxi_v8_dt_compat,
+.blacklist_dev = sunxi_blacklist_dev,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/thunderx.c b/xen/arch/arm/platforms/thunderx.c
index 9b32a29c6b..08e6d0fffc 100644
--- a/xen/arch/arm/platforms/thunderx.c
+++ b/xen/arch/arm/platforms/thunderx.c
@@ -20,7 +20,7 @@
#include <asm/platform.h>
-static const char * const thunderx_dt_compat[] __initconst =
+static const char *const thunderx_dt_compat[] __initconst =
{
"cavium,thunder-88xx",
NULL
@@ -34,6 +34,6 @@ static const struct dt_device_match thunderx_blacklist_dev[] __initconst =
};
PLATFORM_START(thunderx, "THUNDERX")
- .compatible = thunderx_dt_compat,
- .blacklist_dev = thunderx_blacklist_dev,
+.compatible = thunderx_dt_compat,
+.blacklist_dev = thunderx_blacklist_dev,
PLATFORM_END
diff --git a/xen/arch/arm/platforms/vexpress.c b/xen/arch/arm/platforms/vexpress.c
index b6193f75b5..78a34d7301 100644
--- a/xen/arch/arm/platforms/vexpress.c
+++ b/xen/arch/arm/platforms/vexpress.c
@@ -39,13 +39,14 @@ static inline int vexpress_ctrl_start(uint32_t *syscfg, int write,
/* set control register */
syscfg[V2M_SYS_CFGCTRL/4] = V2M_SYS_CFG_START |
- (write ? V2M_SYS_CFG_WRITE : 0) |
- (dcc << DCC_SHIFT) | (function << FUNCTION_SHIFT) |
- (site << SITE_SHIFT) | (position << POSITION_SHIFT) |
- (device << DEVICE_SHIFT);
+ (write ? V2M_SYS_CFG_WRITE : 0) |
+ (dcc << DCC_SHIFT) | (function << FUNCTION_SHIFT) |
+ (site << SITE_SHIFT) | (position << POSITION_SHIFT) |
+ (device << DEVICE_SHIFT);
/* wait for complete flag to be set */
- do {
+ do
+ {
stat = syscfg[V2M_SYS_CFGSTAT/4];
dsb(sy);
} while ( !(stat & V2M_SYS_CFG_COMPLETE) );
@@ -78,10 +79,12 @@ static void vexpress_reset(void)
/* switch to slow mode */
writel(0x3, sp810);
- dsb(sy); isb();
+ dsb(sy);
+ isb();
/* writing any value to SCSYSSTAT reg will reset the system */
writel(0x1, sp810 + 4);
- dsb(sy); isb();
+ dsb(sy);
+ isb();
iounmap(sp810);
}
@@ -111,7 +114,7 @@ static int __init vexpress_smp_init(void)
#endif
-static const char * const vexpress_dt_compat[] __initconst =
+static const char *const vexpress_dt_compat[] __initconst =
{
"arm,vexpress",
NULL
@@ -134,13 +137,13 @@ static const struct dt_device_match vexpress_blacklist_dev[] __initconst =
};
PLATFORM_START(vexpress, "VERSATILE EXPRESS")
- .compatible = vexpress_dt_compat,
+.compatible = vexpress_dt_compat,
#ifdef CONFIG_ARM_32
- .smp_init = vexpress_smp_init,
- .cpu_up = cpu_up_send_sgi,
+.smp_init = vexpress_smp_init,
+.cpu_up = cpu_up_send_sgi,
#endif
- .reset = vexpress_reset,
- .blacklist_dev = vexpress_blacklist_dev,
+.reset = vexpress_reset,
+.blacklist_dev = vexpress_blacklist_dev,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/xgene-storm.c b/xen/arch/arm/platforms/xgene-storm.c
index fced4d7c2c..30ad155c6c 100644
--- a/xen/arch/arm/platforms/xgene-storm.c
+++ b/xen/arch/arm/platforms/xgene-storm.c
@@ -109,17 +109,17 @@ static int xgene_storm_init(void)
return 0;
}
-static const char * const xgene_storm_dt_compat[] __initconst =
+static const char *const xgene_storm_dt_compat[] __initconst =
{
"apm,xgene-storm",
NULL
};
PLATFORM_START(xgene_storm, "APM X-GENE STORM")
- .compatible = xgene_storm_dt_compat,
- .init = xgene_storm_init,
- .reset = xgene_storm_reset,
- .quirks = xgene_storm_quirks,
+.compatible = xgene_storm_dt_compat,
+.init = xgene_storm_init,
+.reset = xgene_storm_reset,
+.quirks = xgene_storm_quirks,
PLATFORM_END
/*
diff --git a/xen/arch/arm/platforms/xilinx-zynqmp.c b/xen/arch/arm/platforms/xilinx-zynqmp.c
index 3060d79b34..d0ce9bd583 100644
--- a/xen/arch/arm/platforms/xilinx-zynqmp.c
+++ b/xen/arch/arm/platforms/xilinx-zynqmp.c
@@ -21,7 +21,7 @@
#include <asm/platforms/xilinx-zynqmp-eemi.h>
#include <asm/smccc.h>
-static const char * const zynqmp_dt_compat[] __initconst =
+static const char *const zynqmp_dt_compat[] __initconst =
{
"xlnx,zynqmp",
NULL
@@ -44,8 +44,8 @@ static bool zynqmp_smc(struct cpu_user_regs *regs)
}
PLATFORM_START(xilinx_zynqmp, "Xilinx ZynqMP")
- .compatible = zynqmp_dt_compat,
- .smc = zynqmp_smc,
+.compatible = zynqmp_dt_compat,
+.smc = zynqmp_smc,
PLATFORM_END
/*
diff --git a/xen/arch/arm/psci.c b/xen/arch/arm/psci.c
index 0c90c2305c..67e1a9a99a 100644
--- a/xen/arch/arm/psci.c
+++ b/xen/arch/arm/psci.c
@@ -193,7 +193,8 @@ static int __init psci_init_0_2(void)
}
else
{
- if ( acpi_psci_hvc_present() ) {
+ if ( acpi_psci_hvc_present() )
+ {
printk("PSCI conduit must be SMC, but is HVC\n");
return -EINVAL;
}
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index d5d188a105..9aa865fc80 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -86,7 +86,8 @@ static void __init init_idle_domain(void)
/* TODO: setup_idle_pagetable(); */
}
-static const char * __initdata processor_implementers[] = {
+static const char *__initdata processor_implementers[] =
+{
['A'] = "ARM Limited",
['B'] = "Broadcom Corporation",
['C'] = "Cavium Inc.",
@@ -176,9 +177,7 @@ static void __init processor_id(void)
boot_cpu_data.isa32.bits[4], boot_cpu_data.isa32.bits[5]);
}
else
- {
printk("32-bit Execution: Unsupported\n");
- }
processor_setup();
}
@@ -248,7 +247,7 @@ struct bootmodule __init *add_boot_module(bootmodule_kind kind,
* XSM, DTB) or Dom0 modules. This is not suitable for looking up guest
* modules.
*/
-struct bootmodule * __init boot_module_find_by_kind(bootmodule_kind kind)
+struct bootmodule *__init boot_module_find_by_kind(bootmodule_kind kind)
{
struct bootmodules *mods = &bootinfo.modules;
struct bootmodule *mod;
@@ -292,7 +291,7 @@ void __init add_boot_cmdline(const char *name, const char *cmdline,
* XSM, DTB) or Dom0 modules. This is not suitable for looking up guest
* modules.
*/
-struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind)
+struct bootcmdline *__init boot_cmdline_find_by_kind(bootmodule_kind kind)
{
struct bootcmdlines *cmds = &bootinfo.cmdlines;
struct bootcmdline *cmd;
@@ -307,7 +306,7 @@ struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind)
return NULL;
}
-struct bootcmdline * __init boot_cmdline_find_by_name(const char *name)
+struct bootcmdline *__init boot_cmdline_find_by_name(const char *name)
{
struct bootcmdlines *mods = &bootinfo.cmdlines;
struct bootcmdline *mod;
@@ -322,8 +321,9 @@ struct bootcmdline * __init boot_cmdline_find_by_name(const char *name)
return NULL;
}
-struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kind,
- paddr_t start)
+struct bootmodule *__init boot_module_find_by_addr_and_kind(
+ bootmodule_kind kind,
+ paddr_t start)
{
struct bootmodules *mods = &bootinfo.modules;
struct bootmodule *mod;
@@ -338,17 +338,24 @@ struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kin
return NULL;
}
-const char * __init boot_module_kind_as_string(bootmodule_kind kind)
+const char *__init boot_module_kind_as_string(bootmodule_kind kind)
{
switch ( kind )
{
- case BOOTMOD_XEN: return "Xen";
- case BOOTMOD_FDT: return "Device Tree";
- case BOOTMOD_KERNEL: return "Kernel";
- case BOOTMOD_RAMDISK: return "Ramdisk";
- case BOOTMOD_XSM: return "XSM";
- case BOOTMOD_UNKNOWN: return "Unknown";
- default: BUG();
+ case BOOTMOD_XEN:
+ return "Xen";
+ case BOOTMOD_FDT:
+ return "Device Tree";
+ case BOOTMOD_KERNEL:
+ return "Kernel";
+ case BOOTMOD_RAMDISK:
+ return "Ramdisk";
+ case BOOTMOD_XSM:
+ return "XSM";
+ case BOOTMOD_UNKNOWN:
+ return "Unknown";
+ default:
+ BUG();
}
}
@@ -550,8 +557,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
paddr_t bank_end = bank_start + bank_size;
ram_size = ram_size + bank_size;
- ram_start = min(ram_start,bank_start);
- ram_end = max(ram_end,bank_end);
+ ram_start = min(ram_start, bank_start);
+ ram_end = max(ram_end, bank_end);
}
total_pages = ram_pages = ram_size >> PAGE_SHIFT;
@@ -630,9 +637,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
e = next_module(s, &n);
if ( e == ~(paddr_t)0 )
- {
e = n = ram_end;
- }
/*
* Module in a RAM bank other than the one which we are
@@ -685,8 +690,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
paddr_t s, e;
ram_size = ram_size + bank_size;
- ram_start = min(ram_start,bank_start);
- ram_end = max(ram_end,bank_end);
+ ram_start = min(ram_start, bank_start);
+ ram_end = max(ram_end, bank_end);
setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);
@@ -698,9 +703,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
e = next_module(s, &n);
if ( e == ~(paddr_t)0 )
- {
e = n = bank_end;
- }
if ( e > bank_end )
e = bank_end;
@@ -742,7 +745,8 @@ void __init start_xen(unsigned long boot_phys_offset,
const char *cmdline;
struct bootmodule *xen_bootmodule;
struct domain *dom0;
- struct xen_domctl_createdomain dom0_cfg = {
+ struct xen_domctl_createdomain dom0_cfg =
+ {
.flags = XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap,
.max_evtchn_port = -1,
.max_grant_frames = gnttab_dom0_frames(),
@@ -780,8 +784,8 @@ void __init start_xen(unsigned long boot_phys_offset,
/* Register Xen's load address as a boot module. */
xen_bootmodule = add_boot_module(BOOTMOD_XEN,
- (paddr_t)(uintptr_t)(_start + boot_phys_offset),
- (paddr_t)(uintptr_t)(_end - _start + 1), false);
+ (paddr_t)(uintptr_t)(_start + boot_phys_offset),
+ (paddr_t)(uintptr_t)(_end - _start + 1), false);
BUG_ON(!xen_bootmodule);
setup_mm(fdt_paddr, fdt_size);
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index 00b64c3322..d94d4c1d8c 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -50,7 +50,7 @@ nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
/* Xen stack for bringing up the first CPU. */
static unsigned char __initdata cpu0_boot_stack[STACK_SIZE]
- __attribute__((__aligned__(STACK_SIZE)));
+__attribute__((__aligned__(STACK_SIZE)));
/* Boot cpu data */
struct init_info init_data =
@@ -329,7 +329,8 @@ void start_secondary(void)
if ( dcache_line_bytes != read_dcache_line_bytes() )
{
- printk(XENLOG_ERR "CPU%u dcache line size (%zu) does not match the boot CPU (%zu)\n",
+ printk(XENLOG_ERR
+ "CPU%u dcache line size (%zu) does not match the boot CPU (%zu)\n",
smp_processor_id(), read_dcache_line_bytes(),
dcache_line_bytes);
stop_cpu();
@@ -386,7 +387,7 @@ void __cpu_disable(void)
smp_mb();
- /* Return to caller; eventually the IPI mechanism will unwind and the
+ /* Return to caller; eventually the IPI mechanism will unwind and the
* scheduler will drop to the idle loop, which will call stop_cpu(). */
}
@@ -519,7 +520,8 @@ static int cpu_smpboot_callback(struct notifier_block *nfb,
return NOTIFY_DONE;
}
-static struct notifier_block cpu_smpboot_nfb = {
+static struct notifier_block cpu_smpboot_nfb =
+{
.notifier_call = cpu_smpboot_callback,
};
diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
index ec5402e89b..1e0f1965ac 100644
--- a/xen/arch/arm/tee/optee.c
+++ b/xen/arch/arm/tee/optee.c
@@ -83,7 +83,8 @@ static unsigned int __read_mostly max_optee_threads;
* Call context. OP-TEE can issue multiple RPC returns during one call.
* We need to preserve context during them.
*/
-struct optee_std_call {
+struct optee_std_call
+{
struct list_head list;
/* Page where shadowed copy of call arguments is stored */
struct page_info *xen_arg_pg;
@@ -99,7 +100,8 @@ struct optee_std_call {
};
/* Pre-allocated SHM buffer for RPC commands */
-struct shm_rpc {
+struct shm_rpc
+{
struct list_head list;
struct page_info *guest_page;
struct page_info *xen_arg_pg;
@@ -109,7 +111,8 @@ struct shm_rpc {
};
/* Shared memory buffer for arbitrary data */
-struct optee_shm_buf {
+struct optee_shm_buf
+{
struct list_head list;
uint64_t cookie;
unsigned int page_cnt;
@@ -127,7 +130,8 @@ struct optee_shm_buf {
};
/* Domain context */
-struct optee_domain {
+struct optee_domain
+{
struct list_head call_list;
struct list_head shm_rpc_list;
struct list_head optee_shm_buf_list;
@@ -450,8 +454,8 @@ static struct shm_rpc *find_shm_rpc(struct optee_domain *ctx, uint64_t cookie)
{
if ( shm_rpc->cookie == cookie )
{
- spin_unlock(&ctx->lock);
- return shm_rpc;
+ spin_unlock(&ctx->lock);
+ return shm_rpc;
}
}
spin_unlock(&ctx->lock);
@@ -475,9 +479,8 @@ static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx,
new = old + pages_cnt;
if ( new >= MAX_TOTAL_SMH_BUF_PG )
return ERR_PTR(-ENOMEM);
- }
- while ( unlikely(old != atomic_cmpxchg(&ctx->optee_shm_buf_pages,
- old, new)) );
+ } while ( unlikely(old != atomic_cmpxchg(&ctx->optee_shm_buf_pages,
+ old, new)) );
/*
* TODO: Guest can try to register many small buffers, thus, forcing
@@ -608,7 +611,7 @@ static int optee_relinquish_resources(struct domain *d)
* (CFG_NUM_THREADS option).
*/
list_for_each_entry_safe( call, call_tmp, &ctx->call_list, list )
- free_std_call(ctx, call);
+ free_std_call(ctx, call);
if ( hypercall_preempt_check() )
return -ERESTART;
@@ -618,7 +621,7 @@ static int optee_relinquish_resources(struct domain *d)
* check the comment above.
*/
list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list )
- free_shm_rpc(ctx, shm_rpc->cookie);
+ free_shm_rpc(ctx, shm_rpc->cookie);
if ( hypercall_preempt_check() )
return -ERESTART;
@@ -630,7 +633,7 @@ static int optee_relinquish_resources(struct domain *d)
*/
list_for_each_entry_safe( optee_shm_buf, optee_shm_buf_tmp,
&ctx->optee_shm_buf_list, list )
- free_optee_shm_buf(ctx, optee_shm_buf->cookie);
+ free_optee_shm_buf(ctx, optee_shm_buf->cookie);
if ( hypercall_preempt_check() )
return -ERESTART;
@@ -685,7 +688,8 @@ static int translate_noncontig(struct optee_domain *ctx,
*
* Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h for details.
*/
- struct {
+ struct
+ {
uint64_t pages_list[PAGELIST_ENTRIES_PER_PAGE];
uint64_t next_page_data;
} *guest_data, *xen_data;
@@ -1040,10 +1044,10 @@ static int handle_rpc_return(struct optee_domain *ctx,
shm_rpc->xen_arg = __map_domain_page(shm_rpc->xen_arg_pg);
if ( access_guest_memory_by_ipa(current->domain,
- gfn_to_gaddr(shm_rpc->gfn),
- shm_rpc->xen_arg,
- OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params),
- true) )
+ gfn_to_gaddr(shm_rpc->gfn),
+ shm_rpc->xen_arg,
+ OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params),
+ true) )
{
/*
* We were unable to propagate request to guest, so let's return
@@ -1187,7 +1191,7 @@ static void handle_std_call(struct optee_domain *ctx,
case OPTEE_MSG_CMD_CANCEL:
case OPTEE_MSG_CMD_REGISTER_SHM:
case OPTEE_MSG_CMD_UNREGISTER_SHM:
- if( translate_params(ctx, call) )
+ if ( translate_params(ctx, call) )
{
/*
* translate_params() sets xen_arg->ret value to non-zero.
@@ -1440,7 +1444,8 @@ static void handle_exchange_capabilities(struct cpu_user_regs *regs)
arm_smccc_smc(OPTEE_SMC_EXCHANGE_CAPABILITIES, caps, 0, 0, 0, 0, 0,
OPTEE_CLIENT_ID(current->domain), &resp);
- if ( resp.a0 != OPTEE_SMC_RETURN_OK ) {
+ if ( resp.a0 != OPTEE_SMC_RETURN_OK )
+ {
set_user_reg(regs, 0, resp.a0);
return;
}
@@ -1496,7 +1501,7 @@ static bool optee_handle_call(struct cpu_user_regs *regs)
case OPTEE_SMC_CALL_GET_OS_UUID:
arm_smccc_smc(OPTEE_SMC_CALL_GET_OS_UUID, 0, 0, 0, 0, 0, 0,
- OPTEE_CLIENT_ID(current->domain),&resp);
+ OPTEE_CLIENT_ID(current->domain), &resp);
set_user_reg(regs, 0, resp.a0);
set_user_reg(regs, 1, resp.a1);
set_user_reg(regs, 2, resp.a2);
@@ -1520,7 +1525,8 @@ static bool optee_handle_call(struct cpu_user_regs *regs)
arm_smccc_smc(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
OPTEE_CLIENT_ID(current->domain), &resp);
set_user_reg(regs, 0, resp.a0);
- if ( resp.a0 == OPTEE_SMC_RETURN_OK ) {
+ if ( resp.a0 == OPTEE_SMC_RETURN_OK )
+ {
free_shm_rpc(ctx, regpair_to_uint64(resp.a1, resp.a2));
set_user_reg(regs, 1, resp.a1);
set_user_reg(regs, 2, resp.a2);
diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
index 739bcf186c..2b402f77e7 100644
--- a/xen/arch/arm/time.c
+++ b/xen/arch/arm/time.c
@@ -70,7 +70,7 @@ static __initdata struct dt_device_node *timer;
static u32 __init acpi_get_timer_irq_type(u32 flags)
{
return (flags & ACPI_GTDT_INTERRUPT_MODE) ? IRQ_TYPE_EDGE_BOTH
- : IRQ_TYPE_LEVEL_MASK;
+ : IRQ_TYPE_LEVEL_MASK;
}
/* Initialize per-processor generic timer */
@@ -305,7 +305,7 @@ void init_timer_interrupt(void)
request_irq(timer_irq[TIMER_HYP_PPI], 0, timer_interrupt,
"hyptimer", NULL);
request_irq(timer_irq[TIMER_VIRT_PPI], 0, vtimer_interrupt,
- "virtimer", NULL);
+ "virtimer", NULL);
request_irq(timer_irq[TIMER_PHYS_NONSECURE_PPI], 0, timer_interrupt,
"phytimer", NULL);
@@ -373,7 +373,8 @@ static int cpu_time_callback(struct notifier_block *nfb,
return NOTIFY_DONE;
}
-static struct notifier_block cpu_time_nfb = {
+static struct notifier_block cpu_time_nfb =
+{
.notifier_call = cpu_time_callback,
};
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 3103620323..06bc724287 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -54,7 +54,8 @@
* that both the kernel half of struct cpu_user_regs (which is pushed in
* entry.S) and struct cpu_info (which lives at the bottom of a Xen
* stack) must be doubleword-aligned in size. */
-static inline void check_stack_alignment_constraints(void) {
+static inline void check_stack_alignment_constraints(void)
+{
#ifdef CONFIG_ARM_64
BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0xf);
BUILD_BUG_ON((offsetof(struct cpu_user_regs, spsr_el1)) & 0xf);
@@ -77,19 +78,20 @@ static int debug_stack_lines = 40;
integer_param("debug_stack_lines", debug_stack_lines);
-static enum {
- TRAP,
- NATIVE,
+static enum
+{
+ TRAP,
+ NATIVE,
} vwfi;
static int __init parse_vwfi(const char *s)
{
- if ( !strcmp(s, "native") )
- vwfi = NATIVE;
- else
- vwfi = TRAP;
+ if ( !strcmp(s, "native") )
+ vwfi = NATIVE;
+ else
+ vwfi = TRAP;
- return 0;
+ return 0;
}
custom_param("vwfi", parse_vwfi);
@@ -100,7 +102,8 @@ register_t get_default_hcr_flags(void)
HCR_TSC|HCR_TAC|HCR_SWIO|HCR_TIDCP|HCR_FB|HCR_TSW);
}
-static enum {
+static enum
+{
SERRORS_DIVERSE,
SERRORS_FORWARD,
SERRORS_PANIC,
@@ -356,11 +359,16 @@ static const char *fsc_level_str(int level)
{
switch ( level )
{
- case -1: return "";
- case 1: return " at level 1";
- case 2: return " at level 2";
- case 3: return " at level 3";
- default: return " (level invalid)";
+ case -1:
+ return "";
+ case 1:
+ return " at level 1";
+ case 2:
+ return " at level 2";
+ case 3:
+ return " at level 3";
+ default:
+ return " (level invalid)";
}
}
@@ -518,7 +526,7 @@ static vaddr_t exception_handler64(struct cpu_user_regs *regs, vaddr_t offset)
if ( usr_mode(regs) )
base += VECTOR64_LOWER32_BASE;
- else if ( psr_mode(regs->cpsr,PSR_MODE_EL0t) )
+ else if ( psr_mode(regs->cpsr, PSR_MODE_EL0t) )
base += VECTOR64_LOWER64_BASE;
else /* Otherwise must be from kernel mode */
base += VECTOR64_CURRENT_SPx_BASE;
@@ -530,7 +538,8 @@ static vaddr_t exception_handler64(struct cpu_user_regs *regs, vaddr_t offset)
void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len)
{
vaddr_t handler;
- const union hsr esr = {
+ const union hsr esr =
+ {
.iss = 0,
.len = instr_len,
.ec = HSR_EC_UNKNOWN,
@@ -544,7 +553,7 @@ void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len)
regs->elr_el1 = regs->pc;
regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \
- PSR_IRQ_MASK | PSR_DBG_MASK;
+ PSR_IRQ_MASK | PSR_DBG_MASK;
regs->pc = handler;
WRITE_SYSREG32(esr.bits, ESR_EL1);
@@ -557,17 +566,18 @@ static void inject_abt64_exception(struct cpu_user_regs *regs,
int instr_len)
{
vaddr_t handler;
- union hsr esr = {
+ union hsr esr =
+ {
.iss = 0,
.len = instr_len,
};
if ( psr_mode_is_user(regs) )
esr.ec = prefetch
- ? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL;
+ ? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL;
else
esr.ec = prefetch
- ? HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL;
+ ? HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL;
BUG_ON( is_32bit_domain(current->domain) );
@@ -577,7 +587,7 @@ static void inject_abt64_exception(struct cpu_user_regs *regs,
regs->elr_el1 = regs->pc;
regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \
- PSR_IRQ_MASK | PSR_DBG_MASK;
+ PSR_IRQ_MASK | PSR_DBG_MASK;
regs->pc = handler;
WRITE_SYSREG(addr, FAR_EL1);
@@ -585,15 +595,15 @@ static void inject_abt64_exception(struct cpu_user_regs *regs,
}
static void inject_dabt64_exception(struct cpu_user_regs *regs,
- register_t addr,
- int instr_len)
+ register_t addr,
+ int instr_len)
{
inject_abt64_exception(regs, 0, addr, instr_len);
}
static void inject_iabt64_exception(struct cpu_user_regs *regs,
- register_t addr,
- int instr_len)
+ register_t addr,
+ int instr_len)
{
inject_abt64_exception(regs, 1, addr, instr_len);
}
@@ -602,11 +612,11 @@ static void inject_iabt64_exception(struct cpu_user_regs *regs,
void inject_undef_exception(struct cpu_user_regs *regs, const union hsr hsr)
{
- if ( is_32bit_domain(current->domain) )
- inject_undef32_exception(regs);
+ if ( is_32bit_domain(current->domain) )
+ inject_undef32_exception(regs);
#ifdef CONFIG_ARM_64
- else
- inject_undef64_exception(regs, hsr.len);
+ else
+ inject_undef64_exception(regs, hsr.len);
#endif
}
@@ -614,11 +624,11 @@ static void inject_iabt_exception(struct cpu_user_regs *regs,
register_t addr,
int instr_len)
{
- if ( is_32bit_domain(current->domain) )
- inject_pabt32_exception(regs, addr);
+ if ( is_32bit_domain(current->domain) )
+ inject_pabt32_exception(regs, addr);
#ifdef CONFIG_ARM_64
- else
- inject_iabt64_exception(regs, addr, instr_len);
+ else
+ inject_iabt64_exception(regs, addr, instr_len);
#endif
}
@@ -626,11 +636,11 @@ static void inject_dabt_exception(struct cpu_user_regs *regs,
register_t addr,
int instr_len)
{
- if ( is_32bit_domain(current->domain) )
- inject_dabt32_exception(regs, addr);
+ if ( is_32bit_domain(current->domain) )
+ inject_dabt32_exception(regs, addr);
#ifdef CONFIG_ARM_64
- else
- inject_dabt64_exception(regs, addr, instr_len);
+ else
+ inject_dabt64_exception(regs, addr, instr_len);
#endif
}
@@ -717,7 +727,8 @@ crash_system:
do_unexpected_trap("SError", regs);
}
-struct reg_ctxt {
+struct reg_ctxt
+{
/* Guest-side state */
uint32_t sctlr_el1;
register_t tcr_el1;
@@ -738,24 +749,25 @@ struct reg_ctxt {
static const char *mode_string(uint32_t cpsr)
{
uint32_t mode;
- static const char *mode_strings[] = {
- [PSR_MODE_USR] = "32-bit Guest USR",
- [PSR_MODE_FIQ] = "32-bit Guest FIQ",
- [PSR_MODE_IRQ] = "32-bit Guest IRQ",
- [PSR_MODE_SVC] = "32-bit Guest SVC",
- [PSR_MODE_MON] = "32-bit Monitor",
- [PSR_MODE_ABT] = "32-bit Guest ABT",
- [PSR_MODE_HYP] = "Hypervisor",
- [PSR_MODE_UND] = "32-bit Guest UND",
- [PSR_MODE_SYS] = "32-bit Guest SYS",
+ static const char *mode_strings[] =
+ {
+ [PSR_MODE_USR] = "32-bit Guest USR",
+ [PSR_MODE_FIQ] = "32-bit Guest FIQ",
+ [PSR_MODE_IRQ] = "32-bit Guest IRQ",
+ [PSR_MODE_SVC] = "32-bit Guest SVC",
+ [PSR_MODE_MON] = "32-bit Monitor",
+ [PSR_MODE_ABT] = "32-bit Guest ABT",
+ [PSR_MODE_HYP] = "Hypervisor",
+ [PSR_MODE_UND] = "32-bit Guest UND",
+ [PSR_MODE_SYS] = "32-bit Guest SYS",
#ifdef CONFIG_ARM_64
- [PSR_MODE_EL3h] = "64-bit EL3h (Monitor, handler)",
- [PSR_MODE_EL3t] = "64-bit EL3t (Monitor, thread)",
- [PSR_MODE_EL2h] = "64-bit EL2h (Hypervisor, handler)",
- [PSR_MODE_EL2t] = "64-bit EL2t (Hypervisor, thread)",
- [PSR_MODE_EL1h] = "64-bit EL1h (Guest Kernel, handler)",
- [PSR_MODE_EL1t] = "64-bit EL1t (Guest Kernel, thread)",
- [PSR_MODE_EL0t] = "64-bit EL0t (Guest User)",
+ [PSR_MODE_EL3h] = "64-bit EL3h (Monitor, handler)",
+ [PSR_MODE_EL3t] = "64-bit EL3t (Monitor, thread)",
+ [PSR_MODE_EL2h] = "64-bit EL2h (Hypervisor, handler)",
+ [PSR_MODE_EL2t] = "64-bit EL2t (Hypervisor, thread)",
+ [PSR_MODE_EL1h] = "64-bit EL1h (Guest Kernel, handler)",
+ [PSR_MODE_EL1t] = "64-bit EL1t (Guest Kernel, thread)",
+ [PSR_MODE_EL0t] = "64-bit EL0t (Guest User)",
#endif
};
mode = cpsr & PSR_MODE_MASK;
@@ -814,9 +826,7 @@ static void show_registers_32(const struct cpu_user_regs *regs,
}
#ifndef CONFIG_ARM_64
else
- {
printk("HYP: SP: %08"PRIx32" LR: %08"PRIregister"\n", regs->sp, regs->lr);
- }
#endif
printk("\n");
@@ -836,7 +846,7 @@ static void show_registers_32(const struct cpu_user_regs *regs,
#else
ctxt->ifar, ctxt->ifsr, ctxt->dfar, ctxt->dfsr
#endif
- );
+ );
printk("\n");
}
}
@@ -861,9 +871,7 @@ static void show_registers_64(const struct cpu_user_regs *regs,
printk("SP_EL1: %016"PRIx64"\n", regs->sp_el1);
}
else
- {
printk("SP: %016"PRIx64"\n", regs->sp);
- }
printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr,
mode_string(regs->cpsr));
printk(" X0: %016"PRIx64" X1: %016"PRIx64" X2: %016"PRIx64"\n",
@@ -925,9 +933,7 @@ static void _show_registers(const struct cpu_user_regs *regs,
show_registers_32(regs, ctxt, guest_mode, v);
}
else
- {
show_registers_64(regs, ctxt, guest_mode, v);
- }
}
#endif
}
@@ -1151,7 +1157,7 @@ static void show_trace(const struct cpu_user_regs *regs)
/* Bounds for range of valid frame pointer. */
low = (register_t)(STACK_BEFORE_EXCEPTION(regs));
high = (low & ~(STACK_SIZE - 1)) +
- (STACK_SIZE - sizeof(struct cpu_info));
+ (STACK_SIZE - sizeof(struct cpu_info));
/* The initial frame pointer. */
next = regs->fp;
@@ -1261,7 +1267,7 @@ int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc)
}
}
}
- found:
+found:
if ( !bug )
return -ENOENT;
@@ -1342,10 +1348,10 @@ static register_t do_deprecated_hypercall(void)
const register_t op =
#ifdef CONFIG_ARM_64
!is_32bit_domain(current->domain) ?
- regs->x16
+ regs->x16
:
#endif
- regs->r12;
+ regs->r12;
gdprintk(XENLOG_DEBUG, "%pv: deprecated hypercall %lu\n",
current, (unsigned long)op);
@@ -1355,7 +1361,8 @@ static register_t do_deprecated_hypercall(void)
typedef register_t (*arm_hypercall_fn_t)(
register_t, register_t, register_t, register_t, register_t);
-typedef struct {
+typedef struct
+{
arm_hypercall_fn_t fn;
int nr_args;
} arm_hypercall_t;
@@ -1382,7 +1389,8 @@ typedef struct {
.nr_args = _nr_args, \
}
-static arm_hypercall_t arm_hypercall_table[] = {
+static arm_hypercall_t arm_hypercall_table[] =
+{
HYPERCALL(memory_op, 2),
HYPERCALL(domctl, 1),
HYPERCALL(sched_op, 2),
@@ -1492,14 +1500,20 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr,
if ( !current->hcall_preempted )
{
/* Deliberately corrupt parameter regs used by this hypercall. */
- switch ( arm_hypercall_table[*nr].nr_args ) {
- case 5: HYPERCALL_ARG5(regs) = 0xDEADBEEF;
- case 4: HYPERCALL_ARG4(regs) = 0xDEADBEEF;
- case 3: HYPERCALL_ARG3(regs) = 0xDEADBEEF;
- case 2: HYPERCALL_ARG2(regs) = 0xDEADBEEF;
+ switch ( arm_hypercall_table[*nr].nr_args )
+ {
+ case 5:
+ HYPERCALL_ARG5(regs) = 0xDEADBEEF;
+ case 4:
+ HYPERCALL_ARG4(regs) = 0xDEADBEEF;
+ case 3:
+ HYPERCALL_ARG3(regs) = 0xDEADBEEF;
+ case 2:
+ HYPERCALL_ARG2(regs) = 0xDEADBEEF;
case 1: /* Don't clobber x0/r0 -- it's the return value */
break;
- default: BUG();
+ default:
+ BUG();
}
*nr = 0xDEADBEEF;
}
@@ -1566,23 +1580,24 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *state)
*
* bit position in short is condition code: NZCV
*/
-static const unsigned short cc_map[16] = {
- 0xF0F0, /* EQ == Z set */
- 0x0F0F, /* NE */
- 0xCCCC, /* CS == C set */
- 0x3333, /* CC */
- 0xFF00, /* MI == N set */
- 0x00FF, /* PL */
- 0xAAAA, /* VS == V set */
- 0x5555, /* VC */
- 0x0C0C, /* HI == C set && Z clear */
- 0xF3F3, /* LS == C clear || Z set */
- 0xAA55, /* GE == (N==V) */
- 0x55AA, /* LT == (N!=V) */
- 0x0A05, /* GT == (!Z && (N==V)) */
- 0xF5FA, /* LE == (Z || (N!=V)) */
- 0xFFFF, /* AL always */
- 0 /* NV */
+static const unsigned short cc_map[16] =
+{
+ 0xF0F0, /* EQ == Z set */
+ 0x0F0F, /* NE */
+ 0xCCCC, /* CS == C set */
+ 0x3333, /* CC */
+ 0xFF00, /* MI == N set */
+ 0x00FF, /* PL */
+ 0xAAAA, /* VS == V set */
+ 0x5555, /* VC */
+ 0x0C0C, /* HI == C set && Z clear */
+ 0xF3F3, /* LS == C clear || Z set */
+ 0xAA55, /* GE == (N==V) */
+ 0x55AA, /* LT == (N!=V) */
+ 0x0A05, /* GT == (!Z && (N==V)) */
+ 0xF5FA, /* LE == (Z || (N!=V)) */
+ 0xFFFF, /* AL always */
+ 0 /* NV */
};
int check_conditional_instr(struct cpu_user_regs *regs, const union hsr hsr)
@@ -1790,7 +1805,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
offset, mfn_to_maddr(mfn), first[offset]);
if ( !(first[offset] & 0x1) ||
- (first[offset] & 0x2) )
+ (first[offset] & 0x2) )
goto done;
mfn = gfn_to_mfn(d, gaddr_to_gfn(first[offset]));
@@ -1806,8 +1821,10 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
offset, mfn_to_maddr(mfn), second[offset]);
done:
- if ( second ) unmap_domain_page(second);
- if ( first ) unmap_domain_page(first);
+ if ( second )
+ unmap_domain_page(second);
+ if ( first )
+ unmap_domain_page(first);
}
/*
@@ -1937,7 +1954,8 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
{
case FSC_FLT_PERM:
{
- const struct npfec npfec = {
+ const struct npfec npfec =
+ {
.insn_fetch = !is_data,
.read_access = is_data && !hsr.dabt.write,
.write_access = is_data && hsr.dabt.write,
@@ -2008,7 +2026,7 @@ static inline bool needs_ssbd_flip(struct vcpu *v)
return false;
return !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) &&
- cpu_require_ssbd_mitigation();
+ cpu_require_ssbd_mitigation();
}
static void enter_hypervisor_head(struct cpu_user_regs *regs)
@@ -2066,11 +2084,14 @@ void do_trap_guest_sync(struct cpu_user_regs *regs)
advance_pc(regs, hsr);
return;
}
- if ( hsr.wfi_wfe.ti ) {
+ if ( hsr.wfi_wfe.ti )
+ {
/* Yield the VCPU for WFE */
perfc_incr(trap_wfe);
vcpu_yield();
- } else {
+ }
+ else
+ {
/* Block the VCPU for WFI */
perfc_incr(trap_wfi);
vcpu_block_unless_event_pending(current);
diff --git a/xen/arch/arm/vcpreg.c b/xen/arch/arm/vcpreg.c
index cdc91cdf5b..2dc2798ea1 100644
--- a/xen/arch/arm/vcpreg.c
+++ b/xen/arch/arm/vcpreg.c
@@ -207,30 +207,30 @@ void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr)
p2m_set_way_flush(current);
break;
- /*
- * HCR_EL2.TVM
- *
- * ARMv8 (DDI 0487D.a): Table D1-38
- */
- GENERATE_CASE(SCTLR, 32)
- GENERATE_CASE(TTBR0_32, 32)
- GENERATE_CASE(TTBR1_32, 32)
- GENERATE_CASE(TTBCR, 32)
- GENERATE_CASE(TTBCR2, 32)
- GENERATE_CASE(DACR, 32)
- GENERATE_CASE(DFSR, 32)
- GENERATE_CASE(IFSR, 32)
- GENERATE_CASE(DFAR, 32)
- GENERATE_CASE(IFAR, 32)
- GENERATE_CASE(ADFSR, 32)
- GENERATE_CASE(AIFSR, 32)
- /* AKA PRRR */
- GENERATE_CASE(MAIR0, 32)
- /* AKA NMRR */
- GENERATE_CASE(MAIR1, 32)
- GENERATE_CASE(AMAIR0, 32)
- GENERATE_CASE(AMAIR1, 32)
- GENERATE_CASE(CONTEXTIDR, 32)
+ /*
+ * HCR_EL2.TVM
+ *
+ * ARMv8 (DDI 0487D.a): Table D1-38
+ */
+ GENERATE_CASE(SCTLR, 32)
+ GENERATE_CASE(TTBR0_32, 32)
+ GENERATE_CASE(TTBR1_32, 32)
+ GENERATE_CASE(TTBCR, 32)
+ GENERATE_CASE(TTBCR2, 32)
+ GENERATE_CASE(DACR, 32)
+ GENERATE_CASE(DFSR, 32)
+ GENERATE_CASE(IFSR, 32)
+ GENERATE_CASE(DFAR, 32)
+ GENERATE_CASE(IFAR, 32)
+ GENERATE_CASE(ADFSR, 32)
+ GENERATE_CASE(AIFSR, 32)
+ /* AKA PRRR */
+ GENERATE_CASE(MAIR0, 32)
+ /* AKA NMRR */
+ GENERATE_CASE(MAIR1, 32)
+ GENERATE_CASE(AMAIR0, 32)
+ GENERATE_CASE(AMAIR1, 32)
+ GENERATE_CASE(CONTEXTIDR, 32)
/*
* MDCR_EL2.TPM
@@ -361,8 +361,8 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr)
return inject_undef_exception(regs, hsr);
break;
- GENERATE_CASE(TTBR0, 64)
- GENERATE_CASE(TTBR1, 64)
+ GENERATE_CASE(TTBR0, 64)
+ GENERATE_CASE(TTBR1, 64)
/*
* CPTR_EL2.T{0..9,12..13}
@@ -382,18 +382,18 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr)
* And all other unknown registers.
*/
default:
- {
- const struct hsr_cp64 cp64 = hsr.cp64;
-
- gdprintk(XENLOG_ERR,
- "%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
- cp64.read ? "mrrc" : "mcrr",
- cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
- gdprintk(XENLOG_ERR, "unhandled 64-bit CP15 access %#x\n",
- hsr.bits & HSR_CP64_REGS_MASK);
- inject_undef_exception(regs, hsr);
- return;
- }
+ {
+ const struct hsr_cp64 cp64 = hsr.cp64;
+
+ gdprintk(XENLOG_ERR,
+ "%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
+ cp64.read ? "mrrc" : "mcrr",
+ cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
+ gdprintk(XENLOG_ERR, "unhandled 64-bit CP15 access %#x\n",
+ hsr.bits & HSR_CP64_REGS_MASK);
+ inject_undef_exception(regs, hsr);
+ return;
+ }
}
advance_pc(regs, hsr);
}
@@ -467,7 +467,7 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr)
*/
val = (1 << 24) | (5 << 16);
val |= ((current_cpu_data.midr.bits >> 20) & 0xf) |
- (current_cpu_data.midr.bits & 0xf);
+ (current_cpu_data.midr.bits & 0xf);
set_user_reg(regs, regidx, val);
break;
@@ -518,8 +518,8 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr)
default:
gdprintk(XENLOG_ERR,
"%s p14, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
- cp32.read ? "mrc" : "mcr",
- cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
+ cp32.read ? "mrc" : "mcr",
+ cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
gdprintk(XENLOG_ERR, "unhandled 32-bit cp14 access %#x\n",
hsr.bits & HSR_CP32_REGS_MASK);
inject_undef_exception(regs, hsr);
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 64b141fea5..69afaf172f 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -33,7 +33,8 @@
#include <asm/vgic-emul.h>
#include <asm/vreg.h>
-static struct {
+static struct
+{
bool enabled;
/* Distributor interface address */
paddr_t dbase;
@@ -144,8 +145,8 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
if ( !new_target || (new_target > d->max_vcpus) )
{
gprintk(XENLOG_WARNING,
- "No valid vCPU found for vIRQ%u in the target list (%#x). Skip it\n",
- virq, new_mask);
+ "No valid vCPU found for vIRQ%u in the target list (%#x). Skip it\n",
+ virq, new_mask);
continue;
}
@@ -158,8 +159,8 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
if ( new_target != old_target )
{
if ( vgic_migrate_irq(d->vcpu[old_target],
- d->vcpu[new_target],
- virq) )
+ d->vcpu[new_target],
+ virq) )
write_atomic(&rank->vcpu[offset], new_target);
}
}
@@ -178,7 +179,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
switch ( gicd_reg )
{
case VREG32(GICD_CTLR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
vgic_lock(v);
*r = vreg_reg32_extract(v->domain->arch.vgic.ctlr, info);
vgic_unlock(v);
@@ -188,11 +190,12 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
{
uint32_t typer;
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
/* No secure world support for guests. */
vgic_lock(v);
typer = ((v->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT)
- | DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32);
+ | DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32);
vgic_unlock(v);
*r = vreg_reg32_extract(typer, info);
@@ -201,7 +204,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
}
case VREG32(GICD_IIDR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
/*
* XXX Do we need a JEP106 manufacturer ID?
* Just use the physical h/w value for now
@@ -223,18 +227,22 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
goto read_as_zero_32;
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ISENABLER, DABT_WORD);
- if ( rank == NULL) goto read_as_zero;
+ if ( rank == NULL)
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
*r = vreg_reg32_extract(rank->ienable, info);
vgic_unlock_rank(v, rank, flags);
return 1;
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ICENABLER, DABT_WORD);
- if ( rank == NULL) goto read_as_zero;
+ if ( rank == NULL)
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
*r = vreg_reg32_extract(rank->ienable, info);
vgic_unlock_rank(v, rank, flags);
@@ -255,9 +263,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
uint32_t ipriorityr;
uint8_t rank_index;
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD);
- if ( rank == NULL ) goto read_as_zero;
+ if ( rank == NULL )
+ goto read_as_zero;
rank_index = REG_RANK_INDEX(8, gicd_reg - GICD_IPRIORITYR, DABT_WORD);
vgic_lock_rank(v, rank, flags);
@@ -275,9 +285,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
{
uint32_t itargetsr;
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD);
- if ( rank == NULL) goto read_as_zero;
+ if ( rank == NULL)
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
vgic_unlock_rank(v, rank, flags);
@@ -293,9 +305,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
{
uint32_t icfgr;
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 2, gicd_reg - GICD_ICFGR, DABT_WORD);
- if ( rank == NULL) goto read_as_zero;
+ if ( rank == NULL)
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
icfgr = rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, DABT_WORD)];
vgic_unlock_rank(v, rank, flags);
@@ -313,7 +327,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
goto read_as_zero_32;
case VREG32(GICD_SGIR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
/* Write only -- read unknown */
*r = 0xdeadbeef;
return 1;
@@ -333,7 +348,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
goto read_impl_defined;
case VREG32(GICD_ICPIDR2):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR "%pv: vGICD: unhandled read from ICPIDR2\n", v);
return 0;
@@ -352,7 +368,8 @@ bad_width:
return 0;
read_as_zero_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
read_as_zero:
*r = 0;
return 1;
@@ -421,7 +438,8 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
switch ( gicd_reg )
{
case VREG32(GICD_CTLR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
/* Ignore all but the enable bit */
vgic_lock(v);
vreg_reg32_update(&v->domain->arch.vgic.ctlr, r, info);
@@ -449,9 +467,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
goto write_ignore_32;
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ISENABLER, DABT_WORD);
- if ( rank == NULL) goto write_ignore;
+ if ( rank == NULL)
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
tr = rank->ienable;
vreg_reg32_setbits(&rank->ienable, r, info);
@@ -460,9 +480,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
return 1;
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ICENABLER, DABT_WORD);
- if ( rank == NULL) goto write_ignore;
+ if ( rank == NULL)
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
tr = rank->ienable;
vreg_reg32_clearbits(&rank->ienable, r, info);
@@ -471,21 +493,24 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
return 1;
case VRANGE32(GICD_ISPENDR, GICD_ISPENDRN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: vGICD: unhandled word write %#"PRIregister" to ISPENDR%d\n",
v, r, gicd_reg - GICD_ISPENDR);
return 0;
case VRANGE32(GICD_ICPENDR, GICD_ICPENDRN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: vGICD: unhandled word write %#"PRIregister" to ICPENDR%d\n",
v, r, gicd_reg - GICD_ICPENDR);
return 0;
case VRANGE32(GICD_ISACTIVER, GICD_ISACTIVERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
if ( r == 0 )
goto write_ignore_32;
printk(XENLOG_G_ERR
@@ -503,9 +528,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
{
uint32_t *ipriorityr, priority;
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD);
- if ( rank == NULL) goto write_ignore;
+ if ( rank == NULL)
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
ipriorityr = &rank->ipriorityr[REG_RANK_INDEX(8,
gicd_reg - GICD_IPRIORITYR,
@@ -529,9 +556,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
{
uint32_t itargetsr;
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD);
- if ( rank == NULL) goto write_ignore;
+ if ( rank == NULL)
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
vreg_reg32_update(&itargetsr, r, info);
@@ -552,12 +581,14 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
goto write_ignore_32;
case VRANGE32(GICD_ICFGR2, GICD_ICFGRN): /* SPIs */
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 2, gicd_reg - GICD_ICFGR, DABT_WORD);
- if ( rank == NULL) goto write_ignore;
+ if ( rank == NULL)
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
vreg_reg32_update(&rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR,
- DABT_WORD)],
+ DABT_WORD)],
r, info);
vgic_unlock_rank(v, rank, flags);
return 1;
@@ -570,21 +601,24 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
goto write_ignore_32;
case VREG32(GICD_SGIR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
return vgic_v2_to_sgi(v, r);
case VRANGE32(0xF04, 0xF0C):
goto write_reserved;
case VRANGE32(GICD_CPENDSGIR, GICD_CPENDSGIRN):
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
v, dabt.size ? "word" : "byte", r, gicd_reg - GICD_CPENDSGIR);
return 0;
case VRANGE32(GICD_SPENDSGIR, GICD_SPENDSGIRN):
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
v, dabt.size ? "word" : "byte", r, gicd_reg - GICD_SPENDSGIR);
@@ -602,7 +636,7 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
goto write_ignore_32;
case VRANGE32(0xFEC, 0xFFC):
- /* Implementation defined identification registers */
+ /* Implementation defined identification registers */
default:
printk(XENLOG_G_ERR
@@ -618,7 +652,8 @@ bad_width:
return 0;
write_ignore_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
write_ignore:
return 1;
@@ -635,7 +670,8 @@ write_reserved:
return 1;
}
-static const struct mmio_handler_ops vgic_v2_distr_mmio_handler = {
+static const struct mmio_handler_ops vgic_v2_distr_mmio_handler =
+{
.read = vgic_v2_distr_mmio_read,
.write = vgic_v2_distr_mmio_write,
};
@@ -719,7 +755,8 @@ static int vgic_v2_lpi_get_priority(struct domain *d, unsigned int vlpi)
BUG();
}
-static const struct vgic_ops vgic_v2_ops = {
+static const struct vgic_ops vgic_v2_ops =
+{
.vcpu_init = vgic_v2_vcpu_init,
.domain_init = vgic_v2_domain_init,
.domain_free = vgic_v2_domain_free,
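
The vgic-v2 hunks above repeatedly reflow the one-line access-width checks ("if ( dabt.size != DABT_WORD ) goto bad_width;") and the read-as-zero fall-throughs onto two lines. As a standalone illustration of that structure, here is a minimal sketch of an MMIO read path with the same bad_width / read_as_zero_32 shape; every type, constant and register offset in it is a simplified stand-in, not Xen's.

    /* Minimal sketch of the width-check / read-as-zero pattern used above.
     * All types and constants here are simplified stand-ins, not Xen's own. */
    #include <stdint.h>
    #include <stdio.h>

    #define DABT_BYTE 0
    #define DABT_WORD 2          /* assumption: 2 encodes a 32-bit access */

    struct access_info { unsigned int size; };

    static int emulate_read(const struct access_info *info, unsigned int reg,
                            uint32_t *r)
    {
        switch ( reg )
        {
        case 0x000:                       /* e.g. a control register */
            if ( info->size != DABT_WORD )
                goto bad_width;
            *r = 0x1;                     /* some register value */
            return 1;

        case 0x004:                       /* a register emulated as RAZ */
            goto read_as_zero_32;

        default:
            return 0;
        }

    bad_width:
        fprintf(stderr, "bad access width %u for reg %#x\n", info->size, reg);
        return 0;

    read_as_zero_32:
        if ( info->size != DABT_WORD )
            goto bad_width;
        *r = 0;
        return 1;
    }

    int main(void)
    {
        struct access_info info = { .size = DABT_WORD };
        uint32_t val;

        return emulate_read(&info, 0x004, &val) ? 0 : 1;
    }
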
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 6e153c698d..ebd143106c 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -52,7 +52,8 @@
* If both the vcmd_lock and the its_lock are required, the vcmd_lock must
* be taken first.
*/
-struct virt_its {
+struct virt_its
+{
struct domain *d;
struct list_head vits_list;
paddr_t doorbell_address;
@@ -113,7 +114,7 @@ static paddr_t get_baser_phys_addr(uint64_t reg)
{
if ( reg & BIT(9, UL) )
return (reg & GENMASK(47, 16)) |
- ((reg & GENMASK(15, 12)) << 36);
+ ((reg & GENMASK(15, 12)) << 36);
else
return reg & GENMASK(47, 12);
}
@@ -569,11 +570,11 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr)
else
ret = err;
}
- /*
- * Loop over the next gang of pending_irqs until we reached the end of
- * a (fully populated) tree or the lookup function returns less LPIs than
- * it has been asked for.
- */
+ /*
+ * Loop over the next gang of pending_irqs until we reached the end of
+ * a (fully populated) tree or the lookup function returns less LPIs than
+ * it has been asked for.
+ */
} while ( (++vlpi < its->d->arch.vgic.nr_lpis) &&
(nr_lpis == ARRAY_SIZE(pirqs)) );
@@ -1006,7 +1007,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
*/
bool have_cmd_lock;
- if ( info->dabt.size != DABT_WORD ) goto bad_width;
+ if ( info->dabt.size != DABT_WORD )
+ goto bad_width;
have_cmd_lock = spin_trylock(&its->vcmd_lock);
reg = its->enabled ? GITS_CTLR_ENABLE : 0;
@@ -1022,12 +1024,14 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
}
case VREG32(GITS_IIDR):
- if ( info->dabt.size != DABT_WORD ) goto bad_width;
+ if ( info->dabt.size != DABT_WORD )
+ goto bad_width;
*r = vreg_reg32_extract(GITS_IIDR_VALUE, info);
break;
case VREG64(GITS_TYPER):
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
reg = GITS_TYPER_PHYSICAL;
reg |= (sizeof(struct vits_itte) - 1) << GITS_TYPER_ITT_SIZE_SHIFT;
@@ -1044,14 +1048,16 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
goto read_reserved;
case VREG64(GITS_CBASER):
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
spin_lock(&its->its_lock);
*r = vreg_reg64_extract(its->cbaser, info);
spin_unlock(&its->its_lock);
break;
case VREG64(GITS_CWRITER):
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
/* CWRITER is only written by the guest, so no extra locking here. */
reg = its->cwriter;
@@ -1059,7 +1065,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
break;
case VREG64(GITS_CREADR):
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
/*
* Lockless access, to avoid waiting for the whole command queue to be
@@ -1075,14 +1082,16 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
goto read_reserved;
case VREG64(GITS_BASER0): /* device table */
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
spin_lock(&its->its_lock);
*r = vreg_reg64_extract(its->baser_dev, info);
spin_unlock(&its->its_lock);
break;
case VREG64(GITS_BASER1): /* collection table */
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
spin_lock(&its->its_lock);
*r = vreg_reg64_extract(its->baser_coll, info);
spin_unlock(&its->its_lock);
@@ -1098,7 +1107,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
goto read_impl_defined;
case VREG32(GITS_PIDR2):
- if ( info->dabt.size != DABT_WORD ) goto bad_width;
+ if ( info->dabt.size != DABT_WORD )
+ goto bad_width;
*r = vreg_reg32_extract(GIC_PIDR2_ARCH_GICv3, info);
break;
@@ -1115,7 +1125,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
return 1;
read_as_zero_64:
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
*r = 0;
return 1;
@@ -1173,7 +1184,8 @@ static bool vgic_v3_verify_its_status(struct virt_its *its, bool status)
!(its->baser_dev & GITS_VALID_BIT) ||
!(its->baser_coll & GITS_VALID_BIT) )
{
- printk(XENLOG_G_WARNING "d%d tried to enable ITS without having the tables configured.\n",
+ printk(XENLOG_G_WARNING
+ "d%d tried to enable ITS without having the tables configured.\n",
its->d->domain_id);
return false;
}
@@ -1249,7 +1261,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
{
uint32_t ctlr;
- if ( info->dabt.size != DABT_WORD ) goto bad_width;
+ if ( info->dabt.size != DABT_WORD )
+ goto bad_width;
/*
* We need to take the vcmd_lock to prevent a guest from disabling
@@ -1283,7 +1296,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
goto write_reserved;
case VREG64(GITS_CBASER):
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
spin_lock(&its->its_lock);
/* Changing base registers with the ITS enabled is UNPREDICTABLE. */
@@ -1306,7 +1320,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
return 1;
case VREG64(GITS_CWRITER):
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
spin_lock(&its->vcmd_lock);
reg = ITS_CMD_OFFSET(its->cwriter);
@@ -1328,7 +1343,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
goto write_reserved;
case VREG64(GITS_BASER0): /* device table */
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
spin_lock(&its->its_lock);
@@ -1339,7 +1355,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
if ( its->enabled )
{
spin_unlock(&its->its_lock);
- gdprintk(XENLOG_WARNING, "vGITS: tried to change BASER with the ITS enabled.\n");
+ gdprintk(XENLOG_WARNING,
+ "vGITS: tried to change BASER with the ITS enabled.\n");
return 1;
}
@@ -1367,7 +1384,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
return 1;
case VREG64(GITS_BASER1): /* collection table */
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
spin_lock(&its->its_lock);
/*
@@ -1423,11 +1441,13 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
return 1;
write_ignore_64:
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(info->dabt) )
+ goto bad_width;
return 1;
write_ignore_32:
- if ( info->dabt.size != DABT_WORD ) goto bad_width;
+ if ( info->dabt.size != DABT_WORD )
+ goto bad_width;
return 1;
write_impl_defined:
@@ -1449,7 +1469,8 @@ bad_width:
return 0;
}
-static const struct mmio_handler_ops vgic_its_mmio_handler = {
+static const struct mmio_handler_ops vgic_its_mmio_handler =
+{
.read = vgic_v3_its_mmio_read,
.write = vgic_v3_its_mmio_write,
};
@@ -1504,7 +1525,7 @@ unsigned int vgic_v3_its_count(const struct domain *d)
return 0;
list_for_each_entry(hw_its, &host_its_list, entry)
- ret++;
+ ret++;
return ret;
}
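
The virt_its hunks above keep the comment that, when both the command lock and the ITS state lock are needed, the vcmd_lock must be taken first. A small standalone sketch of that ordering follows; pthread mutexes stand in for Xen's spinlocks and the field names are illustrative only.

    /* Sketch of the lock ordering described above: when both the command-queue
     * lock and the ITS state lock are needed, take the command lock first.
     * pthread mutexes stand in for Xen's spinlocks (assumption). */
    #include <pthread.h>

    struct virt_its_sketch {
        pthread_mutex_t vcmd_lock;   /* protects command queue handling */
        pthread_mutex_t its_lock;    /* protects registers / table pointers */
        unsigned long cwriter;
        unsigned long cbaser;
    };

    /* Command handling needs both locks: vcmd_lock strictly first. */
    static void handle_command(struct virt_its_sketch *its)
    {
        pthread_mutex_lock(&its->vcmd_lock);
        pthread_mutex_lock(&its->its_lock);

        its->cwriter += 32;          /* pretend we consumed one command */

        pthread_mutex_unlock(&its->its_lock);
        pthread_mutex_unlock(&its->vcmd_lock);
    }

    /* A register access that only touches ITS state may take its_lock alone. */
    static void write_cbaser(struct virt_its_sketch *its, unsigned long val)
    {
        pthread_mutex_lock(&its->its_lock);
        its->cbaser = val;
        pthread_mutex_unlock(&its->its_lock);
    }

    int main(void)
    {
        struct virt_its_sketch its = {
            .vcmd_lock = PTHREAD_MUTEX_INITIALIZER,
            .its_lock  = PTHREAD_MUTEX_INITIALIZER,
        };

        write_cbaser(&its, 0x1000);
        handle_command(&its);

        return its.cwriter == 32 ? 0 : 1;
    }
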
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 422b94f902..8fcc2c128d 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -51,7 +51,8 @@
*/
#define VGICD_CTLR_DEFAULT (GICD_CTLR_ARE_NS)
-static struct {
+static struct
+{
bool enabled;
/* Distributor interface address */
paddr_t dbase;
@@ -174,7 +175,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
if ( !v->domain->arch.vgic.has_its )
goto read_as_zero_32;
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
spin_lock_irqsave(&v->arch.vgic.lock, flags);
*r = vreg_reg32_extract(!!(v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED),
@@ -184,7 +186,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
}
case VREG32(GICR_IIDR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
*r = vreg_reg32_extract(GICV3_GICR_IIDR_VAL, info);
return 1;
@@ -192,7 +195,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
{
uint64_t typer, aff;
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
aff = (MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 3) << 56 |
MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 2) << 48 |
MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 40 |
@@ -240,7 +244,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
case VREG64(GICR_PROPBASER):
if ( !v->domain->arch.vgic.has_its )
goto read_as_zero_64;
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
vgic_lock(v);
*r = vreg_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
@@ -253,7 +258,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
if ( !v->domain->arch.vgic.has_its )
goto read_as_zero_64;
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
spin_lock_irqsave(&v->arch.vgic.lock, flags);
*r = vreg_reg64_extract(v->arch.vgic.rdist_pendbase, info);
@@ -280,7 +286,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
goto read_reserved;
case VREG32(GICR_SYNCR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
        /* RO. A read always reports the busy status in bit[0] */
*r = vreg_reg32_extract(GICR_SYNCR_NOT_BUSY, info);
return 1;
@@ -305,16 +312,17 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
case 0xFFD0 ... 0xFFE4:
/* Implementation defined identification registers */
- goto read_impl_defined;
+ goto read_impl_defined;
case VREG32(GICR_PIDR2):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
*r = vreg_reg32_extract(GICV3_GICR_PIDR2, info);
- return 1;
+ return 1;
case 0xFFEC ... 0xFFFC:
- /* Implementation defined identification registers */
- goto read_impl_defined;
+ /* Implementation defined identification registers */
+ goto read_impl_defined;
default:
printk(XENLOG_G_ERR
@@ -328,12 +336,14 @@ bad_width:
return 0;
read_as_zero_64:
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
*r = 0;
return 1;
read_as_zero_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
*r = 0;
return 1;
@@ -488,7 +498,8 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
if ( !v->domain->arch.vgic.has_its )
goto write_ignore_32;
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
vgic_lock(v); /* protects rdists_enabled */
spin_lock_irqsave(&v->arch.vgic.lock, flags);
@@ -540,7 +551,8 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
case VREG64(GICR_PROPBASER):
if ( !v->domain->arch.vgic.has_its )
goto write_ignore_64;
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
vgic_lock(v);
@@ -566,7 +578,8 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
if ( !v->domain->arch.vgic.has_its )
goto write_ignore_64;
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
spin_lock_irqsave(&v->arch.vgic.lock, flags);
@@ -625,15 +638,15 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
case 0xFFD0 ... 0xFFE4:
/* Implementation defined identification registers */
- goto write_impl_defined;
+ goto write_impl_defined;
case VREG32(GICR_PIDR2):
/* RO */
goto write_ignore_32;
case 0xFFEC ... 0xFFFC:
- /* Implementation defined identification registers */
- goto write_impl_defined;
+ /* Implementation defined identification registers */
+ goto write_impl_defined;
default:
printk(XENLOG_G_ERR "%pv: vGICR: unhandled write r%d offset %#08x\n",
@@ -642,16 +655,18 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
}
bad_width:
printk(XENLOG_G_ERR
- "%pv: vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
- v, dabt.size, dabt.reg, r, gicr_reg);
+ "%pv: vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, r, gicr_reg);
return 0;
write_ignore_64:
- if ( vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( vgic_reg64_check_access(dabt) )
+ goto bad_width;
return 1;
write_ignore_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
return 1;
write_impl_defined:
@@ -679,22 +694,27 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v,
{
case VRANGE32(GICD_IGROUPR, GICD_IGROUPRN):
/* We do not implement security extensions for guests, read zero */
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
goto read_as_zero;
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, reg - GICD_ISENABLER, DABT_WORD);
- if ( rank == NULL ) goto read_as_zero;
+ if ( rank == NULL )
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
*r = vreg_reg32_extract(rank->ienable, info);
vgic_unlock_rank(v, rank, flags);
return 1;
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, reg - GICD_ICENABLER, DABT_WORD);
- if ( rank == NULL ) goto read_as_zero;
+ if ( rank == NULL )
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
*r = vreg_reg32_extract(rank->ienable, info);
vgic_unlock_rank(v, rank, flags);
@@ -715,9 +735,11 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v,
uint32_t ipriorityr;
uint8_t rank_index;
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 8, reg - GICD_IPRIORITYR, DABT_WORD);
- if ( rank == NULL ) goto read_as_zero;
+ if ( rank == NULL )
+ goto read_as_zero;
rank_index = REG_RANK_INDEX(8, reg - GICD_IPRIORITYR, DABT_WORD);
vgic_lock_rank(v, rank, flags);
@@ -733,9 +755,11 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v,
{
uint32_t icfgr;
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 2, reg - GICD_ICFGR, DABT_WORD);
- if ( rank == NULL ) goto read_as_zero;
+ if ( rank == NULL )
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
icfgr = rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR, DABT_WORD)];
vgic_unlock_rank(v, rank, flags);
@@ -778,9 +802,11 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v,
goto write_ignore_32;
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, reg - GICD_ISENABLER, DABT_WORD);
- if ( rank == NULL ) goto write_ignore;
+ if ( rank == NULL )
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
tr = rank->ienable;
vreg_reg32_setbits(&rank->ienable, r, info);
@@ -789,9 +815,11 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v,
return 1;
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 1, reg - GICD_ICENABLER, DABT_WORD);
- if ( rank == NULL ) goto write_ignore;
+ if ( rank == NULL )
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
tr = rank->ienable;
vreg_reg32_clearbits(&rank->ienable, r, info);
@@ -800,21 +828,24 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v,
return 1;
case VRANGE32(GICD_ISPENDR, GICD_ISPENDRN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: %s: unhandled word write %#"PRIregister" to ISPENDR%d\n",
v, name, r, reg - GICD_ISPENDR);
return 0;
case VRANGE32(GICD_ICPENDR, GICD_ICPENDRN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: %s: unhandled word write %#"PRIregister" to ICPENDR%d\n",
v, name, r, reg - GICD_ICPENDR);
return 0;
case VRANGE32(GICD_ISACTIVER, GICD_ISACTIVERN):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: %s: unhandled word write %#"PRIregister" to ISACTIVER%d\n",
v, name, r, reg - GICD_ISACTIVER);
@@ -830,12 +861,14 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v,
{
uint32_t *ipriorityr, priority;
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 8, reg - GICD_IPRIORITYR, DABT_WORD);
- if ( rank == NULL ) goto write_ignore;
+ if ( rank == NULL )
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
ipriorityr = &rank->ipriorityr[REG_RANK_INDEX(8, reg - GICD_IPRIORITYR,
- DABT_WORD)];
+ DABT_WORD)];
priority = ACCESS_ONCE(*ipriorityr);
vreg_reg32_update(&priority, r, info);
ACCESS_ONCE(*ipriorityr) = priority;
@@ -849,12 +882,14 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v,
case VRANGE32(GICD_ICFGR + 4, GICD_ICFGRN): /* PPI + SPIs */
/* ICFGR1 for PPI's, which is implementation defined
if ICFGR1 is programmable or not. We chose to program */
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
rank = vgic_rank_offset(v, 2, reg - GICD_ICFGR, DABT_WORD);
- if ( rank == NULL ) goto write_ignore;
+ if ( rank == NULL )
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
vreg_reg32_update(&rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR,
- DABT_WORD)],
+ DABT_WORD)],
r, info);
vgic_unlock_rank(v, rank, flags);
return 1;
@@ -873,7 +908,8 @@ bad_width:
return 0;
write_ignore_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
write_ignore:
return 1;
}
@@ -892,10 +928,10 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info,
case VREG32(GICR_ICACTIVER0):
case VRANGE32(GICR_IPRIORITYR0, GICR_IPRIORITYR7):
case VRANGE32(GICR_ICFGR0, GICR_ICFGR1):
- /*
- * Above registers offset are common with GICD.
- * So handle in common with GICD handling
- */
+ /*
+ * Above registers offset are common with GICD.
+ * So handle in common with GICD handling
+ */
return __vgic_v3_distr_common_mmio_read("vGICR: SGI", v, info,
gicr_reg, r);
@@ -933,7 +969,8 @@ bad_width:
return 0;
read_as_zero_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
read_as_zero:
*r = 0;
return 1;
@@ -968,22 +1005,24 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info,
case VREG32(GICR_ICACTIVER0):
case VREG32(GICR_ICFGR1):
case VRANGE32(GICR_IPRIORITYR0, GICR_IPRIORITYR7):
- /*
- * Above registers offset are common with GICD.
- * So handle common with GICD handling
- */
+ /*
+ * Above registers offset are common with GICD.
+ * So handle common with GICD handling
+ */
return __vgic_v3_distr_common_mmio_write("vGICR: SGI", v,
info, gicr_reg, r);
case VREG32(GICR_ISPENDR0):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: vGICR: SGI: unhandled word write %#"PRIregister" to ISPENDR0\n",
v, r);
return 0;
case VREG32(GICR_ICPENDR0):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
printk(XENLOG_G_ERR
"%pv: vGICR: SGI: unhandled word write %#"PRIregister" to ICPENDR0\n",
v, r);
@@ -1012,13 +1051,14 @@ bad_width:
return 0;
write_ignore_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
return 1;
}
static struct vcpu *get_vcpu_from_rdist(struct domain *d,
- const struct vgic_rdist_region *region,
- paddr_t gpa, uint32_t *offset)
+ const struct vgic_rdist_region *region,
+ paddr_t gpa, uint32_t *offset)
{
struct vcpu *v;
unsigned int vcpu_id;
@@ -1053,7 +1093,7 @@ static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info,
else
printk(XENLOG_G_WARNING
"%pv: vGICR: unknown gpa read address %"PRIpaddr"\n",
- v, info->gpa);
+ v, info->gpa);
return 0;
}
@@ -1095,7 +1135,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
switch ( gicd_reg )
{
case VREG32(GICD_CTLR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
vgic_lock(v);
*r = vreg_reg32_extract(v->domain->arch.vgic.ctlr, info);
vgic_unlock(v);
@@ -1114,7 +1155,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
unsigned int ncpus = min_t(unsigned int, v->domain->max_vcpus, 8);
uint32_t typer;
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
/* No secure world support for guests. */
typer = ((ncpus - 1) << GICD_TYPE_CPUS_SHIFT |
DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32));
@@ -1130,7 +1172,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
}
case VREG32(GICD_IIDR):
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
*r = vreg_reg32_extract(GICV3_GICD_IIDR_VAL, info);
return 1;
@@ -1216,10 +1259,12 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
{
uint64_t irouter;
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
DABT_DOUBLE_WORD);
- if ( rank == NULL ) goto read_as_zero;
+ if ( rank == NULL )
+ goto read_as_zero;
vgic_lock_rank(v, rank, flags);
irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER);
vgic_unlock_rank(v, rank, flags);
@@ -1237,17 +1282,18 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
case VRANGE32(0xFFD0, 0xFFE4):
/* Implementation defined identification registers */
- goto read_impl_defined;
+ goto read_impl_defined;
case VREG32(GICD_PIDR2):
/* GICv3 identification value */
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
*r = vreg_reg32_extract(GICV3_GICD_PIDR2, info);
return 1;
case VRANGE32(0xFFEC, 0xFFFC):
- /* Implementation defined identification registers */
- goto read_impl_defined;
+ /* Implementation defined identification registers */
+ goto read_impl_defined;
default:
printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n",
@@ -1261,7 +1307,8 @@ bad_width:
return 0;
read_as_zero_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
*r = 0;
return 1;
@@ -1300,7 +1347,8 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
{
uint32_t ctlr = 0;
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
vgic_lock(v);
@@ -1389,12 +1437,14 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
case VRANGE32(GICD_CPENDSGIR, GICD_CPENDSGIRN):
/* Replaced with GICR_ICPENDR0. So ignore write */
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
return 0;
case VRANGE32(GICD_SPENDSGIR, GICD_SPENDSGIRN):
/* Replaced with GICR_ISPENDR0. So ignore write */
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
return 0;
case VRANGE32(0x0F30, 0x60FC):
@@ -1404,10 +1454,12 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
{
uint64_t irouter;
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+ if ( !vgic_reg64_check_access(dabt) )
+ goto bad_width;
rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
DABT_DOUBLE_WORD);
- if ( rank == NULL ) goto write_ignore;
+ if ( rank == NULL )
+ goto write_ignore;
vgic_lock_rank(v, rank, flags);
irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER);
vreg_reg64_update(&irouter, r, info);
@@ -1424,15 +1476,15 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
case VRANGE32(0xFFD0, 0xFFE4):
/* Implementation defined identification registers */
- goto write_impl_defined;
+ goto write_impl_defined;
case VREG32(GICD_PIDR2):
/* RO -- write ignore */
goto write_ignore_32;
case VRANGE32(0xFFEC, 0xFFFC):
- /* Implementation defined identification registers */
- goto write_impl_defined;
+ /* Implementation defined identification registers */
+ goto write_impl_defined;
default:
printk(XENLOG_G_ERR
@@ -1448,7 +1500,8 @@ bad_width:
return 0;
write_ignore_32:
- if ( dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD )
+ goto bad_width;
return 1;
write_ignore:
@@ -1563,12 +1616,14 @@ static bool vgic_v3_emulate_reg(struct cpu_user_regs *regs, union hsr hsr)
}
}
-static const struct mmio_handler_ops vgic_rdistr_mmio_handler = {
+static const struct mmio_handler_ops vgic_rdistr_mmio_handler =
+{
.read = vgic_v3_rdistr_mmio_read,
.write = vgic_v3_rdistr_mmio_write,
};
-static const struct mmio_handler_ops vgic_distr_mmio_handler = {
+static const struct mmio_handler_ops vgic_distr_mmio_handler =
+{
.read = vgic_v3_distr_mmio_read,
.write = vgic_v3_distr_mmio_write,
};
@@ -1642,7 +1697,7 @@ static inline unsigned int vgic_v3_max_rdist_count(struct domain *d)
* the architected single redistributor region.
*/
return is_hardware_domain(d) ? vgic_v3_hw.nr_rdist_regions :
- GUEST_GICV3_RDIST_REGIONS;
+ GUEST_GICV3_RDIST_REGIONS;
}
static int vgic_v3_domain_init(struct domain *d)
@@ -1790,7 +1845,8 @@ static int vgic_v3_lpi_get_priority(struct domain *d, uint32_t vlpi)
return p->lpi_priority;
}
-static const struct vgic_ops v3_ops = {
+static const struct vgic_ops v3_ops =
+{
.vcpu_init = vgic_v3_vcpu_init,
.domain_init = vgic_v3_domain_init,
.domain_free = vgic_v3_domain_free,
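
The vgic-v3 distributor write paths reflowed above all follow the same shape: lock the per-rank state, merge the guest's (possibly byte-wide) write into the shadowed 32-bit register with vreg_reg32_update(), then unlock. The sketch below shows what such a byte-merge update amounts to; the helper name, parameters and register layout are illustrative assumptions, not Xen's vreg implementation.

    /* Simplified sketch of merging a (possibly byte-wide) guest write into a
     * 32-bit shadowed register, in the spirit of the vreg_reg32_update()
     * calls above.  Names and layout here are illustrative only. */
    #include <stdint.h>
    #include <assert.h>

    static void reg32_update(uint32_t *reg, uint32_t val,
                             unsigned int offset, unsigned int size_bytes)
    {
        uint32_t mask;

        assert(offset + size_bytes <= 4);
        mask = (size_bytes == 4)
               ? 0xffffffffu
               : ((1u << (size_bytes * 8)) - 1) << (offset * 8);

        *reg = (*reg & ~mask) | ((val << (offset * 8)) & mask);
    }

    int main(void)
    {
        uint32_t ipriorityr = 0xa0a0a0a0u;

        /* A byte write of 0x10 to byte 2 only touches bits [23:16]. */
        reg32_update(&ipriorityr, 0x10, 2, 1);

        return ipriorityr == 0xa010a0a0u ? 0 : 1;
    }
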
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 82f524a35c..dedc0ac2a8 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -48,7 +48,7 @@ static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank)
* GICD_<FOO> with <b>-bits-per-interrupt.
*/
struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n,
- int s)
+ int s)
{
int rank = REG_RANK_NR(b, (n >> s));
@@ -101,7 +101,7 @@ int domain_vgic_register(struct domain *d, int *mmio_count)
#ifdef CONFIG_GICV3
case GIC_V3:
if ( vgic_v3_init(d, mmio_count) )
- return -ENODEV;
+ return -ENODEV;
break;
#endif
case GIC_V2:
@@ -174,7 +174,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis)
void register_vgic_ops(struct domain *d, const struct vgic_ops *ops)
{
- d->arch.vgic.handler = ops;
+ d->arch.vgic.handler = ops;
}
void domain_vgic_free(struct domain *d)
@@ -208,7 +208,7 @@ int vcpu_vgic_init(struct vcpu *v)
v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank);
if ( v->arch.vgic.private_irqs == NULL )
- return -ENOMEM;
+ return -ENOMEM;
/* SGIs/PPIs are always routed to this VCPU */
vgic_rank_init(v->arch.vgic.private_irqs, 0, v->vcpu_id);
@@ -273,7 +273,8 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
/* migration already in progress, no need to do anything */
if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
{
- gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", irq);
+ gprintk(XENLOG_WARNING,
+ "irq %u migration failed: requested while in progress\n", irq);
spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
return false;
}
@@ -346,7 +347,8 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
/* LPIs will never be disabled via this function. */
ASSERT(!is_lpi(32 * n + 31));
- while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
+ while ( (i = find_next_bit(&mask, 32, i)) < 32 )
+ {
irq = i + (32 * n);
v_target = vgic_get_target_vcpu(v, irq);
@@ -396,7 +398,8 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
/* LPIs will never be enabled via this function. */
ASSERT(!is_lpi(32 * n + 31));
- while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
+ while ( (i = find_next_bit(&mask, 32, i)) < 32 )
+ {
irq = i + (32 * n);
v_target = vgic_get_target_vcpu(v, irq);
spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
@@ -511,7 +514,7 @@ void vgic_clear_pending_irqs(struct vcpu *v)
spin_lock_irqsave(&v->arch.vgic.lock, flags);
list_for_each_entry_safe ( p, t, &v->arch.vgic.inflight_irqs, inflight )
- list_del_init(&p->inflight);
+ list_del_init(&p->inflight);
gic_clear_pending_irqs(v);
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
}
@@ -651,8 +654,7 @@ int vgic_allocate_virq(struct domain *d, bool spi)
virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first);
if ( virq >= end )
return -1;
- }
- while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) );
+ } while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) );
return virq;
}
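
The vgic_allocate_virq() hunk above reflows a do/while loop that searches the allocation bitmap for a free vIRQ and then claims it atomically, retrying if another CPU raced in between. Here is a small self-contained version of that allocate-a-free-bit loop; the __atomic builtin stands in for Xen's test_and_set_bit() and the 64-entry bitmap is an assumption of the sketch.

    /* Sketch of the allocate-a-free-bit loop shown above: find a zero bit,
     * claim it atomically, retry on a race.  __atomic builtins stand in for
     * Xen's test_and_set_bit() (assumption). */
    #include <stdbool.h>

    #define NR_VIRQS 64

    static unsigned long allocated;        /* bit i set => virq i in use */

    static int find_next_zero_bit64(unsigned long map, int start, int end)
    {
        for ( int i = start; i < end; i++ )
            if ( !(map & (1UL << i)) )
                return i;
        return end;
    }

    static bool test_and_set(unsigned long *map, int bit)
    {
        unsigned long old = __atomic_fetch_or(map, 1UL << bit,
                                              __ATOMIC_SEQ_CST);
        return old & (1UL << bit);
    }

    static int allocate_virq(int first, int end)
    {
        int virq;

        do {
            virq = find_next_zero_bit64(allocated, first, end);
            if ( virq >= end )
                return -1;
        } while ( test_and_set(&allocated, virq) );

        return virq;
    }

    int main(void)
    {
        return allocate_virq(0, NR_VIRQS) == 0 ? 0 : 1;
    }
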
diff --git a/xen/arch/arm/vgic/vgic-init.c b/xen/arch/arm/vgic/vgic-init.c
index 62ae553699..dcc98656db 100644
--- a/xen/arch/arm/vgic/vgic-init.c
+++ b/xen/arch/arm/vgic/vgic-init.c
@@ -215,7 +215,7 @@ int vcpu_vgic_init(struct vcpu *vcpu)
void domain_vgic_free(struct domain *d)
{
struct vgic_dist *dist = &d->arch.vgic;
- int i, ret;
+ int i, ret;
for ( i = 0; i < dist->nr_spis; i++ )
{
diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
index 2e507b10fe..d5d6bcc1f7 100644
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
@@ -251,56 +251,55 @@ static void vgic_mmio_write_sgipends(struct vcpu *vcpu,
vgic_queue_irq_unlock(vcpu->domain, irq, flags);
}
else
- {
spin_unlock_irqrestore(&irq->irq_lock, flags);
- }
vgic_put_irq(vcpu->domain, irq);
}
}
-static const struct vgic_register_region vgic_v2_dist_registers[] = {
+static const struct vgic_register_region vgic_v2_dist_registers[] =
+{
REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
- vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGROUPR,
- vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
- vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
- vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
- vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR,
- vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER,
- vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICACTIVER,
- vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IPRIORITYR,
- vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR,
- vgic_mmio_read_target, vgic_mmio_write_target, 8,
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ vgic_mmio_read_target, vgic_mmio_write_target, 8,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR,
- vgic_mmio_read_config, vgic_mmio_write_config, 2,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_config, vgic_mmio_write_config, 2,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICD_SGIR,
- vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICD_CPENDSGIR,
- vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
REGISTER_DESC_WITH_LENGTH(GICD_SPENDSGIR,
- vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
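
The vgic_v2_dist_registers[] table re-aligned above describes each distributor register range by its offset, length, allowed access widths and read/write handlers. The sketch below shows how an MMIO dispatcher can look an access up in such a table; the struct, table contents and lookup are reduced stand-ins, not Xen's vgic_register_region or its REGISTER_DESC_* macros.

    /* Simplified sketch of dispatching an MMIO access through a register
     * description table like the one above.  Everything here is a reduced
     * stand-in, not Xen's actual definitions. */
    #include <stddef.h>
    #include <stdint.h>

    #define ACCESS_8BIT  (1u << 0)
    #define ACCESS_32BIT (1u << 1)

    struct reg_region {
        unsigned int offset;    /* start offset within the distributor frame */
        unsigned int len;       /* length of the region in bytes */
        unsigned int access;    /* allowed access widths */
        uint32_t (*read)(unsigned int offset);
    };

    static uint32_t read_zero(unsigned int offset) { return 0; }
    static uint32_t read_ctlr(unsigned int offset) { return 1; }

    static const struct reg_region dist_regions[] = {
        { 0x000, 4,    ACCESS_32BIT,               read_ctlr },
        { 0x400, 1024, ACCESS_32BIT | ACCESS_8BIT, read_zero },  /* priorities */
    };

    static const struct reg_region *find_region(unsigned int offset)
    {
        for ( size_t i = 0;
              i < sizeof(dist_regions) / sizeof(dist_regions[0]); i++ )
            if ( offset >= dist_regions[i].offset &&
                 offset < dist_regions[i].offset + dist_regions[i].len )
                return &dist_regions[i];
        return NULL;
    }

    int main(void)
    {
        const struct reg_region *r = find_region(0x400);

        return (r && r->read(0x400) == 0) ? 0 : 1;
    }
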
diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c
index 5d935a7301..cc7036ce80 100644
--- a/xen/arch/arm/vgic/vgic-mmio.c
+++ b/xen/arch/arm/vgic/vgic-mmio.c
@@ -603,7 +603,8 @@ static int dispatch_mmio_write(struct vcpu *vcpu, mmio_info_t *info,
return 1;
}
-struct mmio_handler_ops vgic_io_ops = {
+struct mmio_handler_ops vgic_io_ops =
+{
.read = dispatch_mmio_read,
.write = dispatch_mmio_write,
};
diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h
index 3566cf237c..985fed6d6c 100644
--- a/xen/arch/arm/vgic/vgic-mmio.h
+++ b/xen/arch/arm/vgic/vgic-mmio.h
@@ -16,7 +16,8 @@
#ifndef __XEN_ARM_VGIC_VGIC_MMIO_H__
#define __XEN_ARM_VGIC_VGIC_MMIO_H__
-struct vgic_register_region {
+struct vgic_register_region
+{
unsigned int reg_offset;
unsigned int len;
unsigned int bits_per_irq;
@@ -120,18 +121,18 @@ void vgic_mmio_write_sactive(struct vcpu *vcpu,
unsigned long val);
unsigned long vgic_mmio_read_priority(struct vcpu *vcpu,
- paddr_t addr, unsigned int len);
+ paddr_t addr, unsigned int len);
void vgic_mmio_write_priority(struct vcpu *vcpu,
- paddr_t addr, unsigned int len,
- unsigned long val);
+ paddr_t addr, unsigned int len,
+ unsigned long val);
unsigned long vgic_mmio_read_config(struct vcpu *vcpu,
- paddr_t addr, unsigned int len);
+ paddr_t addr, unsigned int len);
void vgic_mmio_write_config(struct vcpu *vcpu,
- paddr_t addr, unsigned int len,
- unsigned long val);
+ paddr_t addr, unsigned int len,
+ unsigned long val);
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c
index b5ba4ace87..e9b7719300 100644
--- a/xen/arch/arm/vgic/vgic-v2.c
+++ b/xen/arch/arm/vgic/vgic-v2.c
@@ -23,7 +23,8 @@
#include "vgic.h"
-static struct {
+static struct
+{
bool enabled;
paddr_t dbase; /* Distributor interface address */
paddr_t cbase; /* CPU interface address & size */
diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
index f0f2ea5021..e0f14027c0 100644
--- a/xen/arch/arm/vgic/vgic.c
+++ b/xen/arch/arm/vgic/vgic.c
@@ -549,7 +549,7 @@ static int compute_ap_list_depth(struct vcpu *vcpu)
ASSERT(spin_is_locked(&vgic_cpu->ap_list_lock));
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list)
- count++;
+ count++;
return count;
}
@@ -888,9 +888,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *vcpu,
else /* remove a mapped IRQ */
{
if ( desc && irq->hwintid != desc->irq )
- {
ret = -EINVAL;
- }
else
{
irq->hw = false;
diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c
index 7bc5eeb207..c7fa950781 100644
--- a/xen/arch/arm/vpl011.c
+++ b/xen/arch/arm/vpl011.c
@@ -355,7 +355,8 @@ static int vpl011_mmio_read(struct vcpu *v,
switch ( vpl011_reg )
{
case DR:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
if ( vpl011->backend_in_domain )
*r = vreg_reg32_extract(vpl011_read_data(d), info);
@@ -364,14 +365,16 @@ static int vpl011_mmio_read(struct vcpu *v,
return 1;
case RSR:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
/* It always returns 0 as there are no physical errors. */
*r = 0;
return 1;
case FR:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
VPL011_LOCK(d, flags);
*r = vreg_reg32_extract(vpl011->uartfr, info);
@@ -379,7 +382,8 @@ static int vpl011_mmio_read(struct vcpu *v,
return 1;
case RIS:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
VPL011_LOCK(d, flags);
*r = vreg_reg32_extract(vpl011->uartris, info);
@@ -387,7 +391,8 @@ static int vpl011_mmio_read(struct vcpu *v,
return 1;
case MIS:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
VPL011_LOCK(d, flags);
*r = vreg_reg32_extract(vpl011->uartris & vpl011->uartimsc,
@@ -396,7 +401,8 @@ static int vpl011_mmio_read(struct vcpu *v,
return 1;
case IMSC:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
VPL011_LOCK(d, flags);
*r = vreg_reg32_extract(vpl011->uartimsc, info);
@@ -404,7 +410,8 @@ static int vpl011_mmio_read(struct vcpu *v,
return 1;
case ICR:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
/* Only write is valid. */
return 0;
@@ -441,7 +448,8 @@ static int vpl011_mmio_write(struct vcpu *v,
{
uint32_t data = 0;
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
vreg_reg32_update(&data, r, info);
data &= 0xFF;
@@ -453,7 +461,8 @@ static int vpl011_mmio_write(struct vcpu *v,
}
case RSR: /* Nothing to clear. */
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
return 1;
@@ -463,7 +472,8 @@ static int vpl011_mmio_write(struct vcpu *v,
goto write_ignore;
case IMSC:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
VPL011_LOCK(d, flags);
vreg_reg32_update(&vpl011->uartimsc, r, info);
@@ -472,7 +482,8 @@ static int vpl011_mmio_write(struct vcpu *v,
return 1;
case ICR:
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;
+ if ( !vpl011_reg32_check_access(dabt) )
+ goto bad_width;
VPL011_LOCK(d, flags);
vreg_reg32_clearbits(&vpl011->uartris, r, info);
@@ -496,7 +507,8 @@ bad_width:
}
-static const struct mmio_handler_ops vpl011_mmio_handler = {
+static const struct mmio_handler_ops vpl011_mmio_handler =
+{
.read = vpl011_mmio_read,
.write = vpl011_mmio_write,
};
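
The vpl011_mmio_handler struct reformatted above, like the vgic handler structs earlier, pairs a read and a write callback that get attached to a guest-physical range. The sketch below shows the general shape of registering such an ops structure and dispatching an access to it; the types, the register function and the address used are illustrative, not Xen's io.c interface.

    /* Schematic sketch of attaching read/write ops to a guest-physical range
     * and dispatching an access.  Types and names are illustrative. */
    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t paddr_t;

    struct mmio_ops {
        int (*read)(paddr_t addr, uint32_t *val);
        int (*write)(paddr_t addr, uint32_t val);
    };

    struct mmio_range {
        paddr_t base;
        paddr_t size;
        const struct mmio_ops *ops;
    };

    static struct mmio_range ranges[8];
    static unsigned int nr_ranges;

    static void register_range(paddr_t base, paddr_t size,
                               const struct mmio_ops *ops)
    {
        if ( nr_ranges < 8 )
            ranges[nr_ranges++] = (struct mmio_range){ base, size, ops };
    }

    static int dispatch_read(paddr_t addr, uint32_t *val)
    {
        for ( unsigned int i = 0; i < nr_ranges; i++ )
            if ( addr >= ranges[i].base &&
                 addr < ranges[i].base + ranges[i].size )
                return ranges[i].ops->read(addr, val);
        return 0;                         /* no handler claims the access */
    }

    /* Example handler pair in the style of the ops structs above. */
    static int uart_read(paddr_t addr, uint32_t *val) { *val = 0; return 1; }
    static int uart_write(paddr_t addr, uint32_t val) { return 1; }

    static const struct mmio_ops uart_ops = { .read = uart_read,
                                              .write = uart_write };

    int main(void)
    {
        uint32_t v;

        register_range(0x9000000, 0x1000, &uart_ops);
        return dispatch_read(0x9000004, &v) ? 0 : 1;
    }
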
diff --git a/xen/arch/arm/vpsci.c b/xen/arch/arm/vpsci.c
index c1e250be59..c1a45f87ab 100644
--- a/xen/arch/arm/vpsci.c
+++ b/xen/arch/arm/vpsci.c
@@ -154,12 +154,13 @@ static int32_t do_psci_0_2_cpu_on(register_t target_cpu,
return do_common_cpu_on(target_cpu, entry_point, context_id);
}
-static const unsigned long target_affinity_mask[] = {
+static const unsigned long target_affinity_mask[] =
+{
( MPIDR_HWID_MASK & AFFINITY_MASK( 0 ) ),
( MPIDR_HWID_MASK & AFFINITY_MASK( 1 ) ),
( MPIDR_HWID_MASK & AFFINITY_MASK( 2 ) )
#ifdef CONFIG_ARM_64
- ,( MPIDR_HWID_MASK & AFFINITY_MASK( 3 ) )
+ , ( MPIDR_HWID_MASK & AFFINITY_MASK( 3 ) )
#endif
};
@@ -184,7 +185,7 @@ static int32_t do_psci_0_2_affinity_info(register_t target_affinity,
v = d->vcpu[vcpuid];
if ( ( ( v->arch.vmpidr & tmask ) == target_affinity )
- && ( !test_bit(_VPF_down, &v->pause_flags) ) )
+ && ( !test_bit(_VPF_down, &v->pause_flags) ) )
return PSCI_0_2_AFFINITY_LEVEL_ON;
}
@@ -199,13 +200,13 @@ static int32_t do_psci_0_2_migrate_info_type(void)
static void do_psci_0_2_system_off( void )
{
struct domain *d = current->domain;
- domain_shutdown(d,SHUTDOWN_poweroff);
+ domain_shutdown(d, SHUTDOWN_poweroff);
}
static void do_psci_0_2_system_reset(void)
{
struct domain *d = current->domain;
- domain_shutdown(d,SHUTDOWN_reboot);
+ domain_shutdown(d, SHUTDOWN_reboot);
}
static int32_t do_psci_1_0_features(uint32_t psci_func_id)
diff --git a/xen/arch/arm/vsmc.c b/xen/arch/arm/vsmc.c
index f8e350311d..25dd3568a9 100644
--- a/xen/arch/arm/vsmc.c
+++ b/xen/arch/arm/vsmc.c
@@ -60,7 +60,7 @@ static bool fill_uid(struct cpu_user_regs *regs, xen_uuid_t uuid)
}
static bool fill_revision(struct cpu_user_regs *regs, uint32_t major,
- uint32_t minor)
+ uint32_t minor)
{
/*
* Revision is returned in registers r0 and r1.
diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
index e6aebdac9e..9bf0251d17 100644
--- a/xen/arch/arm/vtimer.c
+++ b/xen/arch/arm/vtimer.c
@@ -64,7 +64,8 @@ int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config)
{
d->arch.phys_timer_base.offset = NOW();
d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0);
- d->time_offset_seconds = ticks_to_ns(d->arch.virt_timer_base.offset - boot_count);
+ d->time_offset_seconds = ticks_to_ns(d->arch.virt_timer_base.offset -
+ boot_count);
do_div(d->time_offset_seconds, 1000000000);
config->clock_frequency = timer_dt_clock_frequency;
@@ -110,16 +111,16 @@ int vcpu_vtimer_init(struct vcpu *v)
t->ctl = 0;
t->cval = NOW();
t->irq = d0
- ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI)
- : GUEST_TIMER_PHYS_NS_PPI;
+ ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI)
+ : GUEST_TIMER_PHYS_NS_PPI;
t->v = v;
t = &v->arch.virt_timer;
init_timer(&t->timer, virt_timer_expired, t, v->processor);
t->ctl = 0;
t->irq = d0
- ? timer_get_irq(TIMER_VIRT_PPI)
- : GUEST_TIMER_VIRT_PPI;
+ ? timer_get_irq(TIMER_VIRT_PPI)
+ : GUEST_TIMER_VIRT_PPI;
t->v = v;
v->arch.vtimer_initialized = 1;
@@ -147,7 +148,7 @@ void virt_timer_save(struct vcpu *v)
!(v->arch.virt_timer.ctl & CNTx_CTL_MASK))
{
set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval +
- v->domain->arch.virt_timer_base.offset - boot_count));
+ v->domain->arch.virt_timer_base.offset - boot_count));
}
}
@@ -172,9 +173,7 @@ static bool vtimer_cntp_ctl(struct cpu_user_regs *regs, uint32_t *r, bool read)
return false;
if ( read )
- {
*r = v->arch.phys_timer.ctl;
- }
else
{
uint32_t ctl = *r & ~CNTx_CTL_PENDING;
@@ -205,9 +204,7 @@ static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r,
now = NOW() - v->domain->arch.phys_timer_base.offset;
if ( read )
- {
*r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull);
- }
else
{
v->arch.phys_timer.cval = now + ticks_to_ns(*r);
@@ -231,9 +228,7 @@ static bool vtimer_cntp_cval(struct cpu_user_regs *regs, uint64_t *r,
return false;
if ( read )
- {
*r = ns_to_ticks(v->arch.phys_timer.cval);
- }
else
{
v->arch.phys_timer.cval = ticks_to_ns(*r);
@@ -318,7 +313,8 @@ static bool vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr)
bool vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr)
{
- switch (hsr.ec) {
+ switch (hsr.ec)
+ {
case HSR_EC_CP15_32:
return vtimer_emulate_cp32(regs, hsr);
case HSR_EC_CP15_64:
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index 80d4755d43..654cd1d6fc 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -47,7 +47,8 @@ static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info,
static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info,
register_t r, void *priv);
-static const struct mmio_handler_ops vuart_mmio_handler = {
+static const struct mmio_handler_ops vuart_mmio_handler =
+{
.read = vuart_mmio_read,
.write = vuart_mmio_write,
};
diff --git a/xen/arch/x86/acpi/boot.c b/xen/arch/x86/acpi/boot.c
index 1382b4dcd0..e6cbe8103e 100644
--- a/xen/arch/x86/acpi/boot.c
+++ b/xen/arch/x86/acpi/boot.c
@@ -62,250 +62,258 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
- struct acpi_table_madt *madt;
+ struct acpi_table_madt *madt;
- madt = (struct acpi_table_madt *)table;
+ madt = (struct acpi_table_madt *)table;
- if (madt->address) {
- acpi_lapic_addr = (u64) madt->address;
+ if (madt->address)
+ {
+ acpi_lapic_addr = (u64) madt->address;
- printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
- madt->address);
- }
+ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
+ madt->address);
+ }
- acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
- return 0;
+ return 0;
}
static int __init
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
- struct acpi_madt_local_x2apic *processor =
- container_of(header, struct acpi_madt_local_x2apic, header);
- bool enabled = false, log = false;
-
- if (BAD_MADT_ENTRY(processor, end))
- return -EINVAL;
-
- if ((processor->lapic_flags & ACPI_MADT_ENABLED) ||
- processor->local_apic_id != 0xffffffff || opt_cpu_info) {
- acpi_table_print_madt_entry(header);
- log = true;
- }
-
- /* Record local apic id only when enabled and fitting. */
- if (processor->local_apic_id >= MAX_APICS ||
- processor->uid >= MAX_MADT_ENTRIES) {
- if (log)
- printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit"
- " - processor ignored\n",
- processor->lapic_flags & ACPI_MADT_ENABLED
- ? KERN_WARNING "WARNING: " : KERN_INFO,
- processor->local_apic_id, processor->uid);
- /*
- * Must not return an error here, to prevent
- * acpi_table_parse_entries() from terminating early.
- */
- return 0 /* -ENOSPC */;
- }
- if (processor->lapic_flags & ACPI_MADT_ENABLED) {
- x86_acpiid_to_apicid[processor->uid] =
- processor->local_apic_id;
- enabled = true;
- }
-
- /*
- * We need to register disabled CPU as well to permit
- * counting disabled CPUs. This allows us to size
- * cpus_possible_map more accurately, to permit
- * to not preallocating memory for all NR_CPUS
- * when we use CPU hotplug.
- */
- mp_register_lapic(processor->local_apic_id, enabled, 0);
-
- return 0;
+ struct acpi_madt_local_x2apic *processor =
+ container_of(header, struct acpi_madt_local_x2apic, header);
+ bool enabled = false, log = false;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ if ((processor->lapic_flags & ACPI_MADT_ENABLED) ||
+ processor->local_apic_id != 0xffffffff || opt_cpu_info)
+ {
+ acpi_table_print_madt_entry(header);
+ log = true;
+ }
+
+ /* Record local apic id only when enabled and fitting. */
+ if (processor->local_apic_id >= MAX_APICS ||
+ processor->uid >= MAX_MADT_ENTRIES)
+ {
+ if (log)
+ printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit"
+ " - processor ignored\n",
+ processor->lapic_flags & ACPI_MADT_ENABLED
+ ? KERN_WARNING "WARNING: " : KERN_INFO,
+ processor->local_apic_id, processor->uid);
+ /*
+ * Must not return an error here, to prevent
+ * acpi_table_parse_entries() from terminating early.
+ */
+ return 0 /* -ENOSPC */;
+ }
+ if (processor->lapic_flags & ACPI_MADT_ENABLED)
+ {
+ x86_acpiid_to_apicid[processor->uid] =
+ processor->local_apic_id;
+ enabled = true;
+ }
+
+ /*
+ * We need to register disabled CPU as well to permit
+ * counting disabled CPUs. This allows us to size
+ * cpus_possible_map more accurately, to permit
+ * to not preallocating memory for all NR_CPUS
+ * when we use CPU hotplug.
+ */
+ mp_register_lapic(processor->local_apic_id, enabled, 0);
+
+ return 0;
}
static int __init
-acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_lapic(struct acpi_subtable_header *header, const unsigned long end)
{
- struct acpi_madt_local_apic *processor =
- container_of(header, struct acpi_madt_local_apic, header);
- bool enabled = false;
-
- if (BAD_MADT_ENTRY(processor, end))
- return -EINVAL;
-
- if ((processor->lapic_flags & ACPI_MADT_ENABLED) ||
- processor->id != 0xff || opt_cpu_info)
- acpi_table_print_madt_entry(header);
-
- /* Record local apic id only when enabled */
- if (processor->lapic_flags & ACPI_MADT_ENABLED) {
- x86_acpiid_to_apicid[processor->processor_id] = processor->id;
- enabled = true;
- }
-
- /*
- * We need to register disabled CPU as well to permit
- * counting disabled CPUs. This allows us to size
- * cpus_possible_map more accurately, to permit
- * to not preallocating memory for all NR_CPUS
- * when we use CPU hotplug.
- */
- mp_register_lapic(processor->id, enabled, 0);
-
- return 0;
+ struct acpi_madt_local_apic *processor =
+ container_of(header, struct acpi_madt_local_apic, header);
+ bool enabled = false;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ if ((processor->lapic_flags & ACPI_MADT_ENABLED) ||
+ processor->id != 0xff || opt_cpu_info)
+ acpi_table_print_madt_entry(header);
+
+ /* Record local apic id only when enabled */
+ if (processor->lapic_flags & ACPI_MADT_ENABLED)
+ {
+ x86_acpiid_to_apicid[processor->processor_id] = processor->id;
+ enabled = true;
+ }
+
+ /*
+ * We need to register disabled CPU as well to permit
+ * counting disabled CPUs. This allows us to size
+ * cpus_possible_map more accurately, to permit
+ * to not preallocating memory for all NR_CPUS
+ * when we use CPU hotplug.
+ */
+ mp_register_lapic(processor->id, enabled, 0);
+
+ return 0;
}
static int __init
-acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
- const unsigned long end)
+acpi_parse_lapic_addr_ovr(struct acpi_subtable_header *header,
+ const unsigned long end)
{
- struct acpi_madt_local_apic_override *lapic_addr_ovr =
- container_of(header, struct acpi_madt_local_apic_override,
- header);
+ struct acpi_madt_local_apic_override *lapic_addr_ovr =
+ container_of(header, struct acpi_madt_local_apic_override,
+ header);
- if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
- return -EINVAL;
+ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
+ return -EINVAL;
- acpi_lapic_addr = lapic_addr_ovr->address;
+ acpi_lapic_addr = lapic_addr_ovr->address;
- return 0;
+ return 0;
}
static int __init
acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
- const unsigned long end)
+ const unsigned long end)
{
- struct acpi_madt_local_x2apic_nmi *x2apic_nmi =
- container_of(header, struct acpi_madt_local_x2apic_nmi,
- header);
+ struct acpi_madt_local_x2apic_nmi *x2apic_nmi =
+ container_of(header, struct acpi_madt_local_x2apic_nmi,
+ header);
- if (BAD_MADT_ENTRY(x2apic_nmi, end))
- return -EINVAL;
+ if (BAD_MADT_ENTRY(x2apic_nmi, end))
+ return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(header);
- if (x2apic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ if (x2apic_nmi->lint != 1)
+ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
- return 0;
+ return 0;
}
static int __init
-acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_lapic_nmi(struct acpi_subtable_header *header,
+ const unsigned long end)
{
- struct acpi_madt_local_apic_nmi *lapic_nmi =
- container_of(header, struct acpi_madt_local_apic_nmi, header);
+ struct acpi_madt_local_apic_nmi *lapic_nmi =
+ container_of(header, struct acpi_madt_local_apic_nmi, header);
- if (BAD_MADT_ENTRY(lapic_nmi, end))
- return -EINVAL;
+ if (BAD_MADT_ENTRY(lapic_nmi, end))
+ return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(header);
- if (lapic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ if (lapic_nmi->lint != 1)
+ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
- return 0;
+ return 0;
}
static int __init
-acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_ioapic(struct acpi_subtable_header *header, const unsigned long end)
{
- struct acpi_madt_io_apic *ioapic =
- container_of(header, struct acpi_madt_io_apic, header);
+ struct acpi_madt_io_apic *ioapic =
+ container_of(header, struct acpi_madt_io_apic, header);
- if (BAD_MADT_ENTRY(ioapic, end))
- return -EINVAL;
+ if (BAD_MADT_ENTRY(ioapic, end))
+ return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(header);
- mp_register_ioapic(ioapic->id,
- ioapic->address, ioapic->global_irq_base);
+ mp_register_ioapic(ioapic->id,
+ ioapic->address, ioapic->global_irq_base);
- return 0;
+ return 0;
}
static int __init
-acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
- const unsigned long end)
+acpi_parse_int_src_ovr(struct acpi_subtable_header *header,
+ const unsigned long end)
{
- struct acpi_madt_interrupt_override *intsrc =
- container_of(header, struct acpi_madt_interrupt_override,
- header);
+ struct acpi_madt_interrupt_override *intsrc =
+ container_of(header, struct acpi_madt_interrupt_override,
+ header);
- if (BAD_MADT_ENTRY(intsrc, end))
- return -EINVAL;
+ if (BAD_MADT_ENTRY(intsrc, end))
+ return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(header);
- if (acpi_skip_timer_override &&
- intsrc->source_irq == 0 && intsrc->global_irq == 2) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
- return 0;
- }
+ if (acpi_skip_timer_override &&
+ intsrc->source_irq == 0 && intsrc->global_irq == 2)
+ {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
+ }
- mp_override_legacy_irq(intsrc->source_irq,
- ACPI_MADT_GET_POLARITY(intsrc->inti_flags),
- ACPI_MADT_GET_TRIGGER(intsrc->inti_flags),
- intsrc->global_irq);
+ mp_override_legacy_irq(intsrc->source_irq,
+ ACPI_MADT_GET_POLARITY(intsrc->inti_flags),
+ ACPI_MADT_GET_TRIGGER(intsrc->inti_flags),
+ intsrc->global_irq);
- return 0;
+ return 0;
}
static int __init
-acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_nmi_src(struct acpi_subtable_header *header, const unsigned long end)
{
- struct acpi_madt_nmi_source *nmi_src =
- container_of(header, struct acpi_madt_nmi_source, header);
+ struct acpi_madt_nmi_source *nmi_src =
+ container_of(header, struct acpi_madt_nmi_source, header);
- if (BAD_MADT_ENTRY(nmi_src, end))
- return -EINVAL;
+ if (BAD_MADT_ENTRY(nmi_src, end))
+ return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(header);
- /* TBD: Support nimsrc entries? */
+    /* TBD: Support nmi_src entries? */
- return 0;
+ return 0;
}
#ifdef CONFIG_HPET_TIMER
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
- struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
-
- if (hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- printk(KERN_WARNING PREFIX "HPET timers must be located in "
- "memory.\n");
- return -1;
- }
-
- /*
- * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS
- * bug; the intended way of supporting more than 1 HPET is to use AML
- * entries.
- *
- * If someone finds a real system with two genuine HPET tables, perhaps
- * they will be kind and implement support. Until then however, warn
- * that we will ignore subsequent tables.
- */
- if (hpet_address)
- {
- printk(KERN_WARNING PREFIX
- "Found multiple HPET tables. Only using first\n");
- return -1;
- }
-
- hpet_address = hpet_tbl->address.address;
- hpet_blockid = hpet_tbl->sequence;
- hpet_flags = hpet_tbl->flags;
- printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
- hpet_tbl->id, hpet_address);
-
- return 0;
+ struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
+
+ if (hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ {
+ printk(KERN_WARNING PREFIX "HPET timers must be located in "
+ "memory.\n");
+ return -1;
+ }
+
+ /*
+ * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS
+ * bug; the intended way of supporting more than 1 HPET is to use AML
+ * entries.
+ *
+ * If someone finds a real system with two genuine HPET tables, perhaps
+ * they will be kind and implement support. Until then however, warn
+ * that we will ignore subsequent tables.
+ */
+ if (hpet_address)
+ {
+ printk(KERN_WARNING PREFIX
+ "Found multiple HPET tables. Only using first\n");
+ return -1;
+ }
+
+ hpet_address = hpet_tbl->address.address;
+ hpet_blockid = hpet_tbl->sequence;
+ hpet_flags = hpet_tbl->flags;
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+ hpet_tbl->id, hpet_address);
+
+ return 0;
}
#else
#define acpi_parse_hpet NULL
@@ -313,23 +321,23 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
static int __init acpi_invalidate_bgrt(struct acpi_table_header *table)
{
- struct acpi_table_bgrt *bgrt_tbl =
- container_of(table, struct acpi_table_bgrt, header);
+ struct acpi_table_bgrt *bgrt_tbl =
+ container_of(table, struct acpi_table_bgrt, header);
- if (table->length < sizeof(*bgrt_tbl))
- return -1;
+ if (table->length < sizeof(*bgrt_tbl))
+ return -1;
- if (bgrt_tbl->version == 1 && bgrt_tbl->image_address
- && !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address),
- RAM_TYPE_CONVENTIONAL))
- return 0;
+ if (bgrt_tbl->version == 1 && bgrt_tbl->image_address
+ && !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address),
+ RAM_TYPE_CONVENTIONAL))
+ return 0;
- printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#"PRIx64"\n",
- bgrt_tbl->version, bgrt_tbl->image_address);
- bgrt_tbl->image_address = 0;
- bgrt_tbl->status &= ~1;
+ printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#"PRIx64"\n",
+ bgrt_tbl->version, bgrt_tbl->image_address);
+ bgrt_tbl->image_address = 0;
+ bgrt_tbl->status &= ~1;
- return 0;
+ return 0;
}
#ifdef CONFIG_ACPI_SLEEP
@@ -350,162 +358,170 @@ static int __init acpi_invalidate_bgrt(struct acpi_table_header *table)
static void __init
acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt)
{
- struct acpi_table_facs *facs = NULL;
- uint64_t facs_pa;
-
- if (fadt->header.revision >= 5 &&
- fadt->header.length >= ACPI_FADT_V5_SIZE) {
- acpi_sinfo.sleep_control = fadt->sleep_control;
- acpi_sinfo.sleep_status = fadt->sleep_status;
-
- printk(KERN_INFO PREFIX
- "v5 SLEEP INFO: control[%d:%"PRIx64"],"
- " status[%d:%"PRIx64"]\n",
- acpi_sinfo.sleep_control.space_id,
- acpi_sinfo.sleep_control.address,
- acpi_sinfo.sleep_status.space_id,
- acpi_sinfo.sleep_status.address);
-
- if ((fadt->sleep_control.address &&
- (fadt->sleep_control.bit_offset ||
- fadt->sleep_control.bit_width !=
- fadt->sleep_control.access_width * 8)) ||
- (fadt->sleep_status.address &&
- (fadt->sleep_status.bit_offset ||
- fadt->sleep_status.bit_width !=
- fadt->sleep_status.access_width * 8))) {
- printk(KERN_WARNING PREFIX
- "Invalid sleep control/status register data:"
- " %#x:%#x:%#x %#x:%#x:%#x\n",
- fadt->sleep_control.bit_offset,
- fadt->sleep_control.bit_width,
- fadt->sleep_control.access_width,
- fadt->sleep_status.bit_offset,
- fadt->sleep_status.bit_width,
- fadt->sleep_status.access_width);
- fadt->sleep_control.address = 0;
- fadt->sleep_status.address = 0;
- }
- }
-
- if (fadt->flags & ACPI_FADT_HW_REDUCED)
- goto bad;
-
- acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control);
- acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control);
- acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event);
- acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event);
-
- printk(KERN_INFO PREFIX
- "SLEEP INFO: pm1x_cnt[%d:%"PRIx64",%d:%"PRIx64"], "
- "pm1x_evt[%d:%"PRIx64",%d:%"PRIx64"]\n",
- acpi_sinfo.pm1a_cnt_blk.space_id,
- acpi_sinfo.pm1a_cnt_blk.address,
- acpi_sinfo.pm1b_cnt_blk.space_id,
- acpi_sinfo.pm1b_cnt_blk.address,
- acpi_sinfo.pm1a_evt_blk.space_id,
- acpi_sinfo.pm1a_evt_blk.address,
- acpi_sinfo.pm1b_evt_blk.space_id,
- acpi_sinfo.pm1b_evt_blk.address);
-
- /* Now FACS... */
- facs_pa = ((fadt->header.revision >= FADT2_REVISION_ID)
- ? fadt->Xfacs : (uint64_t)fadt->facs);
- if (fadt->facs && ((uint64_t)fadt->facs != facs_pa)) {
- printk(KERN_WARNING PREFIX
- "32/64X FACS address mismatch in FADT - "
- "%08x/%016"PRIx64", using 32\n",
- fadt->facs, facs_pa);
- facs_pa = (uint64_t)fadt->facs;
- }
- if (!facs_pa)
- goto bad;
-
- facs = (struct acpi_table_facs *)
- __acpi_map_table(facs_pa, sizeof(struct acpi_table_facs));
- if (!facs)
- goto bad;
-
- if (strncmp(facs->signature, "FACS", 4)) {
- printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n",
- facs->signature);
- goto bad;
- }
-
- if (facs->length < 24) {
- printk(KERN_ERR PREFIX "Invalid FACS table length: %#x",
- facs->length);
- goto bad;
- }
-
- if (facs->length < 64)
- printk(KERN_WARNING PREFIX
- "FACS is shorter than ACPI spec allow: %#x",
- facs->length);
-
- acpi_sinfo.wakeup_vector = facs_pa +
- offsetof(struct acpi_table_facs, firmware_waking_vector);
- acpi_sinfo.vector_width = 32;
-
- printk(KERN_INFO PREFIX
- " wakeup_vec[%"PRIx64"], vec_size[%x]\n",
- acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
- return;
+ struct acpi_table_facs *facs = NULL;
+ uint64_t facs_pa;
+
+ if (fadt->header.revision >= 5 &&
+ fadt->header.length >= ACPI_FADT_V5_SIZE)
+ {
+ acpi_sinfo.sleep_control = fadt->sleep_control;
+ acpi_sinfo.sleep_status = fadt->sleep_status;
+
+ printk(KERN_INFO PREFIX
+ "v5 SLEEP INFO: control[%d:%"PRIx64"],"
+ " status[%d:%"PRIx64"]\n",
+ acpi_sinfo.sleep_control.space_id,
+ acpi_sinfo.sleep_control.address,
+ acpi_sinfo.sleep_status.space_id,
+ acpi_sinfo.sleep_status.address);
+
+ if ((fadt->sleep_control.address &&
+ (fadt->sleep_control.bit_offset ||
+ fadt->sleep_control.bit_width !=
+ fadt->sleep_control.access_width * 8)) ||
+ (fadt->sleep_status.address &&
+ (fadt->sleep_status.bit_offset ||
+ fadt->sleep_status.bit_width !=
+ fadt->sleep_status.access_width * 8)))
+ {
+ printk(KERN_WARNING PREFIX
+ "Invalid sleep control/status register data:"
+ " %#x:%#x:%#x %#x:%#x:%#x\n",
+ fadt->sleep_control.bit_offset,
+ fadt->sleep_control.bit_width,
+ fadt->sleep_control.access_width,
+ fadt->sleep_status.bit_offset,
+ fadt->sleep_status.bit_width,
+ fadt->sleep_status.access_width);
+ fadt->sleep_control.address = 0;
+ fadt->sleep_status.address = 0;
+ }
+ }
+
+ if (fadt->flags & ACPI_FADT_HW_REDUCED)
+ goto bad;
+
+ acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control);
+ acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control);
+ acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event);
+ acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event);
+
+ printk(KERN_INFO PREFIX
+ "SLEEP INFO: pm1x_cnt[%d:%"PRIx64",%d:%"PRIx64"], "
+ "pm1x_evt[%d:%"PRIx64",%d:%"PRIx64"]\n",
+ acpi_sinfo.pm1a_cnt_blk.space_id,
+ acpi_sinfo.pm1a_cnt_blk.address,
+ acpi_sinfo.pm1b_cnt_blk.space_id,
+ acpi_sinfo.pm1b_cnt_blk.address,
+ acpi_sinfo.pm1a_evt_blk.space_id,
+ acpi_sinfo.pm1a_evt_blk.address,
+ acpi_sinfo.pm1b_evt_blk.space_id,
+ acpi_sinfo.pm1b_evt_blk.address);
+
+ /* Now FACS... */
+ facs_pa = ((fadt->header.revision >= FADT2_REVISION_ID)
+ ? fadt->Xfacs : (uint64_t)fadt->facs);
+ if (fadt->facs && ((uint64_t)fadt->facs != facs_pa))
+ {
+ printk(KERN_WARNING PREFIX
+ "32/64X FACS address mismatch in FADT - "
+ "%08x/%016"PRIx64", using 32\n",
+ fadt->facs, facs_pa);
+ facs_pa = (uint64_t)fadt->facs;
+ }
+ if (!facs_pa)
+ goto bad;
+
+ facs = (struct acpi_table_facs *)
+ __acpi_map_table(facs_pa, sizeof(struct acpi_table_facs));
+ if (!facs)
+ goto bad;
+
+ if (strncmp(facs->signature, "FACS", 4))
+ {
+ printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n",
+ facs->signature);
+ goto bad;
+ }
+
+ if (facs->length < 24)
+ {
+ printk(KERN_ERR PREFIX "Invalid FACS table length: %#x",
+ facs->length);
+ goto bad;
+ }
+
+ if (facs->length < 64)
+ printk(KERN_WARNING PREFIX
+               "FACS is shorter than ACPI spec allows: %#x",
+ facs->length);
+
+ acpi_sinfo.wakeup_vector = facs_pa +
+ offsetof(struct acpi_table_facs, firmware_waking_vector);
+ acpi_sinfo.vector_width = 32;
+
+ printk(KERN_INFO PREFIX
+ " wakeup_vec[%"PRIx64"], vec_size[%x]\n",
+ acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
+ return;
bad:
- memset(&acpi_sinfo, 0,
- offsetof(struct acpi_sleep_info, sleep_control));
- memset(&acpi_sinfo.sleep_status + 1, 0,
- (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1));
+ memset(&acpi_sinfo, 0,
+ offsetof(struct acpi_sleep_info, sleep_control));
+ memset(&acpi_sinfo.sleep_status + 1, 0,
+ (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1));
}
#endif
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
- struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table;
+ struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table;
#ifdef CONFIG_ACPI_INTERPRETER
- /* initialize sci_int early for INT_SRC_OVR MADT parsing */
- acpi_fadt.sci_int = fadt->sci_int;
+ /* initialize sci_int early for INT_SRC_OVR MADT parsing */
+ acpi_fadt.sci_int = fadt->sci_int;
- /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
- acpi_fadt.revision = fadt->revision;
- acpi_fadt.force_apic_physical_destination_mode =
- fadt->force_apic_physical_destination_mode;
+ /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
+ acpi_fadt.revision = fadt->revision;
+ acpi_fadt.force_apic_physical_destination_mode =
+ fadt->force_apic_physical_destination_mode;
#endif
#ifdef CONFIG_X86_PM_TIMER
- /* detect the location of the ACPI PM Timer */
- if (fadt->header.revision >= FADT2_REVISION_ID) {
- /* FADT rev. 2 */
- if (fadt->xpm_timer_block.space_id ==
- ACPI_ADR_SPACE_SYSTEM_IO) {
- pmtmr_ioport = fadt->xpm_timer_block.address;
- pmtmr_width = fadt->xpm_timer_block.bit_width;
- }
- }
- /*
- * "X" fields are optional extensions to the original V1.0
- * fields, so we must selectively expand V1.0 fields if the
- * corresponding X field is zero.
- */
- if (!pmtmr_ioport) {
- pmtmr_ioport = fadt->pm_timer_block;
- pmtmr_width = fadt->pm_timer_length == 4 ? 24 : 0;
- }
- if (pmtmr_ioport)
- printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n",
- pmtmr_ioport, pmtmr_width);
+ /* detect the location of the ACPI PM Timer */
+ if (fadt->header.revision >= FADT2_REVISION_ID)
+ {
+ /* FADT rev. 2 */
+ if (fadt->xpm_timer_block.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_IO)
+ {
+ pmtmr_ioport = fadt->xpm_timer_block.address;
+ pmtmr_width = fadt->xpm_timer_block.bit_width;
+ }
+ }
+ /*
+ * "X" fields are optional extensions to the original V1.0
+ * fields, so we must selectively expand V1.0 fields if the
+ * corresponding X field is zero.
+ */
+ if (!pmtmr_ioport)
+ {
+ pmtmr_ioport = fadt->pm_timer_block;
+ pmtmr_width = fadt->pm_timer_length == 4 ? 24 : 0;
+ }
+ if (pmtmr_ioport)
+ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n",
+ pmtmr_ioport, pmtmr_width);
#endif
- acpi_smi_cmd = fadt->smi_command;
- acpi_enable_value = fadt->acpi_enable;
- acpi_disable_value = fadt->acpi_disable;
+ acpi_smi_cmd = fadt->smi_command;
+ acpi_enable_value = fadt->acpi_enable;
+ acpi_disable_value = fadt->acpi_disable;
#ifdef CONFIG_ACPI_SLEEP
- acpi_fadt_parse_sleep_info(fadt);
+ acpi_fadt_parse_sleep_info(fadt);
#endif
- return 0;
+ return 0;
}
/*
@@ -514,54 +530,59 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
*/
static int __init acpi_parse_madt_lapic_entries(void)
{
- int count, x2count;
-
- if (!cpu_has_apic)
- return -ENODEV;
-
- /*
- * Note that the LAPIC address is obtained from the MADT (32-bit value)
- * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
- */
-
- count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
- acpi_parse_lapic_addr_ovr, 0);
- if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
- return count;
- }
-
- mp_register_lapic_address(acpi_lapic_addr);
-
- BUILD_BUG_ON(MAX_APICS != MAX_LOCAL_APIC);
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
- acpi_parse_lapic, MAX_APICS);
- x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
- acpi_parse_x2apic, MAX_APICS);
- if (!count && !x2count) {
- printk(KERN_ERR PREFIX "No LAPIC entries present\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return -ENODEV;
- } else if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return count < 0 ? count : x2count;
- }
-
- count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
- acpi_parse_lapic_nmi, 0);
- x2count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
- acpi_parse_x2apic_nmi, 0);
- if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return count < 0 ? count : x2count;
- }
- return 0;
+ int count, x2count;
+
+ if (!cpu_has_apic)
+ return -ENODEV;
+
+ /*
+ * Note that the LAPIC address is obtained from the MADT (32-bit value)
+     * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
+ */
+
+ count =
+ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
+ acpi_parse_lapic_addr_ovr, 0);
+ if (count < 0)
+ {
+ printk(KERN_ERR PREFIX
+ "Error parsing LAPIC address override entry\n");
+ return count;
+ }
+
+ mp_register_lapic_address(acpi_lapic_addr);
+
+ BUILD_BUG_ON(MAX_APICS != MAX_LOCAL_APIC);
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
+ acpi_parse_lapic, MAX_APICS);
+ x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
+ acpi_parse_x2apic, MAX_APICS);
+ if (!count && !x2count)
+ {
+ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return -ENODEV;
+ }
+ else if (count < 0 || x2count < 0)
+ {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count < 0 ? count : x2count;
+ }
+
+ count =
+ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
+ acpi_parse_lapic_nmi, 0);
+ x2count =
+ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
+ acpi_parse_x2apic_nmi, 0);
+ if (count < 0 || x2count < 0)
+ {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count < 0 ? count : x2count;
+ }
+ return 0;
}
/*
@@ -570,100 +591,109 @@ static int __init acpi_parse_madt_lapic_entries(void)
*/
static int __init acpi_parse_madt_ioapic_entries(void)
{
- int count;
-
- /*
- * ACPI interpreter is required to complete interrupt setup,
- * so if it is off, don't enumerate the io-apics with ACPI.
- * If MPS is present, it will handle them,
- * otherwise the system will stay in PIC mode
- */
- if (acpi_disabled || acpi_noirq) {
- return -ENODEV;
- }
-
- if (!cpu_has_apic)
- return -ENODEV;
-
- /*
- * if "noapic" boot option, don't look for IO-APICs
- */
- if (skip_ioapic_setup) {
- printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
- "due to 'noapic' option.\n");
- return -ENODEV;
- }
-
- count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
- MAX_IO_APICS);
- if (!count) {
- printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
- return -ENODEV;
- } else if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
- return count;
- }
-
- count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
- acpi_parse_int_src_ovr, MAX_IRQ_SOURCES);
- if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return count;
- }
-
- /* Fill in identity legacy mapings where no override */
- mp_config_acpi_legacy_irqs();
-
- count =
- acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
- acpi_parse_nmi_src, MAX_IRQ_SOURCES);
- if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
- /* TBD: Cleanup to allow fallback to MPS */
- return count;
- }
-
- return 0;
+ int count;
+
+ /*
+ * ACPI interpreter is required to complete interrupt setup,
+ * so if it is off, don't enumerate the io-apics with ACPI.
+ * If MPS is present, it will handle them,
+ * otherwise the system will stay in PIC mode
+ */
+ if (acpi_disabled || acpi_noirq)
+ return -ENODEV;
+
+ if (!cpu_has_apic)
+ return -ENODEV;
+
+ /*
+ * if "noapic" boot option, don't look for IO-APICs
+ */
+ if (skip_ioapic_setup)
+ {
+ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
+ "due to 'noapic' option.\n");
+ return -ENODEV;
+ }
+
+ count =
+ acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
+ MAX_IO_APICS);
+ if (!count)
+ {
+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ return -ENODEV;
+ }
+ else if (count < 0)
+ {
+ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ return count;
+ }
+
+ count =
+ acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
+ acpi_parse_int_src_ovr, MAX_IRQ_SOURCES);
+ if (count < 0)
+ {
+ printk(KERN_ERR PREFIX
+ "Error parsing interrupt source overrides entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+
+    /* Fill in identity legacy mappings where no override */
+ mp_config_acpi_legacy_irqs();
+
+ count =
+ acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
+ acpi_parse_nmi_src, MAX_IRQ_SOURCES);
+ if (count < 0)
+ {
+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return count;
+ }
+
+ return 0;
}
static void __init acpi_process_madt(void)
{
- int error;
-
- if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
-
- /*
- * Parse MADT LAPIC entries
- */
- error = acpi_parse_madt_lapic_entries();
- if (!error) {
- acpi_lapic = true;
- generic_bigsmp_probe();
-
- /*
- * Parse MADT IO-APIC entries
- */
- error = acpi_parse_madt_ioapic_entries();
- if (!error) {
- acpi_ioapic = true;
-
- smp_found_config = true;
- clustered_apic_check();
- }
- }
- if (error == -EINVAL) {
- /*
- * Dell Precision Workstation 410, 610 come here.
- */
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
- disable_acpi();
- }
- }
+ int error;
+
+ if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt))
+ {
+
+ /*
+ * Parse MADT LAPIC entries
+ */
+ error = acpi_parse_madt_lapic_entries();
+ if (!error)
+ {
+ acpi_lapic = true;
+ generic_bigsmp_probe();
+
+ /*
+ * Parse MADT IO-APIC entries
+ */
+ error = acpi_parse_madt_ioapic_entries();
+ if (!error)
+ {
+ acpi_ioapic = true;
+
+ smp_found_config = true;
+ clustered_apic_check();
+ }
+ }
+ if (error == -EINVAL)
+ {
+ /*
+ * Dell Precision Workstation 410, 610 come here.
+ */
+ printk(KERN_ERR PREFIX
+ "Invalid BIOS MADT, disabling ACPI\n");
+ disable_acpi();
+ }
+ }
}
/*
@@ -689,57 +719,58 @@ static void __init acpi_process_madt(void)
int __init acpi_boot_table_init(void)
{
- int error;
-
- /*
- * If acpi_disabled, bail out
- * One exception: acpi=ht continues far enough to enumerate LAPICs
- */
- if (acpi_disabled && !acpi_ht)
- return 1;
-
- /*
- * Initialize the ACPI boot-time table parser.
- */
- error = acpi_table_init();
- if (error) {
- disable_acpi();
- return error;
- }
-
- return 0;
+ int error;
+
+ /*
+ * If acpi_disabled, bail out
+ * One exception: acpi=ht continues far enough to enumerate LAPICs
+ */
+ if (acpi_disabled && !acpi_ht)
+ return 1;
+
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ error = acpi_table_init();
+ if (error)
+ {
+ disable_acpi();
+ return error;
+ }
+
+ return 0;
}
int __init acpi_boot_init(void)
{
- /*
- * If acpi_disabled, bail out
- * One exception: acpi=ht continues far enough to enumerate LAPICs
- */
- if (acpi_disabled && !acpi_ht)
- return 1;
+ /*
+ * If acpi_disabled, bail out
+ * One exception: acpi=ht continues far enough to enumerate LAPICs
+ */
+ if (acpi_disabled && !acpi_ht)
+ return 1;
- /*
- * set sci_int and PM timer address
- */
- acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
+ /*
+ * set sci_int and PM timer address
+ */
+ acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
- /*
- * Process the Multiple APIC Description Table (MADT), if present
- */
- acpi_process_madt();
+ /*
+ * Process the Multiple APIC Description Table (MADT), if present
+ */
+ acpi_process_madt();
- acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
+ acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
- acpi_mmcfg_init();
+ acpi_mmcfg_init();
- acpi_iommu_init();
+ acpi_iommu_init();
- erst_init();
+ erst_init();
- acpi_hest_init();
+ acpi_hest_init();
- acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt);
+ acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt);
- return 0;
+ return 0;
}
diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 5931928d6f..9f8434cb5c 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -1,6 +1,6 @@
/*
- * cpu_idle - xen idle state module derived from Linux
- * drivers/acpi/processor_idle.c &
+ * cpu_idle - xen idle state module derived from Linux
+ * drivers/acpi/processor_idle.c &
* arch/x86/kernel/acpi/cstate.c
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
@@ -143,7 +143,7 @@ static void do_get_hw_residencies(void *arg)
GET_PC8_RES(hw_res->pc8);
GET_PC9_RES(hw_res->pc9);
GET_PC10_RES(hw_res->pc10);
- /* fall through */
+ /* fall through */
/* Sandy bridge */
case 0x2A:
case 0x2D:
@@ -170,7 +170,7 @@ static void do_get_hw_residencies(void *arg)
case 0x9E:
GET_PC2_RES(hw_res->pc2);
GET_CC7_RES(hw_res->cc7);
- /* fall through */
+ /* fall through */
/* Nehalem */
case 0x1A:
case 0x1E:
@@ -264,7 +264,7 @@ static void print_hw_residencies(uint32_t cpu)
hw_res.cc6, hw_res.cc7);
}
-static char* acpi_cstate_method_name[] =
+static char *acpi_cstate_method_name[] =
{
"NONE",
"SYSIO",
@@ -272,11 +272,23 @@ static char* acpi_cstate_method_name[] =
"HALT"
};
-static uint64_t get_stime_tick(void) { return (uint64_t)NOW(); }
-static uint64_t stime_ticks_elapsed(uint64_t t1, uint64_t t2) { return t2 - t1; }
-static uint64_t stime_tick_to_ns(uint64_t ticks) { return ticks; }
+static uint64_t get_stime_tick(void)
+{
+ return (uint64_t)NOW();
+}
+     * We need to register disabled CPUs as well to permit
+     * counting disabled CPUs. This allows us to size
+     * cpus_possible_map more accurately, so that we need
+     * not preallocate memory for all NR_CPUS
+     * when we use CPU hotplug.
+{
+ return ticks;
+}
-static uint64_t get_acpi_pm_tick(void) { return (uint64_t)inl(pmtmr_ioport); }
+static uint64_t get_acpi_pm_tick(void)
+{
+ return (uint64_t)inl(pmtmr_ioport);
+}
static uint64_t acpi_pm_ticks_elapsed(uint64_t t1, uint64_t t2)
{
if ( t2 >= t1 )
@@ -386,7 +398,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mask)
/* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
for_each_cpu(cpu, &target)
- mwait_wakeup(cpu) = 0;
+ mwait_wakeup(cpu) = 0;
cpumask_andnot(mask, mask, &target);
}
@@ -487,7 +499,8 @@ static int acpi_idle_bm_check(void)
return bm_status;
}
-static struct {
+static struct
+{
spinlock_t lock;
unsigned int count;
} c3_cpu_status = { .lock = SPIN_LOCK_UNLOCKED };
@@ -513,11 +526,11 @@ void trace_exit_reason(u32 *irq_traced)
}
/*
- * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During
+ * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During
* an Interrupt Service Routine"
- *
- * There was an errata with some Core i7 processors that an EOI transaction
- * may not be sent if software enters core C6 during an interrupt service
+ *
+ * There was an erratum with some Core i7 processors that an EOI transaction
+ * may not be sent if software enters core C6 during an interrupt service
* routine. So we don't enter deep Cx state if there is an EOI pending.
*/
static bool errata_c6_eoi_workaround(void)
@@ -664,7 +677,7 @@ static void acpi_processor_idle(void)
case ACPI_STATE_C3:
/*
- * Before invoking C3, be aware that TSC/APIC timer may be
+ * Before invoking C3, be aware that TSC/APIC timer may be
* stopped by H/W. Without carefully handling of TSC/APIC stop issues,
* deep C state can't work correctly.
*/
@@ -788,11 +801,11 @@ void acpi_dead_idle(void)
{
/*
* 1. The CLFLUSH is a workaround for erratum AAI65 for
- * the Xeon 7400 series.
+ * the Xeon 7400 series.
* 2. The WBINVD is insufficient due to the spurious-wakeup
* case where we return around the loop.
- * 3. Unlike wbinvd, clflush is a light weight but not serializing
- * instruction, hence memory fence is necessary to make sure all
+ * 3. Unlike wbinvd, clflush is a light weight but not serializing
+ * instruction, hence memory fence is necessary to make sure all
* load/store visible before flush cache line.
*/
mb();
@@ -922,7 +935,8 @@ static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx)
*
* This routine is called only after all the CPUs are online
*/
-static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags)
+static void acpi_processor_power_init_bm_check(struct acpi_processor_flags
+ *flags)
{
struct cpuinfo_x86 *c = &current_cpu_data;
@@ -947,8 +961,8 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
* P4, Core and beyond CPUs
*/
if ( c->x86_vendor == X86_VENDOR_INTEL &&
- (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
- flags->bm_control = 0;
+ (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
+ flags->bm_control = 0;
}
#define VENDOR_INTEL (1)
@@ -967,7 +981,7 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
- if ( cx->reg.bit_width != VENDOR_INTEL ||
+ if ( cx->reg.bit_width != VENDOR_INTEL ||
cx->reg.bit_offset != NATIVE_CSTATE_BEYOND_HALT )
return -EINVAL;
@@ -1009,14 +1023,14 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
{
/* bus mastering control is necessary */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "C3 support requires BM control\n"));
+ "C3 support requires BM control\n"));
return -EINVAL;
}
else
{
/* Here we enter C3 without bus mastering */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "C3 support without BM control\n"));
+ "C3 support without BM control\n"));
}
}
/*
@@ -1037,8 +1051,8 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
if ( !(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD) )
{
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Cache invalidation should work properly"
- " for C3 to be enabled on SMP systems\n"));
+ "Cache invalidation should work properly"
+ " for C3 to be enabled on SMP systems\n"));
return -EINVAL;
}
}
@@ -1076,7 +1090,7 @@ static void set_cx(
default:
if ( acpi_power->count >= ACPI_PROCESSOR_MAX_POWER )
{
- case ACPI_STATE_C0:
+ case ACPI_STATE_C0:
printk(XENLOG_WARNING "CPU%u: C%d data ignored\n",
acpi_power->cpu, xen_cx->type);
return;
@@ -1154,14 +1168,14 @@ static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
"\t pwr_setup_done[%d], bm_rld_set[%d]\n",
power->flags.bm_control, power->flags.bm_check, power->flags.has_cst,
power->flags.power_setup_done, power->flags.bm_rld_set);
-
+
states = power->states;
-
+
for ( i = 0; i < power->count; i++ )
{
if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) )
return;
-
+
printk("\tstates[%d]:\n", i);
printk("\t\treg.space_id = %#x\n", state.reg.space_id);
printk("\t\treg.bit_width = %#x\n", state.reg.bit_width);
@@ -1174,7 +1188,7 @@ static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
csd = state.dp;
printk("\t\tdp(@0x%p)\n", csd.p);
-
+
if ( csd.p != NULL )
{
if ( unlikely(copy_from_guest(&dp, csd, 1)) )
@@ -1270,7 +1284,7 @@ long set_cx_pminfo(uint32_t acpi_id, struct xen_processor_power *power)
dead_idle = acpi_dead_idle;
}
-
+
return 0;
}
@@ -1434,7 +1448,8 @@ static int cpu_callback(
return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block cpu_nfb =
+{
.notifier_call = cpu_callback
};
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 7086d1aa15..fd4e0be74e 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -43,7 +43,8 @@
#include <acpi/acpi.h>
#include <acpi/cpufreq/cpufreq.h>
-enum {
+enum
+{
UNDEFINED_CAPABLE = 0,
SYSTEM_INTEL_MSR_CAPABLE,
SYSTEM_IO_CAPABLE,
@@ -74,7 +75,8 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
perf = data->acpi_data;
- for (i=0; i<perf->state_count; i++) {
+ for (i=0; i<perf->state_count; i++)
+ {
if (value == perf->states[i].status)
return data->freq_table[i].frequency;
}
@@ -89,7 +91,8 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
msr &= INTEL_MSR_RANGE;
perf = data->acpi_data;
- for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ {
if (msr == perf->states[data->freq_table[i].index].status)
return data->freq_table[i].frequency;
}
@@ -98,7 +101,8 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
- switch (data->arch_cpu_flags) {
+ switch (data->arch_cpu_flags)
+ {
case SYSTEM_INTEL_MSR_CAPABLE:
return extract_msr(val, data);
case SYSTEM_IO_CAPABLE:
@@ -108,21 +112,25 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
}
}
-struct msr_addr {
+struct msr_addr
+{
u32 reg;
};
-struct io_addr {
+struct io_addr
+{
u16 port;
u8 bit_width;
};
-typedef union {
+typedef union
+{
struct msr_addr msr;
struct io_addr io;
} drv_addr_union;
-struct drv_cmd {
+struct drv_cmd
+{
unsigned int type;
const cpumask_t *mask;
drv_addr_union addr;
@@ -135,13 +143,14 @@ static void do_drv_read(void *drvcmd)
cmd = (struct drv_cmd *)drvcmd;
- switch (cmd->type) {
+ switch (cmd->type)
+ {
case SYSTEM_INTEL_MSR_CAPABLE:
rdmsrl(cmd->addr.msr.reg, cmd->val);
break;
case SYSTEM_IO_CAPABLE:
acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
- &cmd->val, (u32)cmd->addr.io.bit_width);
+ &cmd->val, (u32)cmd->addr.io.bit_width);
break;
default:
break;
@@ -155,16 +164,17 @@ static void do_drv_write(void *drvcmd)
cmd = (struct drv_cmd *)drvcmd;
- switch (cmd->type) {
+ switch (cmd->type)
+ {
case SYSTEM_INTEL_MSR_CAPABLE:
rdmsrl(cmd->addr.msr.reg, msr_content);
msr_content = (msr_content & ~INTEL_MSR_RANGE)
- | (cmd->val & INTEL_MSR_RANGE);
+ | (cmd->val & INTEL_MSR_RANGE);
wrmsrl(cmd->addr.msr.reg, msr_content);
break;
case SYSTEM_IO_CAPABLE:
acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
- cmd->val, (u32)cmd->addr.io.bit_width);
+ cmd->val, (u32)cmd->addr.io.bit_width);
break;
default:
break;
@@ -209,9 +219,10 @@ static u32 get_cur_val(const cpumask_t *mask)
policy = per_cpu(cpufreq_cpu_policy, cpu);
if (!policy || !cpufreq_drv_data[policy->cpu])
- return 0;
+ return 0;
- switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) {
+ switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags)
+ {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
@@ -232,9 +243,12 @@ static u32 get_cur_val(const cpumask_t *mask)
return cmd.val;
}
-struct perf_pair {
- union {
- struct {
+struct perf_pair
+{
+ union
+ {
+ struct
+ {
uint32_t lo;
uint32_t hi;
} split;
@@ -267,7 +281,7 @@ static void read_measured_perf_ctrs(void *_readin)
*/
unsigned int get_measured_perf(unsigned int cpu, unsigned int flag)
{
- struct cpufreq_policy *policy;
+ struct cpufreq_policy *policy;
struct perf_pair readin, cur, *saved;
unsigned int perf_percent;
unsigned int retval;
@@ -295,11 +309,13 @@ unsigned int get_measured_perf(unsigned int cpu, unsigned int flag)
return 0;
}
- if (cpu == smp_processor_id()) {
+ if (cpu == smp_processor_id())
read_measured_perf_ctrs((void *)&readin);
- } else {
+
+ else
+ {
on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs,
- &readin, 1);
+ &readin, 1);
}
cur.aperf.whole = readin.aperf.whole - saved->aperf.whole;
@@ -307,7 +323,8 @@ unsigned int get_measured_perf(unsigned int cpu, unsigned int flag)
saved->aperf.whole = readin.aperf.whole;
saved->mperf.whole = readin.mperf.whole;
- if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
+ if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole))
+ {
int shift_count = 7;
cur.aperf.whole >>= shift_count;
cur.mperf.whole >>= shift_count;
@@ -338,7 +355,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
data = cpufreq_drv_data[policy->cpu];
if (unlikely(data == NULL ||
- data->acpi_data == NULL || data->freq_table == NULL))
+ data->acpi_data == NULL || data->freq_table == NULL))
return 0;
freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
@@ -357,7 +374,8 @@ static void feature_detect(void *info)
}
eax = cpuid_eax(6);
- if (eax & 0x2) {
+ if (eax & 0x2)
+ {
policy->turbo = CPUFREQ_TURBO_ENABLED;
if (cpufreq_verbose)
printk(XENLOG_INFO "CPU%u: Turbo Mode detected and enabled\n",
@@ -371,7 +389,8 @@ static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
unsigned int cur_freq;
unsigned int i;
- for (i=0; i<100; i++) {
+ for (i=0; i<100; i++)
+ {
cur_freq = extract_freq(get_cur_val(mask), data);
if (cur_freq == freq)
return 1;
@@ -394,9 +413,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
int result = 0;
if (unlikely(data == NULL ||
- data->acpi_data == NULL || data->freq_table == NULL)) {
+ data->acpi_data == NULL || data->freq_table == NULL))
return -ENODEV;
- }
if (policy->turbo == CPUFREQ_TURBO_DISABLED)
if (target_freq > policy->cpuinfo.second_max_freq)
@@ -413,14 +431,16 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
cpumask_and(&online_policy_cpus, &cpu_online_map, policy->cpus);
next_perf_state = data->freq_table[next_state].index;
- if (perf->state == next_perf_state) {
+ if (perf->state == next_perf_state)
+ {
if (unlikely(policy->resume))
policy->resume = 0;
else
return 0;
}
- switch (data->arch_cpu_flags) {
+ switch (data->arch_cpu_flags)
+ {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -446,13 +466,14 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
drv_write(&cmd);
- if (acpi_pstate_strict && !check_freqs(cmd.mask, freqs.new, data)) {
+ if (acpi_pstate_strict && !check_freqs(cmd.mask, freqs.new, data))
+ {
printk(KERN_WARNING "Fail transfer to new freq %d\n", freqs.new);
return -EAGAIN;
}
for_each_cpu(j, &online_policy_cpus)
- cpufreq_statistic_update(j, perf->state, next_perf_state);
+ cpufreq_statistic_update(j, perf->state, next_perf_state);
perf->state = next_perf_state;
policy->cur = freqs.new;
@@ -471,8 +492,8 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
perf = &processor_pminfo[policy->cpu]->perf;
- cpufreq_verify_within_limits(policy, 0,
- perf->states[perf->platform_limit].core_frequency * 1000);
+ cpufreq_verify_within_limits(policy, 0,
+ perf->states[perf->platform_limit].core_frequency * 1000);
return cpufreq_frequency_table_verify(policy, data->freq_table);
}
@@ -482,30 +503,35 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
struct processor_performance *perf = data->acpi_data;
- if (cpu_khz) {
+ if (cpu_khz)
+ {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
unsigned long freqn = perf->states[0].core_frequency * 1000;
- for (i=0; i<(perf->state_count-1); i++) {
+ for (i=0; i<(perf->state_count-1); i++)
+ {
freq = freqn;
freqn = perf->states[i+1].core_frequency * 1000;
- if ((2 * cpu_khz) > (freqn + freq)) {
+ if ((2 * cpu_khz) > (freqn + freq))
+ {
perf->state = i;
return freq;
}
}
perf->state = perf->state_count-1;
return freqn;
- } else {
+ }
+ else
+ {
/* assume CPU is at P0... */
perf->state = 0;
return perf->states[0].core_frequency * 1000;
}
}
-static int
+static int
acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
@@ -527,7 +553,8 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
perf = data->acpi_data;
policy->shared_type = perf->shared_type;
- switch (perf->control_register.space_id) {
+ switch (perf->control_register.space_id)
+ {
case ACPI_ADR_SPACE_SYSTEM_IO:
if (cpufreq_verbose)
printk("xen_pminfo: @acpi_cpufreq_cpu_init,"
@@ -538,7 +565,8 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (cpufreq_verbose)
printk("xen_pminfo: @acpi_cpufreq_cpu_init,"
"HARDWARE addr space\n");
- if (!check_est_cpu(cpu)) {
+ if (!check_est_cpu(cpu))
+ {
result = -ENODEV;
goto err_unreg;
}
@@ -549,16 +577,18 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- data->freq_table = xmalloc_array(struct cpufreq_frequency_table,
- (perf->state_count+1));
- if (!data->freq_table) {
+ data->freq_table = xmalloc_array(struct cpufreq_frequency_table,
+ (perf->state_count+1));
+ if (!data->freq_table)
+ {
result = -ENOMEM;
goto err_unreg;
}
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
- for (i=0; i<perf->state_count; i++) {
+ for (i=0; i<perf->state_count; i++)
+ {
if ((perf->states[i].transition_latency * 1000) >
policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency =
@@ -568,7 +598,8 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR;
/* table init */
- for (i=0; i<perf->state_count; i++) {
+ for (i=0; i<perf->state_count; i++)
+ {
if (i>0 && perf->states[i].core_frequency >=
data->freq_table[valid_states-1].frequency / 1000)
continue;
@@ -585,7 +616,8 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (result)
goto err_freqfree;
- switch (perf->control_register.space_id) {
+ switch (perf->control_register.space_id)
+ {
case ACPI_ADR_SPACE_SYSTEM_IO:
/* Current speed is unknown and not detectable by IO port */
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
@@ -624,7 +656,8 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu];
- if (data) {
+ if (data)
+ {
cpufreq_drv_data[policy->cpu] = NULL;
xfree(data->freq_table);
xfree(data);
@@ -633,7 +666,8 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static const struct cpufreq_driver __initconstrel acpi_cpufreq_driver = {
+static const struct cpufreq_driver __initconstrel acpi_cpufreq_driver =
+{
.name = "acpi-cpufreq",
.verify = acpi_cpufreq_verify,
.target = acpi_cpufreq_target,
@@ -649,8 +683,8 @@ static int __init cpufreq_driver_init(void)
(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL))
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
else if ((cpufreq_controller == FREQCTL_xen) &&
- (boot_cpu_data.x86_vendor &
- (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ (boot_cpu_data.x86_vendor &
+ (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
ret = powernow_register_driver();
return ret;
diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c b/xen/arch/x86/acpi/cpufreq/powernow.c
index 72ab6a1eba..78d9ac583f 100644
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -61,22 +61,23 @@ static void update_cpb(void *data)
{
struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
- if (policy->turbo != CPUFREQ_TURBO_UNSUPPORTED) {
+ if (policy->turbo != CPUFREQ_TURBO_UNSUPPORTED)
+ {
uint64_t msr_content;
-
+
rdmsrl(MSR_K8_HWCR, msr_content);
if (policy->turbo == CPUFREQ_TURBO_ENABLED)
msr_content &= ~MSR_HWCR_CPBDIS_MASK;
else
- msr_content |= MSR_HWCR_CPBDIS_MASK;
+ msr_content |= MSR_HWCR_CPBDIS_MASK;
wrmsrl(MSR_K8_HWCR, msr_content);
}
}
static int powernow_cpufreq_update (int cpuid,
- struct cpufreq_policy *policy)
+ struct cpufreq_policy *policy)
{
if (!cpumask_test_cpu(cpuid, &cpu_online_map))
return -EINVAL;
@@ -87,7 +88,7 @@ static int powernow_cpufreq_update (int cpuid,
}
static int powernow_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int target_freq, unsigned int relation)
{
struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu];
struct processor_performance *perf;
@@ -96,9 +97,8 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
int result;
if (unlikely(data == NULL ||
- data->acpi_data == NULL || data->freq_table == NULL)) {
+ data->acpi_data == NULL || data->freq_table == NULL))
return -ENODEV;
- }
perf = data->acpi_data;
result = cpufreq_frequency_table_target(policy,
@@ -109,18 +109,22 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
return result;
next_perf_state = data->freq_table[next_state].index;
- if (perf->state == next_perf_state) {
- if (unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME))
+ if (perf->state == next_perf_state)
+ {
+ if (unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME))
data->arch_cpu_flags &= ~ARCH_CPU_FLAG_RESUME;
else
return 0;
}
if (policy->shared_type == CPUFREQ_SHARED_TYPE_HW &&
- likely(policy->cpu == smp_processor_id())) {
+ likely(policy->cpu == smp_processor_id()))
+ {
transition_pstate(&next_perf_state);
cpufreq_statistic_update(policy->cpu, perf->state, next_perf_state);
- } else {
+ }
+ else
+ {
cpumask_t online_policy_cpus;
unsigned int cpu;
@@ -134,7 +138,7 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
transition_pstate(&next_perf_state);
for_each_cpu(cpu, &online_policy_cpus)
- cpufreq_statistic_update(cpu, perf->state, next_perf_state);
+ cpufreq_statistic_update(cpu, perf->state, next_perf_state);
}
perf->state = next_perf_state;
@@ -168,7 +172,8 @@ static void amd_fixup_frequency(struct xen_processor_px *px)
px->core_frequency = (100 * (fid + 8)) >> did;
}
-struct amd_cpu_data {
+struct amd_cpu_data
+{
struct processor_performance *perf;
u32 max_hw_pstate;
};
@@ -199,8 +204,8 @@ static int powernow_cpufreq_verify(struct cpufreq_policy *policy)
perf = &processor_pminfo[policy->cpu]->perf;
- cpufreq_verify_within_limits(policy, 0,
- perf->states[perf->platform_limit].core_frequency * 1000);
+ cpufreq_verify_within_limits(policy, 0,
+ perf->states[perf->platform_limit].core_frequency * 1000);
return cpufreq_frequency_table_verify(policy, data->freq_table);
}
@@ -217,7 +222,8 @@ static void feature_detect(void *info)
}
edx = cpuid_edx(CPUID_FREQ_VOLT_CAPABILITIES);
- if ((edx & CPB_CAPABLE) == CPB_CAPABLE) {
+ if ((edx & CPB_CAPABLE) == CPB_CAPABLE)
+ {
policy->turbo = CPUFREQ_TURBO_ENABLED;
if (cpufreq_verbose)
printk(XENLOG_INFO
@@ -249,40 +255,46 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->shared_type = perf->shared_type;
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
- policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ {
cpumask_set_cpu(cpu, policy->cpus);
- if (cpumask_weight(policy->cpus) != 1) {
+ if (cpumask_weight(policy->cpus) != 1)
+ {
printk(XENLOG_WARNING "Unsupported sharing type %d (%u CPUs)\n",
policy->shared_type, cpumask_weight(policy->cpus));
result = -ENODEV;
goto err_unreg;
}
- } else {
- cpumask_copy(policy->cpus, cpumask_of(cpu));
}
+ else
+ cpumask_copy(policy->cpus, cpumask_of(cpu));
/* capability check */
- if (perf->state_count <= 1) {
+ if (perf->state_count <= 1)
+ {
printk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
- if (perf->control_register.space_id != perf->status_register.space_id) {
+ if (perf->control_register.space_id != perf->status_register.space_id)
+ {
result = -ENODEV;
goto err_unreg;
}
- data->freq_table = xmalloc_array(struct cpufreq_frequency_table,
- (perf->state_count+1));
- if (!data->freq_table) {
+ data->freq_table = xmalloc_array(struct cpufreq_frequency_table,
+ (perf->state_count+1));
+ if (!data->freq_table)
+ {
result = -ENOMEM;
goto err_unreg;
}
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
- for (i=0; i<perf->state_count; i++) {
+ for (i=0; i<perf->state_count; i++)
+ {
if ((perf->states[i].transition_latency * 1000) >
policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency =
@@ -294,7 +306,8 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy)
on_selected_cpus(cpumask_of(cpu), get_cpu_data, &info, 1);
/* table init */
- for (i = 0; i < perf->state_count && i <= info.max_hw_pstate; i++) {
+ for (i = 0; i < perf->state_count && i <= info.max_hw_pstate; i++)
+ {
if (i > 0 && perf->states[i].core_frequency >=
data->freq_table[valid_states-1].frequency / 1000)
continue;
@@ -313,7 +326,7 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (c->cpuid_level >= 6)
on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
-
+
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
@@ -336,7 +349,8 @@ static int powernow_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu];
- if (data) {
+ if (data)
+ {
cpufreq_drv_data[policy->cpu] = NULL;
xfree(data->freq_table);
xfree(data);
@@ -345,7 +359,8 @@ static int powernow_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static const struct cpufreq_driver __initconstrel powernow_cpufreq_driver = {
+static const struct cpufreq_driver __initconstrel powernow_cpufreq_driver =
+{
.name = "powernow",
.verify = powernow_cpufreq_verify,
.target = powernow_cpufreq_target,
@@ -358,7 +373,8 @@ unsigned int __init powernow_register_driver()
{
unsigned int i, ret = 0;
- for_each_online_cpu(i) {
+ for_each_online_cpu(i)
+ {
struct cpuinfo_x86 *c = &cpu_data[i];
if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
ret = -ENODEV;
diff --git a/xen/arch/x86/acpi/cpuidle_menu.c b/xen/arch/x86/acpi/cpuidle_menu.c
index 6ff5fb8ff2..16d16a33f8 100644
--- a/xen/arch/x86/acpi/cpuidle_menu.c
+++ b/xen/arch/x86/acpi/cpuidle_menu.c
@@ -1,6 +1,6 @@
/*
* cpuidle_menu - menu governor for cpu idle, main idea come from Linux.
- * drivers/cpuidle/governors/menu.c
+ * drivers/cpuidle/governors/menu.c
*
* Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
* Copyright (C) 2007, 2008 Intel Corporation
@@ -92,7 +92,8 @@
* measured idle time.
*/
-struct perf_factor{
+struct perf_factor
+{
s_time_t time_stamp;
s_time_t duration;
unsigned int irq_count_stamp;
@@ -116,19 +117,19 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static inline int which_bucket(unsigned int duration)
{
- int bucket = 0;
-
- if (duration < 10)
- return bucket;
- if (duration < 100)
- return bucket + 1;
- if (duration < 1000)
- return bucket + 2;
- if (duration < 10000)
- return bucket + 3;
- if (duration < 100000)
- return bucket + 4;
- return bucket + 5;
+ int bucket = 0;
+
+ if (duration < 10)
+ return bucket;
+ if (duration < 100)
+ return bucket + 1;
+ if (duration < 1000)
+ return bucket + 2;
+ if (duration < 10000)
+ return bucket + 3;
+ if (duration < 100000)
+ return bucket + 4;
+ return bucket + 5;
}
/*
@@ -153,10 +154,10 @@ static inline s_time_t avg_intr_interval_us(void)
now = NOW();
duration = (data->pf.duration + (now - data->pf.time_stamp)
- * (DECAY - 1)) / DECAY;
+ * (DECAY - 1)) / DECAY;
irq_sum = (data->pf.irq_sum + (this_cpu(irq_count) - data->pf.irq_count_stamp)
- * (DECAY - 1)) / DECAY;
+ * (DECAY - 1)) / DECAY;
if (irq_sum == 0)
/* no irq recently, so return a big enough interval: 1 sec */
@@ -164,7 +165,8 @@ static inline s_time_t avg_intr_interval_us(void)
else
avg_interval = duration / irq_sum / 1000; /* in us */
- if ( duration >= SAMPLING_PERIOD){
+ if ( duration >= SAMPLING_PERIOD)
+ {
data->pf.time_stamp = now;
data->pf.duration = duration;
data->pf.irq_count_stamp= this_cpu(irq_count);
@@ -203,8 +205,8 @@ static int menu_select(struct acpi_processor_power *power)
io_interval = avg_intr_interval_us();
data->latency_factor = DIV_ROUND(
- data->latency_factor * (DECAY - 1) + data->measured_us,
- DECAY);
+ data->latency_factor * (DECAY - 1) + data->measured_us,
+ DECAY);
/*
* if the correction factor is 0 (eg first time init or cpu hotplug
@@ -215,8 +217,8 @@ static int menu_select(struct acpi_processor_power *power)
/* Make sure to round up for half microseconds */
data->predicted_us = DIV_ROUND(
- data->expected_us * data->correction_factor[data->bucket],
- RESOLUTION * DECAY);
+ data->expected_us * data->correction_factor[data->bucket],
+ RESOLUTION * DECAY);
/* find the deepest idle state that satisfies our constraints */
for ( i = CPUIDLE_DRIVER_STATE_START + 1; i < power->count; i++ )
@@ -254,7 +256,7 @@ static void menu_reflect(struct acpi_processor_power *power)
/* update our correction ratio */
new_factor = data->correction_factor[data->bucket]
- * (DECAY - 1) / DECAY;
+ * (DECAY - 1) / DECAY;
if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
new_factor += RESOLUTION * data->measured_us / data->expected_us;
diff --git a/xen/arch/x86/acpi/lib.c b/xen/arch/x86/acpi/lib.c
index 265b9ad819..685f3ed1ee 100644
--- a/xen/arch/x86/acpi/lib.c
+++ b/xen/arch/x86/acpi/lib.c
@@ -30,7 +30,7 @@ u8 __read_mostly acpi_enable_value;
u8 __read_mostly acpi_disable_value;
u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
- {[0 ... MAX_MADT_ENTRIES - 1] = BAD_APICID };
+{[0 ... MAX_MADT_ENTRIES - 1] = BAD_APICID };
/*
* Important Safety Note: The fixed ACPI page numbers are *subtracted*
@@ -39,87 +39,88 @@ u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
*/
char *__acpi_map_table(paddr_t phys, unsigned long size)
{
- unsigned long base, offset, mapped_size;
- int idx;
-
- /* XEN: RAM holes above 1MB are not permanently mapped. */
- if ((phys + size) <= (1 * 1024 * 1024))
- return __va(phys);
-
- offset = phys & (PAGE_SIZE - 1);
- mapped_size = PAGE_SIZE - offset;
- set_fixmap(FIX_ACPI_END, phys);
- base = __fix_to_virt(FIX_ACPI_END);
-
- /*
- * Most cases can be covered by the below.
- */
- idx = FIX_ACPI_END;
- while (mapped_size < size) {
- if (--idx < FIX_ACPI_BEGIN)
- return NULL; /* cannot handle this */
- phys += PAGE_SIZE;
- set_fixmap(idx, phys);
- mapped_size += PAGE_SIZE;
- }
-
- return ((char *) base + offset);
+ unsigned long base, offset, mapped_size;
+ int idx;
+
+ /* XEN: RAM holes above 1MB are not permanently mapped. */
+ if ((phys + size) <= (1 * 1024 * 1024))
+ return __va(phys);
+
+ offset = phys & (PAGE_SIZE - 1);
+ mapped_size = PAGE_SIZE - offset;
+ set_fixmap(FIX_ACPI_END, phys);
+ base = __fix_to_virt(FIX_ACPI_END);
+
+ /*
+ * Most cases can be covered by the below.
+ */
+ idx = FIX_ACPI_END;
+ while (mapped_size < size)
+ {
+ if (--idx < FIX_ACPI_BEGIN)
+ return NULL; /* cannot handle this */
+ phys += PAGE_SIZE;
+ set_fixmap(idx, phys);
+ mapped_size += PAGE_SIZE;
+ }
+
+ return ((char *) base + offset);
}
unsigned int acpi_get_processor_id(unsigned int cpu)
{
- unsigned int acpiid, apicid;
+ unsigned int acpiid, apicid;
- if ((apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID)
- return INVALID_ACPIID;
+ if ((apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID)
+ return INVALID_ACPIID;
- for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++)
- if (x86_acpiid_to_apicid[acpiid] == apicid)
- return acpiid;
+ for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++)
+ if (x86_acpiid_to_apicid[acpiid] == apicid)
+ return acpiid;
- return INVALID_ACPIID;
+ return INVALID_ACPIID;
}
static void get_mwait_ecx(void *info)
{
- *(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF);
+ *(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF);
}
int arch_acpi_set_pdc_bits(u32 acpi_id, u32 *pdc, u32 mask)
{
- unsigned int cpu = get_cpu_id(acpi_id);
- struct cpuinfo_x86 *c;
- u32 ecx;
-
- if (!(acpi_id + 1))
- c = &boot_cpu_data;
- else if (cpu >= nr_cpu_ids || !cpu_online(cpu))
- return -EINVAL;
- else
- c = cpu_data + cpu;
-
- pdc[2] |= ACPI_PDC_C_CAPABILITY_SMP & mask;
-
- if (cpu_has(c, X86_FEATURE_EIST))
- pdc[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP & mask;
-
- if (cpu_has(c, X86_FEATURE_ACPI))
- pdc[2] |= ACPI_PDC_T_FFH & mask;
-
- /*
- * If mwait/monitor or its break-on-interrupt extension are
- * unsupported, Cx_FFH will be disabled.
- */
- if (!cpu_has(c, X86_FEATURE_MONITOR) ||
- c->cpuid_level < CPUID_MWAIT_LEAF)
- ecx = 0;
- else if (c == &boot_cpu_data || cpu == smp_processor_id())
- ecx = cpuid_ecx(CPUID_MWAIT_LEAF);
- else
- on_selected_cpus(cpumask_of(cpu), get_mwait_ecx, &ecx, 1);
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
- pdc[2] &= ~(ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH);
-
- return 0;
+ unsigned int cpu = get_cpu_id(acpi_id);
+ struct cpuinfo_x86 *c;
+ u32 ecx;
+
+ if (!(acpi_id + 1))
+ c = &boot_cpu_data;
+ else if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ return -EINVAL;
+ else
+ c = cpu_data + cpu;
+
+ pdc[2] |= ACPI_PDC_C_CAPABILITY_SMP & mask;
+
+ if (cpu_has(c, X86_FEATURE_EIST))
+ pdc[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP & mask;
+
+ if (cpu_has(c, X86_FEATURE_ACPI))
+ pdc[2] |= ACPI_PDC_T_FFH & mask;
+
+ /*
+ * If mwait/monitor or its break-on-interrupt extension are
+ * unsupported, Cx_FFH will be disabled.
+ */
+ if (!cpu_has(c, X86_FEATURE_MONITOR) ||
+ c->cpuid_level < CPUID_MWAIT_LEAF)
+ ecx = 0;
+ else if (c == &boot_cpu_data || cpu == smp_processor_id())
+ ecx = cpuid_ecx(CPUID_MWAIT_LEAF);
+ else
+ on_selected_cpus(cpumask_of(cpu), get_mwait_ecx, &ecx, 1);
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ pdc[2] &= ~(ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH);
+
+ return 0;
}
diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index aecc754fdb..235a6e4a98 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -85,22 +85,22 @@ static void device_power_up(enum dev_power_saved saved)
case SAVED_ALL:
case SAVED_LAPIC:
lapic_resume();
- /* fall through */
+ /* fall through */
case SAVED_IOMMU:
iommu_resume();
- /* fall through */
+ /* fall through */
case SAVED_IOAPIC:
ioapic_resume();
- /* fall through */
+ /* fall through */
case SAVED_I8259A:
i8259A_resume();
- /* fall through */
+ /* fall through */
case SAVED_TIME:
time_resume();
- /* fall through */
+ /* fall through */
case SAVED_CONSOLE:
console_resume();
- /* fall through */
+ /* fall through */
case SAVED_NONE:
break;
default:
@@ -120,7 +120,7 @@ static void freeze_domains(void)
* the domain list). Otherwise we could miss concurrently-created domains.
*/
for_each_domain ( d )
- domain_pause(d);
+ domain_pause(d);
rcu_read_unlock(&domlist_read_lock);
}
@@ -145,7 +145,7 @@ static void acpi_sleep_prepare(u32 state)
return;
wakeup_vector_va = __acpi_map_table(
- acpi_sinfo.wakeup_vector, sizeof(uint64_t));
+ acpi_sinfo.wakeup_vector, sizeof(uint64_t));
/* TBoot will set resume vector itself (when it is safe to do so). */
if ( tboot_in_measured_env() )
@@ -262,7 +262,7 @@ static int enter_state(u32 state)
ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
spec_ctrl_exit_idle(ci);
- done:
+done:
spin_debug_enable();
local_irq_restore(flags);
acpi_sleep_post(state);
@@ -270,7 +270,7 @@ static int enter_state(u32 state)
BUG();
cpufreq_add_cpu(0);
- enable_cpu:
+enable_cpu:
rcu_barrier();
mtrr_aps_sync_begin();
enable_nonboot_cpus();
@@ -388,21 +388,21 @@ static void tboot_sleep(u8 sleep_state)
g_tboot_shared->acpi_sinfo.wakeup_vector = acpi_sinfo.wakeup_vector;
g_tboot_shared->acpi_sinfo.vector_width = acpi_sinfo.vector_width;
g_tboot_shared->acpi_sinfo.kernel_s3_resume_vector =
- bootsym_phys(wakeup_start);
+ bootsym_phys(wakeup_start);
switch ( sleep_state )
{
- case ACPI_STATE_S3:
- shutdown_type = TB_SHUTDOWN_S3;
- break;
- case ACPI_STATE_S4:
- shutdown_type = TB_SHUTDOWN_S4;
- break;
- case ACPI_STATE_S5:
- shutdown_type = TB_SHUTDOWN_S5;
- break;
- default:
- return;
+ case ACPI_STATE_S3:
+ shutdown_type = TB_SHUTDOWN_S3;
+ break;
+ case ACPI_STATE_S4:
+ shutdown_type = TB_SHUTDOWN_S4;
+ break;
+ case ACPI_STATE_S5:
+ shutdown_type = TB_SHUTDOWN_S5;
+ break;
+ default:
+ return;
}
tboot_shutdown(shutdown_type);
diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c
index ce2b4302e6..0475b254a1 100644
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -32,7 +32,8 @@
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
#ifdef K8_NOP1
-static const unsigned char k8nops[] init_or_livepatch_const = {
+static const unsigned char k8nops[] init_or_livepatch_const =
+{
K8_NOP1,
K8_NOP2,
K8_NOP3,
@@ -43,7 +44,9 @@ static const unsigned char k8nops[] init_or_livepatch_const = {
K8_NOP8,
K8_NOP9,
};
-static const unsigned char * const k8_nops[ASM_NOP_MAX+1] init_or_livepatch_constrel = {
+static const unsigned char *const k8_nops[ASM_NOP_MAX+1]
+init_or_livepatch_constrel =
+{
NULL,
k8nops,
k8nops + 1,
@@ -58,7 +61,8 @@ static const unsigned char * const k8_nops[ASM_NOP_MAX+1] init_or_livepatch_cons
#endif
#ifdef P6_NOP1
-static const unsigned char p6nops[] init_or_livepatch_const = {
+static const unsigned char p6nops[] init_or_livepatch_const =
+{
P6_NOP1,
P6_NOP2,
P6_NOP3,
@@ -69,7 +73,9 @@ static const unsigned char p6nops[] init_or_livepatch_const = {
P6_NOP8,
P6_NOP9,
};
-static const unsigned char * const p6_nops[ASM_NOP_MAX+1] init_or_livepatch_constrel = {
+static const unsigned char *const p6_nops[ASM_NOP_MAX+1]
+init_or_livepatch_constrel =
+{
NULL,
p6nops,
p6nops + 1,
@@ -83,7 +89,7 @@ static const unsigned char * const p6_nops[ASM_NOP_MAX+1] init_or_livepatch_cons
};
#endif
-static const unsigned char * const *ideal_nops init_or_livepatch_data = p6_nops;
+static const unsigned char *const *ideal_nops init_or_livepatch_data = p6_nops;
#ifdef HAVE_AS_NOPS_DIRECTIVE
diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c
index 9c3c998d34..7e4242c5aa 100644
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -44,7 +44,8 @@ static bool __read_mostly tdt_enabled;
static bool __initdata tdt_enable = true;
boolean_param("tdt", tdt_enable);
-static struct {
+static struct
+{
int active;
/* r/w apic fields */
unsigned int apic_id;
@@ -171,7 +172,8 @@ void clear_local_APIC(void)
* Masking an LVT entry on a P6 can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this.
*/
- if (maxlvt >= 3) {
+ if (maxlvt >= 3)
+ {
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
}
@@ -185,20 +187,23 @@ void clear_local_APIC(void)
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
- if (maxlvt >= 4) {
+ if (maxlvt >= 4)
+ {
v = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
}
-/* lets not touch this if we didn't frob it */
+    /* let's not touch this if we didn't frob it */
#ifdef CONFIG_X86_MCE_THERMAL
- if (maxlvt >= 5) {
+ if (maxlvt >= 5)
+ {
v = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
}
#endif
- if (maxlvt >= 6) {
+ if (maxlvt >= 6)
+ {
v = apic_read(APIC_CMCI);
apic_write(APIC_CMCI, v | APIC_LVT_MASKED);
}
@@ -227,7 +232,8 @@ void clear_local_APIC(void)
void __init connect_bsp_APIC(void)
{
- if (pic_mode) {
+ if (pic_mode)
+ {
/*
* Do not trust the local APIC being empty at bootup.
*/
@@ -246,7 +252,8 @@ void __init connect_bsp_APIC(void)
void disconnect_bsp_APIC(int virt_wire_setup)
{
- if (pic_mode) {
+ if (pic_mode)
+ {
/*
* Put the board back into PIC mode (has an effect
* only on certain older boards). Note that APIC
@@ -258,7 +265,8 @@ void disconnect_bsp_APIC(int virt_wire_setup)
outb(0x70, 0x22);
outb(0x00, 0x23);
}
- else {
+ else
+ {
/* Go back to Virtual Wire compatibility mode */
unsigned long value;
@@ -269,7 +277,8 @@ void disconnect_bsp_APIC(int virt_wire_setup)
value |= 0xf;
apic_write(APIC_SPIV, value);
- if (!virt_wire_setup) {
+ if (!virt_wire_setup)
+ {
/* For LVT0 make it edge triggered, active high, external and enabled */
value = apic_read(APIC_LVT0);
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
@@ -279,7 +288,8 @@ void disconnect_bsp_APIC(int virt_wire_setup)
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
apic_write(APIC_LVT0, value);
}
- else {
+ else
+ {
/* Disable LVT0 */
apic_write(APIC_LVT0, APIC_LVT_MASKED);
}
@@ -287,9 +297,9 @@ void disconnect_bsp_APIC(int virt_wire_setup)
/* For LVT1 make it edge triggered, active high, nmi and enabled */
value = apic_read(APIC_LVT1);
value &= ~(
- APIC_MODE_MASK | APIC_SEND_PENDING |
- APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
- APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
+ APIC_MODE_MASK | APIC_SEND_PENDING |
+ APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+ APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
apic_write(APIC_LVT1, value);
@@ -306,7 +316,8 @@ void disable_local_APIC(void)
*/
apic_write(APIC_SPIV, apic_read(APIC_SPIV) & ~APIC_SPIV_APIC_ENABLED);
- if (enabled_via_apicbase) {
+ if (enabled_via_apicbase)
+ {
uint64_t msr_content;
rdmsrl(MSR_APIC_BASE, msr_content);
wrmsrl(MSR_APIC_BASE, msr_content &
@@ -452,14 +463,14 @@ void __init init_bsp_APIC(void)
* Do not trust the local APIC being empty at bootup.
*/
clear_local_APIC();
-
+
/*
* Enable APIC.
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
value |= APIC_SPIV_APIC_ENABLED;
-
+
/* This bit is reserved on P4/Xeon and should be cleared */
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
value &= ~APIC_SPIV_FOCUS_DISABLED;
@@ -532,7 +543,8 @@ void setup_local_APIC(void)
int i, j;
/* Pound the ESR really hard over the head with a big hammer - mbligh */
- if (esr_disable) {
+ if (esr_disable)
+ {
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
@@ -570,9 +582,11 @@ void setup_local_APIC(void)
* the interrupt. Hence a vector might get locked. It was noticed
* for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
*/
- for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+ for (i = APIC_ISR_NR - 1; i >= 0; i--)
+ {
value = apic_read(APIC_ISR + i*0x10);
- for (j = 31; j >= 0; j--) {
+ for (j = 31; j >= 0; j--)
+ {
if (value & (1u << j))
ack_APIC_irq();
}
@@ -642,11 +656,14 @@ void setup_local_APIC(void)
* TODO: set up through-local-APIC from through-I/O-APIC? --macro
*/
value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
- if (!smp_processor_id() && (pic_mode || !value)) {
+ if (!smp_processor_id() && (pic_mode || !value))
+ {
value = APIC_DM_EXTINT;
apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
smp_processor_id());
- } else {
+ }
+ else
+ {
value = APIC_DM_EXTINT | APIC_LVT_MASKED;
apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
smp_processor_id());
@@ -662,7 +679,8 @@ void setup_local_APIC(void)
value = APIC_DM_NMI | APIC_LVT_MASKED;
apic_write(APIC_LVT1, value);
- if (!esr_disable) {
+ if (!esr_disable)
+ {
maxlvt = get_maxlvt();
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
@@ -680,7 +698,9 @@ void setup_local_APIC(void)
apic_printk(APIC_VERBOSE, "ESR value before enabling "
"vector: %#lx after: %#lx\n",
oldvalue, value);
- } else {
+ }
+ else
+ {
/*
* Something untraceble is creating bad interrupts on
* secondary quads ... for the moment, just leave the
@@ -711,9 +731,8 @@ int lapic_suspend(void)
if (maxlvt >= 4)
apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
- if (maxlvt >= 6) {
+ if (maxlvt >= 6)
apic_pm_state.apic_lvtcmci = apic_read(APIC_CMCI);
- }
apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
@@ -769,9 +788,8 @@ int lapic_resume(void)
if (maxlvt >= 5)
apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
- if (maxlvt >= 6) {
+ if (maxlvt >= 6)
apic_write(APIC_CMCI, apic_pm_state.apic_lvtcmci);
- }
if (maxlvt >= 4)
apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
@@ -829,12 +847,14 @@ static int __init detect_init_APIC (void)
return -1;
}
- if (!cpu_has_apic) {
+ if (!cpu_has_apic)
+ {
/*
* Over-ride BIOS and try to enable the local
* APIC only if "lapic" specified.
*/
- if (enable_local_apic <= 0) {
+ if (enable_local_apic <= 0)
+ {
printk("Local APIC disabled by BIOS -- "
"you can enable it with \"lapic\"\n");
return -1;
@@ -858,7 +878,8 @@ static int __init detect_init_APIC (void)
* The APIC feature bit should now be enabled
* in `cpuid'
*/
- if (!(cpuid_edx(1) & cpufeat_mask(X86_FEATURE_APIC))) {
+ if (!(cpuid_edx(1) & cpufeat_mask(X86_FEATURE_APIC)))
+ {
printk("Could not enable APIC!\n");
return -1;
}
@@ -900,7 +921,7 @@ void __init x2apic_bsp_setup(void)
{
printk("Not enabling x2APIC: disabled by cmdline.\n");
return;
- }
+ }
printk("x2APIC: Already enabled by BIOS: Ignoring cmdline disable.\n");
}
@@ -939,7 +960,7 @@ void __init x2apic_bsp_setup(void)
printk("Not enabling x2APIC (upon firmware request)\n");
goto restore_out;
}
- /* fall through */
+ /* fall through */
default:
if ( x2apic_enabled )
panic("Interrupt remapping could not be enabled while "
@@ -983,10 +1004,12 @@ void __init init_apic_mappings(void)
* zeroes page to simulate the local APIC and another
* one for the IO-APIC.
*/
- if (!smp_found_config && detect_init_APIC()) {
+ if (!smp_found_config && detect_init_APIC())
+ {
apic_phys = __pa(alloc_xenheap_page());
clear_page(__va(apic_phys));
- } else
+ }
+ else
apic_phys = mp_lapic_addr;
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
@@ -1007,15 +1030,15 @@ __next:
/*****************************************************************************
* APIC calibration
- *
+ *
* The APIC is programmed in bus cycles.
* Timeout values should specified in real time units.
* The "cheapest" time source is the cyclecounter.
- *
+ *
* Thus, we need a mappings from: bus cycles <- cycle counter <- system time
- *
+ *
* The calibration is currently a bit shoddy since it requires the external
- * timer chip to generate periodic timer interupts.
+ * timer chip to generate periodic timer interrupts.
*****************************************************************************/
/* used for system time scaling */
@@ -1048,16 +1071,17 @@ static unsigned int __init get_8254_timer_count(void)
static void __init wait_8254_wraparound(void)
{
unsigned int curr_count, prev_count;
-
+
curr_count = get_8254_timer_count();
- do {
+ do
+ {
prev_count = curr_count;
curr_count = get_8254_timer_count();
/* workaround for broken Mercury/Neptune */
if (prev_count >= curr_count + 0x100)
curr_count = get_8254_timer_count();
-
+
} while (prev_count >= curr_count);
}
@@ -1119,8 +1143,10 @@ static unsigned int __init hsx_deadline_rev(void)
{
switch ( boot_cpu_data.x86_mask )
{
- case 0x02: return 0x3a; /* EP */
- case 0x04: return 0x0f; /* EX */
+ case 0x02:
+ return 0x3a; /* EP */
+ case 0x04:
+ return 0x0f; /* EX */
}
return ~0U;
@@ -1130,10 +1156,14 @@ static unsigned int __init bdx_deadline_rev(void)
{
switch ( boot_cpu_data.x86_mask )
{
- case 0x02: return 0x00000011;
- case 0x03: return 0x0700000e;
- case 0x04: return 0x0f00000c;
- case 0x05: return 0x0e000003;
+ case 0x02:
+ return 0x00000011;
+ case 0x03:
+ return 0x0700000e;
+ case 0x04:
+ return 0x0f00000c;
+ case 0x05:
+ return 0x0e000003;
}
return ~0U;
@@ -1143,15 +1173,19 @@ static unsigned int __init skx_deadline_rev(void)
{
switch ( boot_cpu_data.x86_mask )
{
- case 0x00 ... 0x02: return ~0U;
- case 0x03: return 0x01000136;
- case 0x04: return 0x02000014;
+ case 0x00 ... 0x02:
+ return ~0U;
+ case 0x03:
+ return 0x01000136;
+ case 0x04:
+ return 0x02000014;
}
return 0;
}
-static const struct x86_cpu_id __initconstrel deadline_match[] = {
+static const struct x86_cpu_id __initconstrel deadline_match[] =
+{
DEADLINE_MODEL_MATCH(0x3c, 0x22), /* Haswell */
DEADLINE_MODEL_MATCH(0x3f, hsx_deadline_rev), /* Haswell EP/EX */
DEADLINE_MODEL_MATCH(0x45, 0x20), /* Haswell D */
@@ -1209,7 +1243,8 @@ static void wait_tick_pvh(void)
start = NOW();
/* Won't wrap around */
- do {
+ do
+ {
cpu_relax();
curr_time = NOW();
} while ( curr_time - start < lapse_ns );
@@ -1323,7 +1358,7 @@ void __init setup_boot_APIC_clock(void)
}
setup_APIC_timer();
-
+
local_irq_restore(flags);
}
@@ -1334,7 +1369,8 @@ void setup_secondary_APIC_clock(void)
void disable_APIC_timer(void)
{
- if (using_apic_timer) {
+ if (using_apic_timer)
+ {
unsigned long v;
/* Work around AMD Erratum 411. This is a nice thing to do anyway. */
@@ -1347,9 +1383,10 @@ void disable_APIC_timer(void)
void enable_APIC_timer(void)
{
- if (using_apic_timer) {
+ if (using_apic_timer)
+ {
unsigned long v;
-
+
v = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED);
}
@@ -1385,7 +1422,7 @@ int reprogram_timer(s_time_t timeout)
return apic_tmict || !timeout;
}
-void apic_timer_interrupt(struct cpu_user_regs * regs)
+void apic_timer_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
perfc_incr(apic_timer);
@@ -1411,9 +1448,11 @@ void spurious_interrupt(struct cpu_user_regs *regs)
* a request to dump local CPU state). Vectored interrupts are ACKed;
* spurious interrupts are not.
*/
- if (apic_isr_read(SPURIOUS_APIC_VECTOR)) {
+ if (apic_isr_read(SPURIOUS_APIC_VECTOR))
+ {
ack_APIC_irq();
- if (this_cpu(state_dump_pending)) {
+ if (this_cpu(state_dump_pending))
+ {
this_cpu(state_dump_pending) = false;
dump_execstate(regs);
return;
@@ -1431,7 +1470,8 @@ void spurious_interrupt(struct cpu_user_regs *regs)
void error_interrupt(struct cpu_user_regs *regs)
{
- static const char *const esr_fields[] = {
+ static const char *const esr_fields[] =
+ {
"Send CS error",
"Receive CS error",
"Send accept error",
@@ -1451,7 +1491,7 @@ void error_interrupt(struct cpu_user_regs *regs)
ack_APIC_irq();
printk(XENLOG_DEBUG "APIC error on CPU%u: %02x(%02x)",
- smp_processor_id(), v , v1);
+ smp_processor_id(), v, v1);
for ( i = 7; i >= 0; --i )
if ( v1 & (1 << i) )
printk(", %s", esr_fields[i]);
@@ -1477,7 +1517,8 @@ int __init APIC_init_uniprocessor (void)
if (enable_local_apic < 0)
setup_clear_cpu_cap(X86_FEATURE_APIC);
- if (!smp_found_config && !cpu_has_apic) {
+ if (!smp_found_config && !cpu_has_apic)
+ {
skip_ioapic_setup = true;
return -1;
}
@@ -1485,7 +1526,8 @@ int __init APIC_init_uniprocessor (void)
/*
* Complain if the BIOS pretends there is one.
*/
- if (!cpu_has_apic) {
+ if (!cpu_has_apic)
+ {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_physical_apicid);
skip_ioapic_setup = true;
@@ -1521,20 +1563,20 @@ int __init APIC_init_uniprocessor (void)
return 0;
}
-static const char * __init apic_mode_to_str(const enum apic_mode mode)
+static const char *__init apic_mode_to_str(const enum apic_mode mode)
{
switch ( mode )
{
- case APIC_MODE_INVALID:
- return "invalid";
- case APIC_MODE_DISABLED:
- return "disabled";
- case APIC_MODE_XAPIC:
- return "xapic";
- case APIC_MODE_X2APIC:
- return "x2apic";
- default:
- return "unrecognised";
+ case APIC_MODE_INVALID:
+ return "invalid";
+ case APIC_MODE_DISABLED:
+ return "disabled";
+ case APIC_MODE_XAPIC:
+ return "xapic";
+ case APIC_MODE_X2APIC:
+ return "x2apic";
+ default:
+ return "unrecognised";
}
}
diff --git a/xen/arch/x86/boot/cmdline.c b/xen/arch/x86/boot/cmdline.c
index fc11c6d3c5..c0dc17ce22 100644
--- a/xen/arch/x86/boot/cmdline.c
+++ b/xen/arch/x86/boot/cmdline.c
@@ -28,14 +28,15 @@ asm (
" .globl _start \n"
"_start: \n"
" jmp cmdline_parse_early \n"
- );
+);
#include <xen/kconfig.h>
#include "defs.h"
#include "video.h"
/* Keep in sync with trampoline.S:early_boot_opts label! */
-typedef struct __packed {
+typedef struct __packed
+{
u8 skip_realmode;
u8 opt_edd;
u8 opt_edid;
@@ -136,7 +137,7 @@ static unsigned int __maybe_unused strtoui(
unsigned long long res = 0;
if ( *s == '0' )
- base = (tolower(*++s) == 'x') ? (++s, 16) : 8;
+ base = (tolower(*++s) == 'x') ? (++s, 16) : 8;
for ( ; *s != '\0'; ++s )
{
@@ -167,9 +168,9 @@ static unsigned int __maybe_unused strtoui(
}
}
- out:
+out:
if ( next )
- *next = s;
+ *next = s;
return res;
}
@@ -211,7 +212,8 @@ static const char *find_opt(const char *cmdline, const char *opt, bool arg)
static bool skip_realmode(const char *cmdline)
{
- return find_opt(cmdline, "no-real-mode", false) || find_opt(cmdline, "tboot=", true);
+ return find_opt(cmdline, "no-real-mode", false)
+ || find_opt(cmdline, "tboot=", true);
}
static u8 edd_parse(const char *cmdline)
diff --git a/xen/arch/x86/boot/mkelf32.c b/xen/arch/x86/boot/mkelf32.c
index bcbde1a056..46ec72e526 100644
--- a/xen/arch/x86/boot/mkelf32.c
+++ b/xen/arch/x86/boot/mkelf32.c
@@ -1,10 +1,10 @@
/******************************************************************************
* mkelf32.c
- *
+ *
* Usage: elf-prefix <in-image> <out-image> <load-base>
- *
+ *
* Converts an Elf64 executable binary <in-image> into a simple Elf32
- * image <out-image> comprising a single chunk to be loaded at <load-base>.
+ * image <out-image> comprising a single chunk to be loaded at <load-base>.
*/
#include <errno.h>
@@ -30,12 +30,15 @@
#define DYNAMICALLY_FILLED 0
#define RAW_OFFSET 128
-static Elf32_Ehdr out_ehdr = {
- { ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, /* EI_MAG{0-3} */
- ELFCLASS32, /* EI_CLASS */
- ELFDATA2LSB, /* EI_DATA */
- EV_CURRENT, /* EI_VERSION */
- 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* e_ident */
+static Elf32_Ehdr out_ehdr =
+{
+ {
+ ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, /* EI_MAG{0-3} */
+ ELFCLASS32, /* EI_CLASS */
+ ELFDATA2LSB, /* EI_DATA */
+ EV_CURRENT, /* EI_VERSION */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0
+ }, /* e_ident */
ET_EXEC, /* e_type */
EM_386, /* e_machine */
EV_CURRENT, /* e_version */
@@ -51,7 +54,8 @@ static Elf32_Ehdr out_ehdr = {
2 /* e_shstrndx */
};
-static Elf32_Phdr out_phdr = {
+static Elf32_Phdr out_phdr =
+{
PT_LOAD, /* p_type */
RAW_OFFSET, /* p_offset */
DYNAMICALLY_FILLED, /* p_vaddr */
@@ -61,7 +65,8 @@ static Elf32_Phdr out_phdr = {
PF_R|PF_W|PF_X, /* p_flags */
64 /* p_align */
};
-static Elf32_Phdr note_phdr = {
+static Elf32_Phdr note_phdr =
+{
PT_NOTE, /* p_type */
DYNAMICALLY_FILLED, /* p_offset */
DYNAMICALLY_FILLED, /* p_vaddr */
@@ -76,29 +81,32 @@ static u8 out_shstrtab[] = "\0.text\0.shstrtab";
/* If num_phdrs >= 2, we need to tack the .note. */
static u8 out_shstrtab_extra[] = ".note\0";
-static Elf32_Shdr out_shdr[] = {
+static Elf32_Shdr out_shdr[] =
+{
{ 0 },
- { 1, /* sh_name */
- SHT_PROGBITS, /* sh_type */
- SHF_WRITE|SHF_ALLOC|SHF_EXECINSTR, /* sh_flags */
- DYNAMICALLY_FILLED, /* sh_addr */
- RAW_OFFSET, /* sh_offset */
- DYNAMICALLY_FILLED, /* sh_size */
- 0, /* sh_link */
- 0, /* sh_info */
- 64, /* sh_addralign */
- 0 /* sh_entsize */
+ {
+ 1, /* sh_name */
+ SHT_PROGBITS, /* sh_type */
+ SHF_WRITE|SHF_ALLOC|SHF_EXECINSTR, /* sh_flags */
+ DYNAMICALLY_FILLED, /* sh_addr */
+ RAW_OFFSET, /* sh_offset */
+ DYNAMICALLY_FILLED, /* sh_size */
+ 0, /* sh_link */
+ 0, /* sh_info */
+ 64, /* sh_addralign */
+ 0 /* sh_entsize */
},
- { 7, /* sh_name */
- SHT_STRTAB, /* sh_type */
- 0, /* sh_flags */
- 0, /* sh_addr */
- DYNAMICALLY_FILLED, /* sh_offset */
- sizeof(out_shstrtab), /* sh_size */
- 0, /* sh_link */
- 0, /* sh_info */
- 1, /* sh_addralign */
- 0 /* sh_entsize */
+ {
+ 7, /* sh_name */
+ SHT_STRTAB, /* sh_type */
+ 0, /* sh_flags */
+ 0, /* sh_addr */
+ DYNAMICALLY_FILLED, /* sh_offset */
+ sizeof(out_shstrtab), /* sh_size */
+ 0, /* sh_link */
+ 0, /* sh_info */
+ 1, /* sh_addralign */
+ 0 /* sh_entsize */
}
};
@@ -106,17 +114,18 @@ static Elf32_Shdr out_shdr[] = {
* The 17 points to the '.note' in the out_shstrtab and out_shstrtab_extra
* laid out in the file.
*/
-static Elf32_Shdr out_shdr_note = {
- 17, /* sh_name */
- SHT_NOTE, /* sh_type */
- 0, /* sh_flags */
- DYNAMICALLY_FILLED, /* sh_addr */
- DYNAMICALLY_FILLED, /* sh_offset */
- DYNAMICALLY_FILLED, /* sh_size */
- 0, /* sh_link */
- 0, /* sh_info */
- 4, /* sh_addralign */
- 0 /* sh_entsize */
+static Elf32_Shdr out_shdr_note =
+{
+ 17, /* sh_name */
+ SHT_NOTE, /* sh_type */
+ 0, /* sh_flags */
+ DYNAMICALLY_FILLED, /* sh_addr */
+ DYNAMICALLY_FILLED, /* sh_offset */
+ DYNAMICALLY_FILLED, /* sh_size */
+ 0, /* sh_link */
+ 0, /* sh_info */
+ 4, /* sh_addralign */
+ 0 /* sh_entsize */
};
/* Some system header files define these macros and pollute our namespace. */
@@ -179,7 +188,7 @@ static void endianadjust_phdr32(Elf32_Phdr *ph)
ph->p_filesz = swap32(ph->p_filesz);
ph->p_memsz = swap32(ph->p_memsz);
ph->p_flags = swap32(ph->p_flags);
- ph->p_align = swap32(ph->p_align);
+ ph->p_align = swap32(ph->p_align);
}
static void endianadjust_phdr64(Elf64_Phdr *ph)
@@ -193,7 +202,7 @@ static void endianadjust_phdr64(Elf64_Phdr *ph)
ph->p_paddr = swap64(ph->p_paddr);
ph->p_filesz = swap64(ph->p_filesz);
ph->p_memsz = swap64(ph->p_memsz);
- ph->p_align = swap64(ph->p_align);
+ ph->p_align = swap64(ph->p_align);
}
static void endianadjust_shdr32(Elf32_Shdr *sh)
@@ -427,7 +436,8 @@ int main(int argc, char **argv)
do_write(outfd, &note_phdr, sizeof(note_phdr));
}
- if ( (bytes = RAW_OFFSET - sizeof(out_ehdr) - (num_phdrs * sizeof(out_phdr)) ) < 0 )
+    if ( (bytes = RAW_OFFSET - sizeof(out_ehdr) -
+                  (num_phdrs * sizeof(out_phdr)) ) < 0 )
{
fprintf(stderr, "Header overflow.\n");
return 1;
@@ -436,8 +446,8 @@ int main(int argc, char **argv)
for ( bytes = 0; bytes < dat_siz; bytes += todo )
{
- todo = ((dat_siz - bytes) > sizeof(buffer)) ?
- sizeof(buffer) : (dat_siz - bytes);
+ todo = ((dat_siz - bytes) > sizeof(buffer)) ?
+ sizeof(buffer) : (dat_siz - bytes);
do_read(infd, buffer, todo);
do_write(outfd, buffer, todo);
}
@@ -455,7 +465,8 @@ int main(int argc, char **argv)
do_write(outfd, out_shstrtab, sizeof(out_shstrtab));
/* Our .note */
do_write(outfd, out_shstrtab_extra, sizeof(out_shstrtab_extra));
- do_write(outfd, buffer, 4-((sizeof(out_shstrtab)+sizeof(out_shstrtab_extra)+dat_siz)&3));
+        do_write(outfd, buffer,
+                 4-((sizeof(out_shstrtab)+sizeof(out_shstrtab_extra)+dat_siz)&3));
}
else
{
diff --git a/xen/arch/x86/boot/reloc.c b/xen/arch/x86/boot/reloc.c
index 4f4039bb7c..a02cbe2504 100644
--- a/xen/arch/x86/boot/reloc.c
+++ b/xen/arch/x86/boot/reloc.c
@@ -23,7 +23,7 @@ asm (
" .globl _start \n"
"_start: \n"
" jmp reloc \n"
- );
+);
#include "defs.h"
#include "../../../include/xen/multiboot.h"
@@ -264,7 +264,7 @@ static multiboot_info_t *mbi2_reloc(u32 mbi_in)
return mbi_out;
}
-void * __stdcall reloc(u32 magic, u32 in, u32 trampoline)
+void *__stdcall reloc(u32 magic, u32 in, u32 trampoline)
{
alloc = trampoline;
@@ -279,7 +279,7 @@ void * __stdcall reloc(u32 magic, u32 in, u32 trampoline)
case XEN_HVM_START_MAGIC_VALUE:
if ( IS_ENABLED(CONFIG_PVH_GUEST) )
return pvh_info_reloc(in);
- /* Fallthrough */
+ /* Fallthrough */
default:
/* Nothing we can do */
diff --git a/xen/arch/x86/bzimage.c b/xen/arch/x86/bzimage.c
index ac4fd428be..adcda504d0 100644
--- a/xen/arch/x86/bzimage.c
+++ b/xen/arch/x86/bzimage.c
@@ -14,44 +14,45 @@ static __init unsigned long output_length(void *image, unsigned long image_len)
return *(uint32_t *)(image + image_len - 4);
}
-struct __packed setup_header {
- uint8_t _pad0[0x1f1]; /* skip uninteresting stuff */
- uint8_t setup_sects;
- uint16_t root_flags;
- uint32_t syssize;
- uint16_t ram_size;
- uint16_t vid_mode;
- uint16_t root_dev;
- uint16_t boot_flag;
- uint16_t jump;
- uint32_t header;
+struct __packed setup_header
+{
+ uint8_t _pad0[0x1f1]; /* skip uninteresting stuff */
+ uint8_t setup_sects;
+ uint16_t root_flags;
+ uint32_t syssize;
+ uint16_t ram_size;
+ uint16_t vid_mode;
+ uint16_t root_dev;
+ uint16_t boot_flag;
+ uint16_t jump;
+ uint32_t header;
#define HDR_MAGIC "HdrS"
#define HDR_MAGIC_SZ 4
- uint16_t version;
+ uint16_t version;
#define VERSION(h,l) (((h)<<8) | (l))
- uint32_t realmode_swtch;
- uint16_t start_sys;
- uint16_t kernel_version;
- uint8_t type_of_loader;
- uint8_t loadflags;
- uint16_t setup_move_size;
- uint32_t code32_start;
- uint32_t ramdisk_image;
- uint32_t ramdisk_size;
- uint32_t bootsect_kludge;
- uint16_t heap_end_ptr;
- uint16_t _pad1;
- uint32_t cmd_line_ptr;
- uint32_t initrd_addr_max;
- uint32_t kernel_alignment;
- uint8_t relocatable_kernel;
- uint8_t _pad2[3];
- uint32_t cmdline_size;
- uint32_t hardware_subarch;
- uint64_t hardware_subarch_data;
- uint32_t payload_offset;
- uint32_t payload_length;
- };
+ uint32_t realmode_swtch;
+ uint16_t start_sys;
+ uint16_t kernel_version;
+ uint8_t type_of_loader;
+ uint8_t loadflags;
+ uint16_t setup_move_size;
+ uint32_t code32_start;
+ uint32_t ramdisk_image;
+ uint32_t ramdisk_size;
+ uint32_t bootsect_kludge;
+ uint16_t heap_end_ptr;
+ uint16_t _pad1;
+ uint32_t cmd_line_ptr;
+ uint32_t initrd_addr_max;
+ uint32_t kernel_alignment;
+ uint8_t relocatable_kernel;
+ uint8_t _pad2[3];
+ uint32_t cmdline_size;
+ uint32_t hardware_subarch;
+ uint64_t hardware_subarch_data;
+ uint32_t payload_offset;
+ uint32_t payload_length;
+};
static __init int bzimage_check(struct setup_header *hdr, unsigned long len)
{
@@ -61,9 +62,10 @@ static __init int bzimage_check(struct setup_header *hdr, unsigned long len)
if ( memcmp(&hdr->header, HDR_MAGIC, HDR_MAGIC_SZ) != 0 )
return 0;
- if ( hdr->version < VERSION(2,8) ) {
+ if ( hdr->version < VERSION(2, 8) )
+ {
printk("Cannot load bzImage v%d.%02d at least v2.08 is required\n",
- hdr->version >> 8, hdr->version & 0xff);
+ hdr->version >> 8, hdr->version & 0xff);
return -EINVAL;
}
return 1;
@@ -97,7 +99,8 @@ unsigned long __init bzimage_headroom(void *image_start,
{
headroom += headroom >> 12; /* Add 8 bytes for every 32K input block */
headroom += (32768 + 18); /* Add 32K + 18 bytes of extra headroom */
- } else
+ }
+ else
headroom += image_length;
headroom = (headroom + 4095) & ~4095;
diff --git a/xen/arch/x86/compat.c b/xen/arch/x86/compat.c
index a40ec295ae..e362c29151 100644
--- a/xen/arch/x86/compat.c
+++ b/xen/arch/x86/compat.c
@@ -1,6 +1,6 @@
/******************************************************************************
* compat.c
- *
+ *
* Implementations of legacy hypercalls. These call through to the new
* hypercall after doing necessary argument munging.
*/
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 839f19292d..8bc67247d6 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -15,11 +15,11 @@
#include "cpu.h"
/*
- * Pre-canned values for overriding the CPUID features
+ * Pre-canned values for overriding the CPUID features
* and extended features masks.
*
* Currently supported processors:
- *
+ *
* "fam_0f_rev_c"
* "fam_0f_rev_d"
* "fam_0f_rev_e"
@@ -48,52 +48,54 @@ boolean_param("allow_unsafe", opt_allow_unsafe);
bool __read_mostly amd_acpi_c1e_quirk;
static inline int rdmsr_amd_safe(unsigned int msr, unsigned int *lo,
- unsigned int *hi)
+ unsigned int *hi)
{
- int err;
-
- asm volatile("1: rdmsr\n2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %6,%2\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : "=a" (*lo), "=d" (*hi), "=r" (err)
- : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT));
-
- return err;
+ int err;
+
+ asm volatile("1: rdmsr\n2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %6,%2\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=a" (*lo), "=d" (*hi), "=r" (err)
+ : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT));
+
+ return err;
}
static inline int wrmsr_amd_safe(unsigned int msr, unsigned int lo,
- unsigned int hi)
+ unsigned int hi)
{
- int err;
-
- asm volatile("1: wrmsr\n2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %6,%0\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : "=r" (err)
- : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a),
- "0" (0), "i" (-EFAULT));
-
- return err;
+ int err;
+
+ asm volatile("1: wrmsr\n2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %6,%0\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=r" (err)
+ : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a),
+ "0" (0), "i" (-EFAULT));
+
+ return err;
}
static void wrmsr_amd(unsigned int msr, uint64_t val)
{
- asm volatile("wrmsr" ::
- "c" (msr), "a" ((uint32_t)val),
- "d" (val >> 32), "D" (0x9c5a203a));
+ asm volatile("wrmsr" ::
+ "c" (msr), "a" ((uint32_t)val),
+ "d" (val >> 32), "D" (0x9c5a203a));
}
-static const struct cpuidmask {
- uint16_t fam;
- char rev[2];
- unsigned int ecx, edx, ext_ecx, ext_edx;
-} pre_canned[] __initconst = {
+static const struct cpuidmask
+{
+ uint16_t fam;
+ char rev[2];
+ unsigned int ecx, edx, ext_ecx, ext_edx;
+} pre_canned[] __initconst =
+{
#define CAN(fam, id, rev) { \
fam, #rev, \
AMD_FEATURES_##id##_REV_##rev##_ECX, \
@@ -103,35 +105,35 @@ static const struct cpuidmask {
}
#define CAN_FAM(fam, rev) CAN(0x##fam, FAM##fam##h, rev)
#define CAN_K8(rev) CAN(0x0f, K8, rev)
- CAN_FAM(11, B),
- CAN_FAM(10, C),
- CAN_FAM(10, B),
- CAN_K8(G),
- CAN_K8(F),
- CAN_K8(E),
- CAN_K8(D),
- CAN_K8(C)
+ CAN_FAM(11, B),
+ CAN_FAM(10, C),
+ CAN_FAM(10, B),
+ CAN_K8(G),
+ CAN_K8(F),
+ CAN_K8(E),
+ CAN_K8(D),
+ CAN_K8(C)
#undef CAN
};
static const struct cpuidmask *__init noinline get_cpuidmask(const char *opt)
{
- unsigned long fam;
- char rev;
- unsigned int i;
-
- if (strncmp(opt, "fam_", 4))
- return NULL;
- fam = simple_strtoul(opt + 4, &opt, 16);
- if (strncmp(opt, "_rev_", 5) || !opt[5] || opt[6])
- return NULL;
- rev = toupper(opt[5]);
-
- for (i = 0; i < ARRAY_SIZE(pre_canned); ++i)
- if (fam == pre_canned[i].fam && rev == *pre_canned[i].rev)
- return &pre_canned[i];
-
- return NULL;
+ unsigned long fam;
+ char rev;
+ unsigned int i;
+
+ if (strncmp(opt, "fam_", 4))
+ return NULL;
+ fam = simple_strtoul(opt + 4, &opt, 16);
+ if (strncmp(opt, "_rev_", 5) || !opt[5] || opt[6])
+ return NULL;
+ rev = toupper(opt[5]);
+
+ for (i = 0; i < ARRAY_SIZE(pre_canned); ++i)
+ if (fam == pre_canned[i].fam && rev == *pre_canned[i].rev)
+ return &pre_canned[i];
+
+ return NULL;
}
/*
@@ -141,15 +143,15 @@ static const struct cpuidmask *__init noinline get_cpuidmask(const char *opt)
*/
static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)
{
- unsigned int hi, lo;
+ unsigned int hi, lo;
- expected_levelling_cap |= caps;
+ expected_levelling_cap |= caps;
- if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&
- (wrmsr_amd_safe(msr, lo, hi) == 0))
- levelling_caps |= caps;
+ if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&
+ (wrmsr_amd_safe(msr, lo, hi) == 0))
+ levelling_caps |= caps;
- return ((uint64_t)hi << 32) | lo;
+ return ((uint64_t)hi << 32) | lo;
}
/*
@@ -158,46 +160,46 @@ static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)
*/
static void __init noinline probe_masking_msrs(void)
{
- const struct cpuinfo_x86 *c = &boot_cpu_data;
-
- /*
- * First, work out which masking MSRs we should have, based on
- * revision and cpuid.
- */
-
- /* Fam11 doesn't support masking at all. */
- if (c->x86 == 0x11)
- return;
-
- cpuidmask_defaults._1cd =
- _probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd);
- cpuidmask_defaults.e1cd =
- _probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd);
-
- if (c->cpuid_level >= 7)
- cpuidmask_defaults._7ab0 =
- _probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0);
-
- if (c->x86 == 0x15 && c->cpuid_level >= 6 && cpuid_ecx(6))
- cpuidmask_defaults._6c =
- _probe_mask_msr(MSR_AMD_THRM_FEATURE_MASK, LCAP_6c);
-
- /*
- * Don't bother warning about a mismatch if virtualised. These MSRs
- * are not architectural and almost never virtualised.
- */
- if ((expected_levelling_cap == levelling_caps) ||
- cpu_has_hypervisor)
- return;
-
- printk(XENLOG_WARNING "Mismatch between expected (%#x) "
- "and real (%#x) levelling caps: missing %#x\n",
- expected_levelling_cap, levelling_caps,
- (expected_levelling_cap ^ levelling_caps) & levelling_caps);
- printk(XENLOG_WARNING "Fam %#x, model %#x level %#x\n",
- c->x86, c->x86_model, c->cpuid_level);
- printk(XENLOG_WARNING
- "If not running virtualised, please report a bug\n");
+ const struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ /*
+ * First, work out which masking MSRs we should have, based on
+ * revision and cpuid.
+ */
+
+ /* Fam11 doesn't support masking at all. */
+ if (c->x86 == 0x11)
+ return;
+
+ cpuidmask_defaults._1cd =
+ _probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd);
+ cpuidmask_defaults.e1cd =
+ _probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd);
+
+ if (c->cpuid_level >= 7)
+ cpuidmask_defaults._7ab0 =
+ _probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0);
+
+ if (c->x86 == 0x15 && c->cpuid_level >= 6 && cpuid_ecx(6))
+ cpuidmask_defaults._6c =
+ _probe_mask_msr(MSR_AMD_THRM_FEATURE_MASK, LCAP_6c);
+
+ /*
+ * Don't bother warning about a mismatch if virtualised. These MSRs
+ * are not architectural and almost never virtualised.
+ */
+ if ((expected_levelling_cap == levelling_caps) ||
+ cpu_has_hypervisor)
+ return;
+
+ printk(XENLOG_WARNING "Mismatch between expected (%#x) "
+ "and real (%#x) levelling caps: missing %#x\n",
+ expected_levelling_cap, levelling_caps,
+ (expected_levelling_cap ^ levelling_caps) & levelling_caps);
+ printk(XENLOG_WARNING "Fam %#x, model %#x level %#x\n",
+ c->x86, c->x86_model, c->cpuid_level);
+ printk(XENLOG_WARNING
+ "If not running virtualised, please report a bug\n");
}
/*
@@ -208,29 +210,31 @@ static void __init noinline probe_masking_msrs(void)
*/
static void amd_ctxt_switch_masking(const struct vcpu *next)
{
- struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
- const struct domain *nextd = next ? next->domain : NULL;
- const struct cpuidmasks *masks =
- (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
- ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
-
- if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
- uint64_t val = masks->_1cd;
-
- /*
- * OSXSAVE defaults to 1, which causes fast-forwarding of
- * Xen's real setting. Clobber it if disabled by the guest
- * kernel.
- */
- if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
- !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
- val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
-
- if (unlikely(these_masks->_1cd != val)) {
- wrmsr_amd(MSR_K8_FEATURE_MASK, val);
- these_masks->_1cd = val;
- }
- }
+ struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
+ const struct domain *nextd = next ? next->domain : NULL;
+ const struct cpuidmasks *masks =
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+ ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
+
+ if ((levelling_caps & LCAP_1cd) == LCAP_1cd)
+ {
+ uint64_t val = masks->_1cd;
+
+ /*
+ * OSXSAVE defaults to 1, which causes fast-forwarding of
+ * Xen's real setting. Clobber it if disabled by the guest
+ * kernel.
+ */
+ if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
+ !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
+ val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
+
+ if (unlikely(these_masks->_1cd != val))
+ {
+ wrmsr_amd(MSR_K8_FEATURE_MASK, val);
+ these_masks->_1cd = val;
+ }
+ }
#define LAZY(cap, msr, field) \
({ \
@@ -242,9 +246,9 @@ static void amd_ctxt_switch_masking(const struct vcpu *next)
} \
})
- LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK, e1cd);
- LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
- LAZY(LCAP_6c, MSR_AMD_THRM_FEATURE_MASK, _6c);
+ LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK, e1cd);
+ LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
+ LAZY(LCAP_6c, MSR_AMD_THRM_FEATURE_MASK, _6c);
#undef LAZY
}
@@ -265,541 +269,587 @@ static void amd_ctxt_switch_masking(const struct vcpu *next)
*/
static void __init noinline amd_init_levelling(void)
{
- const struct cpuidmask *m = NULL;
+ const struct cpuidmask *m = NULL;
- if (probe_cpuid_faulting())
- return;
+ if (probe_cpuid_faulting())
+ return;
- probe_masking_msrs();
+ probe_masking_msrs();
- if (*opt_famrev != '\0') {
- m = get_cpuidmask(opt_famrev);
+ if (*opt_famrev != '\0')
+ {
+ m = get_cpuidmask(opt_famrev);
- if (!m)
- printk("Invalid processor string: %s\n", opt_famrev);
- }
-
- if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
- uint32_t ecx, edx, tmp;
-
- cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
-
- if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
- ecx &= opt_cpuid_mask_ecx;
- edx &= opt_cpuid_mask_edx;
- } else if (m) {
- ecx &= m->ecx;
- edx &= m->edx;
- }
+ if (!m)
+ printk("Invalid processor string: %s\n", opt_famrev);
+ }
- /* Fast-forward bits - Must be set. */
- if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
- ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
- edx |= cpufeat_mask(X86_FEATURE_APIC);
+ if ((levelling_caps & LCAP_1cd) == LCAP_1cd)
+ {
+ uint32_t ecx, edx, tmp;
- /* Allow the HYPERVISOR bit to be set via guest policy. */
- ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
+ cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
- cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
- }
-
- if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) {
- uint32_t ecx, edx, tmp;
-
- cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
+ if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx))
+ {
+ ecx &= opt_cpuid_mask_ecx;
+ edx &= opt_cpuid_mask_edx;
+ }
+ else if (m)
+ {
+ ecx &= m->ecx;
+ edx &= m->edx;
+ }
- if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
- ecx &= opt_cpuid_mask_ext_ecx;
- edx &= opt_cpuid_mask_ext_edx;
- } else if (m) {
- ecx &= m->ext_ecx;
- edx &= m->ext_edx;
- }
+ /* Fast-forward bits - Must be set. */
+ if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
+ ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
+ edx |= cpufeat_mask(X86_FEATURE_APIC);
- /* Fast-forward bits - Must be set. */
- edx |= cpufeat_mask(X86_FEATURE_APIC);
+ /* Allow the HYPERVISOR bit to be set via guest policy. */
+ ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
- cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx;
- }
+ cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
+ }
- if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) {
- uint32_t eax, ebx, tmp;
+ if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd)
+ {
+ uint32_t ecx, edx, tmp;
- cpuid(0x00000007, &eax, &ebx, &tmp, &tmp);
+ cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
- if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx)) {
- eax &= opt_cpuid_mask_l7s0_eax;
- ebx &= opt_cpuid_mask_l7s0_ebx;
- }
+ if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
+ {
+ ecx &= opt_cpuid_mask_ext_ecx;
+ edx &= opt_cpuid_mask_ext_edx;
+ }
+ else if (m)
+ {
+ ecx &= m->ext_ecx;
+ edx &= m->ext_edx;
+ }
- cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx;
- }
+ /* Fast-forward bits - Must be set. */
+ edx |= cpufeat_mask(X86_FEATURE_APIC);
- if ((levelling_caps & LCAP_6c) == LCAP_6c) {
- uint32_t ecx = cpuid_ecx(6);
+ cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx;
+ }
- if (~opt_cpuid_mask_thermal_ecx)
- ecx &= opt_cpuid_mask_thermal_ecx;
+ if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0)
+ {
+ uint32_t eax, ebx, tmp;
- cpuidmask_defaults._6c &= (~0ULL << 32) | ecx;
- }
+ cpuid(0x00000007, &eax, &ebx, &tmp, &tmp);
- if (opt_cpu_info) {
- printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
- printk(XENLOG_INFO
- "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
- "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x, 6c 0x%08x\n",
- (uint32_t)cpuidmask_defaults._1cd,
- (uint32_t)(cpuidmask_defaults._1cd >> 32),
- (uint32_t)cpuidmask_defaults.e1cd,
- (uint32_t)(cpuidmask_defaults.e1cd >> 32),
- (uint32_t)(cpuidmask_defaults._7ab0 >> 32),
- (uint32_t)cpuidmask_defaults._7ab0,
- (uint32_t)cpuidmask_defaults._6c);
- }
+ if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx))
+ {
+ eax &= opt_cpuid_mask_l7s0_eax;
+ ebx &= opt_cpuid_mask_l7s0_ebx;
+ }
- if (levelling_caps)
- ctxt_switch_masking = amd_ctxt_switch_masking;
+ cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx;
+ }
+
+ if ((levelling_caps & LCAP_6c) == LCAP_6c)
+ {
+ uint32_t ecx = cpuid_ecx(6);
+
+ if (~opt_cpuid_mask_thermal_ecx)
+ ecx &= opt_cpuid_mask_thermal_ecx;
+
+ cpuidmask_defaults._6c &= (~0ULL << 32) | ecx;
+ }
+
+ if (opt_cpu_info)
+ {
+ printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
+ printk(XENLOG_INFO
+ "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
+ "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x, 6c 0x%08x\n",
+ (uint32_t)cpuidmask_defaults._1cd,
+ (uint32_t)(cpuidmask_defaults._1cd >> 32),
+ (uint32_t)cpuidmask_defaults.e1cd,
+ (uint32_t)(cpuidmask_defaults.e1cd >> 32),
+ (uint32_t)(cpuidmask_defaults._7ab0 >> 32),
+ (uint32_t)cpuidmask_defaults._7ab0,
+ (uint32_t)cpuidmask_defaults._6c);
+ }
+
+ if (levelling_caps)
+ ctxt_switch_masking = amd_ctxt_switch_masking;
}
/*
- * Check for the presence of an AMD erratum. Arguments are defined in amd.h
+ * Check for the presence of an AMD erratum. Arguments are defined in amd.h
* for each known erratum. Return 1 if erratum is found.
*/
int cpu_has_amd_erratum(const struct cpuinfo_x86 *cpu, int osvw_id, ...)
{
- va_list ap;
- u32 range;
- u32 ms;
-
- if (cpu->x86_vendor != X86_VENDOR_AMD)
- return 0;
+ va_list ap;
+ u32 range;
+ u32 ms;
- if (osvw_id >= 0 && cpu_has(cpu, X86_FEATURE_OSVW)) {
- u64 osvw_len;
+ if (cpu->x86_vendor != X86_VENDOR_AMD)
+ return 0;
- rdmsrl(MSR_AMD_OSVW_ID_LENGTH, osvw_len);
+ if (osvw_id >= 0 && cpu_has(cpu, X86_FEATURE_OSVW))
+ {
+ u64 osvw_len;
- if (osvw_id < osvw_len) {
- u64 osvw_bits;
+ rdmsrl(MSR_AMD_OSVW_ID_LENGTH, osvw_len);
- rdmsrl(MSR_AMD_OSVW_STATUS + (osvw_id >> 6),
- osvw_bits);
+ if (osvw_id < osvw_len)
+ {
+ u64 osvw_bits;
- return (osvw_bits >> (osvw_id & 0x3f)) & 1;
- }
- }
+ rdmsrl(MSR_AMD_OSVW_STATUS + (osvw_id >> 6),
+ osvw_bits);
- /* OSVW unavailable or ID unknown, match family-model-stepping range */
- va_start(ap, osvw_id);
-
- ms = (cpu->x86_model << 4) | cpu->x86_mask;
- while ((range = va_arg(ap, int))) {
- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
- (ms >= AMD_MODEL_RANGE_START(range)) &&
- (ms <= AMD_MODEL_RANGE_END(range))) {
- va_end(ap);
- return 1;
- }
- }
+ return (osvw_bits >> (osvw_id & 0x3f)) & 1;
+ }
+ }
+
+ /* OSVW unavailable or ID unknown, match family-model-stepping range */
+ va_start(ap, osvw_id);
+
+ ms = (cpu->x86_model << 4) | cpu->x86_mask;
+ while ((range = va_arg(ap, int)))
+ {
+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+ (ms >= AMD_MODEL_RANGE_START(range)) &&
+ (ms <= AMD_MODEL_RANGE_END(range)))
+ {
+ va_end(ap);
+ return 1;
+ }
+ }
- va_end(ap);
- return 0;
+ va_end(ap);
+ return 0;
}
/*
* Disable C1-Clock ramping if enabled in PMM7.CpuLowPwrEnh on 8th-generation
* cores only. Assume BIOS has setup all Northbridges equivalently.
*/
-static void disable_c1_ramping(void)
+static void disable_c1_ramping(void)
{
- u8 pmm7;
- int node, nr_nodes;
-
- /* Read the number of nodes from the first Northbridge. */
- nr_nodes = ((pci_conf_read32(0, 0, 0x18, 0x0, 0x60)>>4)&0x07)+1;
- for (node = 0; node < nr_nodes; node++) {
- /* PMM7: bus=0, dev=0x18+node, function=0x3, register=0x87. */
- pmm7 = pci_conf_read8(0, 0, 0x18+node, 0x3, 0x87);
- /* Invalid read means we've updated every Northbridge. */
- if (pmm7 == 0xFF)
- break;
- pmm7 &= 0xFC; /* clear pmm7[1:0] */
- pci_conf_write8(0, 0, 0x18+node, 0x3, 0x87, pmm7);
- printk ("AMD: Disabling C1 Clock Ramping Node #%x\n", node);
- }
+ u8 pmm7;
+ int node, nr_nodes;
+
+ /* Read the number of nodes from the first Northbridge. */
+ nr_nodes = ((pci_conf_read32(0, 0, 0x18, 0x0, 0x60)>>4)&0x07)+1;
+ for (node = 0; node < nr_nodes; node++)
+ {
+ /* PMM7: bus=0, dev=0x18+node, function=0x3, register=0x87. */
+ pmm7 = pci_conf_read8(0, 0, 0x18+node, 0x3, 0x87);
+ /* Invalid read means we've updated every Northbridge. */
+ if (pmm7 == 0xFF)
+ break;
+ pmm7 &= 0xFC; /* clear pmm7[1:0] */
+ pci_conf_write8(0, 0, 0x18+node, 0x3, 0x87, pmm7);
+ printk ("AMD: Disabling C1 Clock Ramping Node #%x\n", node);
+ }
}
static void disable_c1e(void *unused)
{
- uint64_t msr_content;
-
- /*
- * Disable C1E mode, as the APIC timer stops in that mode.
- * The MSR does not exist in all FamilyF CPUs (only Rev F and above),
- * but we safely catch the #GP in that case.
- */
- if ((rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) &&
- (msr_content & (3ULL << 27)) &&
- (wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3ULL << 27)) != 0))
- printk(KERN_ERR "Failed to disable C1E on CPU#%u (%16"PRIx64")\n",
- smp_processor_id(), msr_content);
+ uint64_t msr_content;
+
+ /*
+ * Disable C1E mode, as the APIC timer stops in that mode.
+ * The MSR does not exist in all FamilyF CPUs (only Rev F and above),
+ * but we safely catch the #GP in that case.
+ */
+ if ((rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) &&
+ (msr_content & (3ULL << 27)) &&
+ (wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3ULL << 27)) != 0))
+ printk(KERN_ERR "Failed to disable C1E on CPU#%u (%16"PRIx64")\n",
+ smp_processor_id(), msr_content);
}
void amd_check_disable_c1e(unsigned int port, u8 value)
{
- /* C1E is sometimes enabled during entry to ACPI mode. */
- if ((port == acpi_smi_cmd) && (value == acpi_enable_value))
- on_each_cpu(disable_c1e, NULL, 1);
+ /* C1E is sometimes enabled during entry to ACPI mode. */
+ if ((port == acpi_smi_cmd) && (value == acpi_enable_value))
+ on_each_cpu(disable_c1e, NULL, 1);
}
/*
- * BIOS is expected to clear MtrrFixDramModEn bit. According to AMD BKDG :
- * "The MtrrFixDramModEn bit should be set to 1 during BIOS initalization of
+ * BIOS is expected to clear the MtrrFixDramModEn bit. According to the AMD BKDG:
+ * "The MtrrFixDramModEn bit should be set to 1 during BIOS initialization of
* the fixed MTRRs, then cleared to 0 for operation."
*/
static void check_syscfg_dram_mod_en(void)
{
- uint64_t syscfg;
- static bool_t printed = 0;
+ uint64_t syscfg;
+ static bool_t printed = 0;
- if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0x0f)))
- return;
+ if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+ (boot_cpu_data.x86 >= 0x0f)))
+ return;
- rdmsrl(MSR_K8_SYSCFG, syscfg);
- if (!(syscfg & K8_MTRRFIXRANGE_DRAM_MODIFY))
- return;
+ rdmsrl(MSR_K8_SYSCFG, syscfg);
+ if (!(syscfg & K8_MTRRFIXRANGE_DRAM_MODIFY))
+ return;
- if (!test_and_set_bool(printed))
- printk(KERN_ERR "MTRR: SYSCFG[MtrrFixDramModEn] not "
- "cleared by BIOS, clearing this bit\n");
+ if (!test_and_set_bool(printed))
+ printk(KERN_ERR "MTRR: SYSCFG[MtrrFixDramModEn] not "
+ "cleared by BIOS, clearing this bit\n");
- syscfg &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
- wrmsrl(MSR_K8_SYSCFG, syscfg);
+ syscfg &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
+ wrmsrl(MSR_K8_SYSCFG, syscfg);
}
static void amd_get_topology(struct cpuinfo_x86 *c)
{
- int cpu;
- unsigned bits;
+ int cpu;
+ unsigned bits;
+
+ if (c->x86_max_cores <= 1)
+ return;
+ /*
+     * On an AMD multi-core setup the lower bits of the APIC id
+     * distinguish the cores.
+ */
+ cpu = smp_processor_id();
+ bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
+ if (bits == 0)
+ {
+ while ((1 << bits) < c->x86_max_cores)
+ bits++;
+ }
+
+ /* Low order bits define the core id */
+ c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
+ /* Convert local APIC ID into the socket ID */
+ c->phys_proc_id >>= bits;
+ /* Collect compute unit ID if available */
+ if (cpu_has(c, X86_FEATURE_TOPOEXT))
+ {
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+ c->x86_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+ if (c->x86 < 0x17)
+ c->compute_unit_id = ebx & 0xFF;
+ else
+ {
+ c->cpu_core_id = ebx & 0xFF;
+ c->x86_max_cores /= c->x86_num_siblings;
+ }
- if (c->x86_max_cores <= 1)
- return;
/*
- * On a AMD multi core setup the lower bits of the APIC id
- * distingush the cores.
+ * In case leaf B is available, use it to derive
+ * topology information.
*/
- cpu = smp_processor_id();
- bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
-
- if (bits == 0) {
- while ((1 << bits) < c->x86_max_cores)
- bits++;
- }
-
- /* Low order bits define the core id */
- c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
- /* Convert local APIC ID into the socket ID */
- c->phys_proc_id >>= bits;
- /* Collect compute unit ID if available */
- if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
- u32 eax, ebx, ecx, edx;
-
- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- c->x86_num_siblings = ((ebx >> 8) & 0xff) + 1;
-
- if (c->x86 < 0x17)
- c->compute_unit_id = ebx & 0xFF;
- else {
- c->cpu_core_id = ebx & 0xFF;
- c->x86_max_cores /= c->x86_num_siblings;
- }
-
- /*
- * In case leaf B is available, use it to derive
- * topology information.
- */
- if (detect_extended_topology(c))
- return;
- }
-
- if (opt_cpu_info)
- printk("CPU %d(%d) -> Processor %d, %s %d\n",
- cpu, c->x86_max_cores, c->phys_proc_id,
- c->compute_unit_id != INVALID_CUID ? "Compute Unit"
- : "Core",
- c->compute_unit_id != INVALID_CUID ? c->compute_unit_id
- : c->cpu_core_id);
+ if (detect_extended_topology(c))
+ return;
+ }
+
+ if (opt_cpu_info)
+ printk("CPU %d(%d) -> Processor %d, %s %d\n",
+ cpu, c->x86_max_cores, c->phys_proc_id,
+ c->compute_unit_id != INVALID_CUID ? "Compute Unit"
+ : "Core",
+ c->compute_unit_id != INVALID_CUID ? c->compute_unit_id
+ : c->cpu_core_id);
}
void early_init_amd(struct cpuinfo_x86 *c)
{
- if (c == &boot_cpu_data)
- amd_init_levelling();
+ if (c == &boot_cpu_data)
+ amd_init_levelling();
- ctxt_switch_levelling(NULL);
+ ctxt_switch_levelling(NULL);
}
static void init_amd(struct cpuinfo_x86 *c)
{
- u32 l, h;
-
- unsigned long long value;
-
- /* Disable TLB flush filter by setting HWCR.FFDIS on K8
- * bit 6 of msr C001_0015
- *
- * Errata 63 for SH-B3 steppings
- * Errata 122 for all steppings (F+ have it disabled by default)
- */
- if (c->x86 == 15) {
- rdmsrl(MSR_K7_HWCR, value);
- value |= 1 << 6;
- wrmsrl(MSR_K7_HWCR, value);
- }
-
- /*
- * Some AMD CPUs duplicate the 3DNow bit in base and extended CPUID
- * leaves. Unfortunately, this aliases PBE on Intel CPUs. Clobber the
- * alias, leaving 3DNow in the extended leaf.
- */
- __clear_bit(X86_FEATURE_PBE, c->x86_capability);
-
- if (c->x86 == 0xf && c->x86_model < 0x14
- && cpu_has(c, X86_FEATURE_LAHF_LM)) {
- /*
- * Some BIOSes incorrectly force this feature, but only K8
- * revision D (model = 0x14) and later actually support it.
- * (AMD Erratum #110, docId: 25759).
- */
- __clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability);
- if (!rdmsr_amd_safe(0xc001100d, &l, &h))
- wrmsr_amd_safe(0xc001100d, l, h & ~1);
- }
-
- /*
- * Attempt to set lfence to be Dispatch Serialising. This MSR almost
- * certainly isn't virtualised (and Xen at least will leak the real
- * value in but silently discard writes), as well as being per-core
- * rather than per-thread, so do a full safe read/write/readback cycle
- * in the worst case.
- */
- if (c->x86 == 0x0f || c->x86 == 0x11)
- /* Always dispatch serialising on this hardare. */
- __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
- else /* Implicily "== 0x10 || >= 0x12" by being 64bit. */ {
- if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
- /* Unable to read. Assume the safer default. */
- __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
- else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
- /* Already dispatch serialising. */
- __set_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
- else if (wrmsr_safe(MSR_AMD64_DE_CFG,
- value | AMD64_DE_CFG_LFENCE_SERIALISE) ||
- rdmsr_safe(MSR_AMD64_DE_CFG, value) ||
- !(value & AMD64_DE_CFG_LFENCE_SERIALISE))
- /* Attempt to set failed. Assume the safer default. */
- __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
- else
- /* Successfully enabled! */
- __set_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
- }
-
- /*
- * If the user has explicitly chosen to disable Memory Disambiguation
- * to mitigiate Speculative Store Bypass, poke the appropriate MSR.
- */
- if (opt_ssbd) {
- int bit = -1;
-
- switch (c->x86) {
- case 0x15: bit = 54; break;
- case 0x16: bit = 33; break;
- case 0x17: bit = 10; break;
- }
-
- if (bit >= 0 && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
- value |= 1ull << bit;
- wrmsr_safe(MSR_AMD64_LS_CFG, value);
- }
- }
-
- /* MFENCE stops RDTSC speculation */
- if (!cpu_has_lfence_dispatch)
- __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
-
- switch(c->x86)
- {
- case 0xf ... 0x11:
- disable_c1e(NULL);
- if (acpi_smi_cmd && (acpi_enable_value | acpi_disable_value))
- amd_acpi_c1e_quirk = true;
- break;
- }
-
- display_cacheinfo(c);
-
- if (c->extended_cpuid_level >= 0x80000008) {
- c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
- }
-
- if (c->extended_cpuid_level >= 0x80000007) {
- if (cpu_has(c, X86_FEATURE_ITSC)) {
- __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
- if (c->x86 != 0x11)
- __set_bit(X86_FEATURE_TSC_RELIABLE,
- c->x86_capability);
- }
- }
-
- /* re-enable TopologyExtensions if switched off by BIOS */
- if ((c->x86 == 0x15) &&
- (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
- !cpu_has(c, X86_FEATURE_TOPOEXT) &&
- !rdmsr_safe(MSR_K8_EXT_FEATURE_MASK, value)) {
- value |= 1ULL << 54;
- wrmsr_safe(MSR_K8_EXT_FEATURE_MASK, value);
- rdmsrl(MSR_K8_EXT_FEATURE_MASK, value);
- if (value & (1ULL << 54)) {
- __set_bit(X86_FEATURE_TOPOEXT, c->x86_capability);
- printk(KERN_INFO "CPU: Re-enabling disabled "
- "Topology Extensions Support\n");
- }
- }
+ u32 l, h;
+
+ unsigned long long value;
+
+ /* Disable TLB flush filter by setting HWCR.FFDIS on K8
+ * bit 6 of msr C001_0015
+ *
+ * Errata 63 for SH-B3 steppings
+ * Errata 122 for all steppings (F+ have it disabled by default)
+ */
+ if (c->x86 == 15)
+ {
+ rdmsrl(MSR_K7_HWCR, value);
+ value |= 1 << 6;
+ wrmsrl(MSR_K7_HWCR, value);
+ }
+
+ /*
+ * Some AMD CPUs duplicate the 3DNow bit in base and extended CPUID
+ * leaves. Unfortunately, this aliases PBE on Intel CPUs. Clobber the
+ * alias, leaving 3DNow in the extended leaf.
+ */
+ __clear_bit(X86_FEATURE_PBE, c->x86_capability);
+
+ if (c->x86 == 0xf && c->x86_model < 0x14
+ && cpu_has(c, X86_FEATURE_LAHF_LM))
+ {
+ /*
+ * Some BIOSes incorrectly force this feature, but only K8
+ * revision D (model = 0x14) and later actually support it.
+ * (AMD Erratum #110, docId: 25759).
+ */
+ __clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability);
+ if (!rdmsr_amd_safe(0xc001100d, &l, &h))
+ wrmsr_amd_safe(0xc001100d, l, h & ~1);
+ }
+
+ /*
+ * Attempt to set lfence to be Dispatch Serialising. This MSR almost
+ * certainly isn't virtualised (and Xen at least will leak the real
+ * value in but silently discard writes), as well as being per-core
+ * rather than per-thread, so do a full safe read/write/readback cycle
+ * in the worst case.
+ */
+ if (c->x86 == 0x0f || c->x86 == 0x11)
+        /* Always dispatch serialising on this hardware. */
+ __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
+    else /* Implicitly "== 0x10 || >= 0x12" by being 64bit. */
+ {
+ if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
+ /* Unable to read. Assume the safer default. */
+ __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+ else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
+ /* Already dispatch serialising. */
+ __set_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+ else if (wrmsr_safe(MSR_AMD64_DE_CFG,
+ value | AMD64_DE_CFG_LFENCE_SERIALISE) ||
+ rdmsr_safe(MSR_AMD64_DE_CFG, value) ||
+ !(value & AMD64_DE_CFG_LFENCE_SERIALISE))
+ /* Attempt to set failed. Assume the safer default. */
+ __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+ else
+ /* Successfully enabled! */
+ __set_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+ }
+
+ /*
+ * If the user has explicitly chosen to disable Memory Disambiguation
+     * to mitigate Speculative Store Bypass, poke the appropriate MSR.
+ */
+ if (opt_ssbd)
+ {
+ int bit = -1;
+
+ switch (c->x86)
+ {
+ case 0x15:
+ bit = 54;
+ break;
+ case 0x16:
+ bit = 33;
+ break;
+ case 0x17:
+ bit = 10;
+ break;
+ }
- /*
- * The way access filter has a performance penalty on some workloads.
- * Disable it on the affected CPUs.
- */
- if (c->x86 == 0x15 && c->x86_model >= 0x02 && c->x86_model < 0x20 &&
- !rdmsr_safe(MSR_AMD64_IC_CFG, value) && (value & 0x1e) != 0x1e)
- wrmsr_safe(MSR_AMD64_IC_CFG, value | 0x1e);
-
- amd_get_topology(c);
-
- /* Pointless to use MWAIT on Family10 as it does not deep sleep. */
- if (c->x86 == 0x10)
- __clear_bit(X86_FEATURE_MONITOR, c->x86_capability);
-
- if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121))
- opt_allow_unsafe = 1;
- else if (opt_allow_unsafe < 0)
- panic("Xen will not boot on this CPU for security reasons"
- "Pass \"allow_unsafe\" if you're trusting all your"
- " (PV) guest kernels.\n");
- else if (!opt_allow_unsafe && c == &boot_cpu_data)
- printk(KERN_WARNING
- "*** Xen will not allow creation of DomU-s on"
- " this CPU for security reasons. ***\n"
- KERN_WARNING
- "*** Pass \"allow_unsafe\" if you're trusting"
- " all your (PV) guest kernels. ***\n");
-
- if (c->x86 == 0x16 && c->x86_model <= 0xf) {
- if (c == &boot_cpu_data) {
- l = pci_conf_read32(0, 0, 0x18, 0x3, 0x58);
- h = pci_conf_read32(0, 0, 0x18, 0x3, 0x5c);
- if ((l & 0x1f) | (h & 0x1))
- printk(KERN_WARNING
- "Applying workaround for erratum 792: %s%s%s\n",
- (l & 0x1f) ? "clearing D18F3x58[4:0]" : "",
- ((l & 0x1f) && (h & 0x1)) ? " and " : "",
- (h & 0x1) ? "clearing D18F3x5C[0]" : "");
-
- if (l & 0x1f)
- pci_conf_write32(0, 0, 0x18, 0x3, 0x58,
- l & ~0x1f);
-
- if (h & 0x1)
- pci_conf_write32(0, 0, 0x18, 0x3, 0x5c,
- h & ~0x1);
- }
-
- rdmsrl(MSR_AMD64_LS_CFG, value);
- if (!(value & (1 << 15))) {
- static bool_t warned;
-
- if (c == &boot_cpu_data || opt_cpu_info ||
- !test_and_set_bool(warned))
- printk(KERN_WARNING
- "CPU%u: Applying workaround for erratum 793\n",
- smp_processor_id());
- wrmsrl(MSR_AMD64_LS_CFG, value | (1 << 15));
- }
- } else if (c->x86 == 0x12) {
- rdmsrl(MSR_AMD64_DE_CFG, value);
- if (!(value & (1U << 31))) {
- static bool warned;
-
- if (c == &boot_cpu_data || opt_cpu_info ||
- !test_and_set_bool(warned))
- printk(KERN_WARNING
- "CPU%u: Applying workaround for erratum 665\n",
- smp_processor_id());
- wrmsrl(MSR_AMD64_DE_CFG, value | (1U << 31));
- }
- }
+ if (bit >= 0 && !rdmsr_safe(MSR_AMD64_LS_CFG, value))
+ {
+ value |= 1ull << bit;
+ wrmsr_safe(MSR_AMD64_LS_CFG, value);
+ }
+ }
+
+ /* MFENCE stops RDTSC speculation */
+ if (!cpu_has_lfence_dispatch)
+ __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+
+ switch (c->x86)
+ {
+ case 0xf ... 0x11:
+ disable_c1e(NULL);
+ if (acpi_smi_cmd && (acpi_enable_value | acpi_disable_value))
+ amd_acpi_c1e_quirk = true;
+ break;
+ }
+
+ display_cacheinfo(c);
+
+ if (c->extended_cpuid_level >= 0x80000008)
+ c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+
+ if (c->extended_cpuid_level >= 0x80000007)
+ {
+ if (cpu_has(c, X86_FEATURE_ITSC))
+ {
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+ if (c->x86 != 0x11)
+ __set_bit(X86_FEATURE_TSC_RELIABLE,
+ c->x86_capability);
+ }
+ }
+
+ /* re-enable TopologyExtensions if switched off by BIOS */
+ if ((c->x86 == 0x15) &&
+ (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+ !cpu_has(c, X86_FEATURE_TOPOEXT) &&
+ !rdmsr_safe(MSR_K8_EXT_FEATURE_MASK, value))
+ {
+ value |= 1ULL << 54;
+ wrmsr_safe(MSR_K8_EXT_FEATURE_MASK, value);
+ rdmsrl(MSR_K8_EXT_FEATURE_MASK, value);
+ if (value & (1ULL << 54))
+ {
+ __set_bit(X86_FEATURE_TOPOEXT, c->x86_capability);
+ printk(KERN_INFO "CPU: Re-enabling disabled "
+ "Topology Extensions Support\n");
+ }
+ }
+
+ /*
+ * The way access filter has a performance penalty on some workloads.
+ * Disable it on the affected CPUs.
+ */
+ if (c->x86 == 0x15 && c->x86_model >= 0x02 && c->x86_model < 0x20 &&
+ !rdmsr_safe(MSR_AMD64_IC_CFG, value) && (value & 0x1e) != 0x1e)
+ wrmsr_safe(MSR_AMD64_IC_CFG, value | 0x1e);
+
+ amd_get_topology(c);
+
+ /* Pointless to use MWAIT on Family10 as it does not deep sleep. */
+ if (c->x86 == 0x10)
+ __clear_bit(X86_FEATURE_MONITOR, c->x86_capability);
+
+ if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121))
+ opt_allow_unsafe = 1;
+ else if (opt_allow_unsafe < 0)
+        panic("Xen will not boot on this CPU for security reasons. "
+ "Pass \"allow_unsafe\" if you're trusting all your"
+ " (PV) guest kernels.\n");
+ else if (!opt_allow_unsafe && c == &boot_cpu_data)
+ printk(KERN_WARNING
+ "*** Xen will not allow creation of DomU-s on"
+ " this CPU for security reasons. ***\n"
+ KERN_WARNING
+ "*** Pass \"allow_unsafe\" if you're trusting"
+ " all your (PV) guest kernels. ***\n");
+
+ if (c->x86 == 0x16 && c->x86_model <= 0xf)
+ {
+ if (c == &boot_cpu_data)
+ {
+ l = pci_conf_read32(0, 0, 0x18, 0x3, 0x58);
+ h = pci_conf_read32(0, 0, 0x18, 0x3, 0x5c);
+ if ((l & 0x1f) | (h & 0x1))
+ printk(KERN_WARNING
+ "Applying workaround for erratum 792: %s%s%s\n",
+ (l & 0x1f) ? "clearing D18F3x58[4:0]" : "",
+ ((l & 0x1f) && (h & 0x1)) ? " and " : "",
+ (h & 0x1) ? "clearing D18F3x5C[0]" : "");
+
+ if (l & 0x1f)
+ pci_conf_write32(0, 0, 0x18, 0x3, 0x58,
+ l & ~0x1f);
+
+ if (h & 0x1)
+ pci_conf_write32(0, 0, 0x18, 0x3, 0x5c,
+ h & ~0x1);
+ }
- /* AMD CPUs do not support SYSENTER outside of legacy mode. */
- __clear_bit(X86_FEATURE_SEP, c->x86_capability);
-
- if (c->x86 == 0x10) {
- /* do this for boot cpu */
- if (c == &boot_cpu_data)
- check_enable_amd_mmconf_dmi();
-
- fam10h_check_enable_mmcfg();
-
- /*
- * On family 10h BIOS may not have properly enabled WC+
- * support, causing it to be converted to CD memtype. This may
- * result in performance degradation for certain nested-paging
- * guests. Prevent this conversion by clearing bit 24 in
- * MSR_F10_BU_CFG2.
- */
- rdmsrl(MSR_F10_BU_CFG2, value);
- value &= ~(1ULL << 24);
- wrmsrl(MSR_F10_BU_CFG2, value);
- }
+ rdmsrl(MSR_AMD64_LS_CFG, value);
+ if (!(value & (1 << 15)))
+ {
+ static bool_t warned;
+
+ if (c == &boot_cpu_data || opt_cpu_info ||
+ !test_and_set_bool(warned))
+ printk(KERN_WARNING
+ "CPU%u: Applying workaround for erratum 793\n",
+ smp_processor_id());
+ wrmsrl(MSR_AMD64_LS_CFG, value | (1 << 15));
+ }
+ }
+ else if (c->x86 == 0x12)
+ {
+ rdmsrl(MSR_AMD64_DE_CFG, value);
+ if (!(value & (1U << 31)))
+ {
+ static bool warned;
+
+ if (c == &boot_cpu_data || opt_cpu_info ||
+ !test_and_set_bool(warned))
+ printk(KERN_WARNING
+ "CPU%u: Applying workaround for erratum 665\n",
+ smp_processor_id());
+ wrmsrl(MSR_AMD64_DE_CFG, value | (1U << 31));
+ }
+ }
- /*
- * Family 0x12 and above processors have APIC timer
- * running in deep C states.
- */
- if ( opt_arat && c->x86 > 0x11 )
- __set_bit(X86_FEATURE_ARAT, c->x86_capability);
-
- /*
- * Prior to Family 0x14, perf counters are not reset during warm reboot.
- * We have to reset them manually.
- */
- if (nmi_watchdog != NMI_LOCAL_APIC && c->x86 < 0x14) {
- wrmsrl(MSR_K7_PERFCTR0, 0);
- wrmsrl(MSR_K7_PERFCTR1, 0);
- wrmsrl(MSR_K7_PERFCTR2, 0);
- wrmsrl(MSR_K7_PERFCTR3, 0);
- }
+ /* AMD CPUs do not support SYSENTER outside of legacy mode. */
+ __clear_bit(X86_FEATURE_SEP, c->x86_capability);
- if (cpu_has(c, X86_FEATURE_EFRO)) {
- rdmsr(MSR_K7_HWCR, l, h);
- l |= (1 << 27); /* Enable read-only APERF/MPERF bit */
- wrmsr(MSR_K7_HWCR, l, h);
- }
+ if (c->x86 == 0x10)
+ {
+ /* do this for boot cpu */
+ if (c == &boot_cpu_data)
+ check_enable_amd_mmconf_dmi();
- /* Prevent TSC drift in non single-processor, single-core platforms. */
- if ((smp_processor_id() == 1) && !cpu_has(c, X86_FEATURE_ITSC))
- disable_c1_ramping();
+ fam10h_check_enable_mmcfg();
- check_syscfg_dram_mod_en();
+ /*
+ * On family 10h BIOS may not have properly enabled WC+
+ * support, causing it to be converted to CD memtype. This may
+ * result in performance degradation for certain nested-paging
+ * guests. Prevent this conversion by clearing bit 24 in
+ * MSR_F10_BU_CFG2.
+ */
+ rdmsrl(MSR_F10_BU_CFG2, value);
+ value &= ~(1ULL << 24);
+ wrmsrl(MSR_F10_BU_CFG2, value);
+ }
+
+ /*
+ * Family 0x12 and above processors have APIC timer
+ * running in deep C states.
+ */
+ if ( opt_arat && c->x86 > 0x11 )
+ __set_bit(X86_FEATURE_ARAT, c->x86_capability);
+
+ /*
+ * Prior to Family 0x14, perf counters are not reset during warm reboot.
+ * We have to reset them manually.
+ */
+ if (nmi_watchdog != NMI_LOCAL_APIC && c->x86 < 0x14)
+ {
+ wrmsrl(MSR_K7_PERFCTR0, 0);
+ wrmsrl(MSR_K7_PERFCTR1, 0);
+ wrmsrl(MSR_K7_PERFCTR2, 0);
+ wrmsrl(MSR_K7_PERFCTR3, 0);
+ }
+
+ if (cpu_has(c, X86_FEATURE_EFRO))
+ {
+ rdmsr(MSR_K7_HWCR, l, h);
+ l |= (1 << 27); /* Enable read-only APERF/MPERF bit */
+ wrmsr(MSR_K7_HWCR, l, h);
+ }
+
+ /* Prevent TSC drift in non single-processor, single-core platforms. */
+ if ((smp_processor_id() == 1) && !cpu_has(c, X86_FEATURE_ITSC))
+ disable_c1_ramping();
+
+ check_syscfg_dram_mod_en();
}
-const struct cpu_dev amd_cpu_dev = {
- .c_early_init = early_init_amd,
- .c_init = init_amd,
+const struct cpu_dev amd_cpu_dev =
+{
+ .c_early_init = early_init_amd,
+ .c_init = init_amd,
};
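
Aside, separate from the patch itself: the LFENCE logic re-indented above relies on a read / conditional write / readback cycle against MSR_AMD64_DE_CFG, because the MSR may be absent, read-only, or have its writes silently discarded by a hypervisor. The following stand-alone C model reproduces just that decision flow; fake_rdmsr()/fake_wrmsr() and the local DE_CFG_LFENCE_SERIALISE value are illustrative stand-ins for the rdmsr_safe()/wrmsr_safe() helpers and Xen's real constant.

/* Stand-alone model of the "make LFENCE dispatch serialising" probe.
 * fake_rdmsr()/fake_wrmsr() simulate an MSR whose writes may be refused;
 * the real code uses rdmsr_safe()/wrmsr_safe() on MSR_AMD64_DE_CFG. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DE_CFG_LFENCE_SERIALISE (1ull << 1)  /* illustrative bit position */

static uint64_t msr_de_cfg;            /* simulated MSR contents */
static bool msr_writable = true;       /* set to false to model a locked MSR */

static int fake_rdmsr(uint64_t *val) { *val = msr_de_cfg; return 0; }

static int fake_wrmsr(uint64_t val)
{
    if (!msr_writable)
        return -1;                     /* write faulted or was discarded */
    msr_de_cfg = val;
    return 0;
}

/* Returns true only if LFENCE can be relied upon to be dispatch serialising. */
static bool probe_lfence_dispatch(void)
{
    uint64_t value;

    if (fake_rdmsr(&value))
        return false;                  /* unreadable: assume the safer default */
    if (value & DE_CFG_LFENCE_SERIALISE)
        return true;                   /* already serialising */
    if (fake_wrmsr(value | DE_CFG_LFENCE_SERIALISE) ||
        fake_rdmsr(&value) ||
        !(value & DE_CFG_LFENCE_SERIALISE))
        return false;                  /* the set attempt did not stick */
    return true;                       /* successfully enabled */
}

int main(void)
{
    printf("lfence dispatch serialising: %s\n",
           probe_lfence_dispatch() ? "yes" : "no");
    return 0;
}

The final readback is the step that matters: a write that appears to succeed but does not stick must still leave the feature bit cleared.
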
diff --git a/xen/arch/x86/cpu/centaur.c b/xen/arch/x86/cpu/centaur.c
index 34a5bfcaee..ce6fb03ffb 100644
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -16,44 +16,49 @@
static void init_c3(struct cpuinfo_x86 *c)
{
- uint64_t msr_content;
-
- /* Test for Centaur Extended Feature Flags presence */
- if (cpuid_eax(0xC0000000) >= 0xC0000001) {
- u32 tmp = cpuid_edx(0xC0000001);
-
- /* enable ACE unit, if present and disabled */
- if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
- rdmsrl(MSR_VIA_FCR, msr_content);
- /* enable ACE unit */
- wrmsrl(MSR_VIA_FCR, msr_content | ACE_FCR);
- printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
- }
-
- /* enable RNG unit, if present and disabled */
- if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
- rdmsrl(MSR_VIA_RNG, msr_content);
- /* enable RNG unit */
- wrmsrl(MSR_VIA_RNG, msr_content | RNG_ENABLE);
- printk(KERN_INFO "CPU: Enabled h/w RNG\n");
- }
- }
-
- if (c->x86 == 0x6 && c->x86_model >= 0xf) {
- c->x86_cache_alignment = c->x86_clflush_size * 2;
- __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- }
-
- get_model_name(c);
- display_cacheinfo(c);
+ uint64_t msr_content;
+
+ /* Test for Centaur Extended Feature Flags presence */
+ if (cpuid_eax(0xC0000000) >= 0xC0000001)
+ {
+ u32 tmp = cpuid_edx(0xC0000001);
+
+ /* enable ACE unit, if present and disabled */
+ if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT)
+ {
+ rdmsrl(MSR_VIA_FCR, msr_content);
+ /* enable ACE unit */
+ wrmsrl(MSR_VIA_FCR, msr_content | ACE_FCR);
+ printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+ }
+
+ /* enable RNG unit, if present and disabled */
+ if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT)
+ {
+ rdmsrl(MSR_VIA_RNG, msr_content);
+ /* enable RNG unit */
+ wrmsrl(MSR_VIA_RNG, msr_content | RNG_ENABLE);
+ printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+ }
+ }
+
+ if (c->x86 == 0x6 && c->x86_model >= 0xf)
+ {
+ c->x86_cache_alignment = c->x86_clflush_size * 2;
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ }
+
+ get_model_name(c);
+ display_cacheinfo(c);
}
static void init_centaur(struct cpuinfo_x86 *c)
{
- if (c->x86 == 6)
- init_c3(c);
+ if (c->x86 == 6)
+ init_c3(c);
}
-const struct cpu_dev centaur_cpu_dev = {
- .c_init = init_centaur,
+const struct cpu_dev centaur_cpu_dev =
+{
+ .c_init = init_centaur,
};
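
For illustration only, not part of the patch: init_c3() follows a small "present but not yet enabled" idiom, where the feature word carries both a *_PRESENT and a *_ENABLED bit and the unit is switched on only when the first is set without the second. A compile-anywhere sketch of that check, with invented bit positions and a plain variable standing in for the FCR/RNG MSRs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented bit positions; the real ACE/RNG definitions live in Xen headers. */
#define UNIT_PRESENT    (1u << 6)
#define UNIT_ENABLED    (1u << 7)
#define FCR_UNIT_ENABLE (1u << 28)

static uint64_t fcr;    /* stands in for the MSR_VIA_FCR / MSR_VIA_RNG register */

static void maybe_enable_unit(uint32_t feature_word)
{
    /* Only act when the unit exists but firmware left it switched off. */
    if ((feature_word & (UNIT_PRESENT | UNIT_ENABLED)) == UNIT_PRESENT)
    {
        fcr |= FCR_UNIT_ENABLE;
        printf("unit enabled (fcr=%#llx)\n", (unsigned long long)fcr);
    }
}

int main(void)
{
    maybe_enable_unit(UNIT_PRESENT);                 /* present, disabled: enable */
    maybe_enable_unit(UNIT_PRESENT | UNIT_ENABLED);  /* already on: leave alone */
    maybe_enable_unit(0);                            /* absent: leave alone */
    return 0;
}
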
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 1db96d959c..f159a1c046 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -51,57 +51,60 @@ static unsigned int forced_caps[NCAPINTS];
void __init setup_clear_cpu_cap(unsigned int cap)
{
- const uint32_t *dfs;
- unsigned int i;
-
- if (__test_and_set_bit(cap, cleared_caps))
- return;
-
- if (test_bit(cap, forced_caps))
- printk("%pS clearing previously forced feature %#x\n",
- __builtin_return_address(0), cap);
-
- __clear_bit(cap, boot_cpu_data.x86_capability);
- dfs = x86_cpuid_lookup_deep_deps(cap);
-
- if (!dfs)
- return;
-
- for (i = 0; i < FSCAPINTS; ++i) {
- cleared_caps[i] |= dfs[i];
- boot_cpu_data.x86_capability[i] &= ~dfs[i];
- if (!(forced_caps[i] & dfs[i]))
- continue;
- printk("%pS implicitly clearing previously forced feature(s) %u:%#x\n",
- __builtin_return_address(0),
- i, forced_caps[i] & dfs[i]);
- }
+ const uint32_t *dfs;
+ unsigned int i;
+
+ if (__test_and_set_bit(cap, cleared_caps))
+ return;
+
+ if (test_bit(cap, forced_caps))
+ printk("%pS clearing previously forced feature %#x\n",
+ __builtin_return_address(0), cap);
+
+ __clear_bit(cap, boot_cpu_data.x86_capability);
+ dfs = x86_cpuid_lookup_deep_deps(cap);
+
+ if (!dfs)
+ return;
+
+ for (i = 0; i < FSCAPINTS; ++i)
+ {
+ cleared_caps[i] |= dfs[i];
+ boot_cpu_data.x86_capability[i] &= ~dfs[i];
+ if (!(forced_caps[i] & dfs[i]))
+ continue;
+ printk("%pS implicitly clearing previously forced feature(s) %u:%#x\n",
+ __builtin_return_address(0),
+ i, forced_caps[i] & dfs[i]);
+ }
}
void __init setup_force_cpu_cap(unsigned int cap)
{
- if (__test_and_set_bit(cap, forced_caps))
- return;
+ if (__test_and_set_bit(cap, forced_caps))
+ return;
- if (test_bit(cap, cleared_caps)) {
- printk("%pS tries to force previously cleared feature %#x\n",
- __builtin_return_address(0), cap);
- return;
- }
+ if (test_bit(cap, cleared_caps))
+ {
+ printk("%pS tries to force previously cleared feature %#x\n",
+ __builtin_return_address(0), cap);
+ return;
+ }
- __set_bit(cap, boot_cpu_data.x86_capability);
+ __set_bit(cap, boot_cpu_data.x86_capability);
}
-static void default_init(struct cpuinfo_x86 * c)
+static void default_init(struct cpuinfo_x86 *c)
{
- /* Not much we can do here... */
- /* Check if at least it has cpuid */
- BUG_ON(c->cpuid_level == -1);
- __clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ /* Not much we can do here... */
+ /* Check if at least it has cpuid */
+ BUG_ON(c->cpuid_level == -1);
+ __clear_bit(X86_FEATURE_SEP, c->x86_capability);
}
-static const struct cpu_dev default_cpu = {
- .c_init = default_init,
+static const struct cpu_dev default_cpu =
+{
+ .c_init = default_init,
};
static const struct cpu_dev *this_cpu = &default_cpu;
@@ -110,87 +113,88 @@ void (* __read_mostly ctxt_switch_masking)(const struct vcpu *next);
bool __init probe_cpuid_faulting(void)
{
- uint64_t val;
- int rc;
-
- /*
- * Don't bother looking for CPUID faulting if we aren't virtualised on
- * AMD or Hygon hardware - it won't be present.
- */
- if ((boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
- !cpu_has_hypervisor)
- return false;
-
- if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
- raw_msr_policy.plaform_info.cpuid_faulting =
- val & MSR_PLATFORM_INFO_CPUID_FAULTING;
-
- if (rc ||
- !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
- rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
- this_cpu(msr_misc_features)))
- {
- setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING);
- return false;
- }
-
- expected_levelling_cap |= LCAP_faulting;
- levelling_caps |= LCAP_faulting;
- setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
-
- return true;
+ uint64_t val;
+ int rc;
+
+ /*
+ * Don't bother looking for CPUID faulting if we aren't virtualised on
+ * AMD or Hygon hardware - it won't be present.
+ */
+ if ((boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+ !cpu_has_hypervisor)
+ return false;
+
+ if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
+ raw_msr_policy.plaform_info.cpuid_faulting =
+ val & MSR_PLATFORM_INFO_CPUID_FAULTING;
+
+ if (rc ||
+ !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
+ rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
+ this_cpu(msr_misc_features)))
+ {
+ setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING);
+ return false;
+ }
+
+ expected_levelling_cap |= LCAP_faulting;
+ levelling_caps |= LCAP_faulting;
+ setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
+
+ return true;
}
static void set_cpuid_faulting(bool enable)
{
- uint64_t *this_misc_features = &this_cpu(msr_misc_features);
- uint64_t val = *this_misc_features;
+ uint64_t *this_misc_features = &this_cpu(msr_misc_features);
+ uint64_t val = *this_misc_features;
- if (!!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable)
- return;
+ if (!!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable)
+ return;
- val ^= MSR_MISC_FEATURES_CPUID_FAULTING;
+ val ^= MSR_MISC_FEATURES_CPUID_FAULTING;
- wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
- *this_misc_features = val;
+ wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
+ *this_misc_features = val;
}
void ctxt_switch_levelling(const struct vcpu *next)
{
- const struct domain *nextd = next ? next->domain : NULL;
-
- if (cpu_has_cpuid_faulting) {
- /*
- * No need to alter the faulting setting if we are switching
- * to idle; it won't affect any code running in idle context.
- */
- if (nextd && is_idle_domain(nextd))
- return;
- /*
- * We *should* be enabling faulting for the control domain.
- *
- * Unfortunately, the domain builder (having only ever been a
- * PV guest) expects to be able to see host cpuid state in a
- * native CPUID instruction, to correctly build a CPUID policy
- * for HVM guests (notably the xstate leaves).
- *
- * This logic is fundimentally broken for HVM toolstack
- * domains, and faulting causes PV guests to behave like HVM
- * guests from their point of view.
- *
- * Future development plans will move responsibility for
- * generating the maximum full cpuid policy into Xen, at which
- * this problem will disappear.
- */
- set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
- (is_pv_domain(nextd) ||
- next->arch.msrs->
- misc_features_enables.cpuid_faulting));
- return;
- }
-
- if (ctxt_switch_masking)
- alternative_vcall(ctxt_switch_masking, next);
+ const struct domain *nextd = next ? next->domain : NULL;
+
+ if (cpu_has_cpuid_faulting)
+ {
+ /*
+ * No need to alter the faulting setting if we are switching
+ * to idle; it won't affect any code running in idle context.
+ */
+ if (nextd && is_idle_domain(nextd))
+ return;
+ /*
+ * We *should* be enabling faulting for the control domain.
+ *
+ * Unfortunately, the domain builder (having only ever been a
+ * PV guest) expects to be able to see host cpuid state in a
+ * native CPUID instruction, to correctly build a CPUID policy
+ * for HVM guests (notably the xstate leaves).
+ *
+         * This logic is fundamentally broken for HVM toolstack
+ * domains, and faulting causes PV guests to behave like HVM
+ * guests from their point of view.
+ *
+ * Future development plans will move responsibility for
+ * generating the maximum full cpuid policy into Xen, at which
+ * this problem will disappear.
+ */
+ set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
+ (is_pv_domain(nextd) ||
+ next->arch.msrs->
+ misc_features_enables.cpuid_faulting));
+ return;
+ }
+
+ if (ctxt_switch_masking)
+ alternative_vcall(ctxt_switch_masking, next);
}
bool_t opt_cpu_info;
@@ -198,63 +202,65 @@ boolean_param("cpuinfo", opt_cpu_info);
int get_model_name(struct cpuinfo_x86 *c)
{
- unsigned int *v;
- char *p, *q;
-
- if (c->extended_cpuid_level < 0x80000004)
- return 0;
-
- v = (unsigned int *) c->x86_model_id;
- cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
- cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
- cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
- c->x86_model_id[48] = 0;
-
- /* Intel chips right-justify this string for some dumb reason;
- undo that brain damage */
- p = q = &c->x86_model_id[0];
- while ( *p == ' ' )
- p++;
- if ( p != q ) {
- while ( *p )
- *q++ = *p++;
- while ( q <= &c->x86_model_id[48] )
- *q++ = '\0'; /* Zero-pad the rest */
- }
-
- return 1;
+ unsigned int *v;
+ char *p, *q;
+
+ if (c->extended_cpuid_level < 0x80000004)
+ return 0;
+
+ v = (unsigned int *) c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+
+ /* Intel chips right-justify this string for some dumb reason;
+ undo that brain damage */
+ p = q = &c->x86_model_id[0];
+ while ( *p == ' ' )
+ p++;
+ if ( p != q )
+ {
+ while ( *p )
+ *q++ = *p++;
+ while ( q <= &c->x86_model_id[48] )
+ *q++ = '\0'; /* Zero-pad the rest */
+ }
+
+ return 1;
}
void display_cacheinfo(struct cpuinfo_x86 *c)
{
- unsigned int dummy, ecx, edx, l2size;
-
- if (c->extended_cpuid_level >= 0x80000005) {
- cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
- if (opt_cpu_info)
- printk("CPU: L1 I cache %dK (%d bytes/line),"
- " D cache %dK (%d bytes/line)\n",
- edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
- c->x86_cache_size=(ecx>>24)+(edx>>24);
- }
-
- if (c->extended_cpuid_level < 0x80000006) /* Some chips just has a large L1. */
- return;
-
- ecx = cpuid_ecx(0x80000006);
- l2size = ecx >> 16;
-
- c->x86_cache_size = l2size;
-
- if (opt_cpu_info)
- printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
- l2size, ecx & 0xFF);
+ unsigned int dummy, ecx, edx, l2size;
+
+ if (c->extended_cpuid_level >= 0x80000005)
+ {
+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+ if (opt_cpu_info)
+ printk("CPU: L1 I cache %dK (%d bytes/line),"
+ " D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ }
+
+    if (c->extended_cpuid_level < 0x80000006) /* Some chips just have a large L1. */
+ return;
+
+ ecx = cpuid_ecx(0x80000006);
+ l2size = ecx >> 16;
+
+ c->x86_cache_size = l2size;
+
+ if (opt_cpu_info)
+ printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
}
static inline u32 _phys_pkg_id(u32 cpuid_apic, int index_msb)
{
- return cpuid_apic >> index_msb;
+ return cpuid_apic >> index_msb;
}
/*
@@ -266,7 +272,7 @@ static inline u32 _phys_pkg_id(u32 cpuid_apic, int index_msb)
*/
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
- return _phys_pkg_id(get_apic_id(), index_msb);
+ return _phys_pkg_id(get_apic_id(), index_msb);
}
/* Do minimum CPU detection early.
@@ -277,144 +283,157 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
that is supposed to run on all CPUs. */
void __init early_cpu_init(void)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- u32 eax, ebx, ecx, edx;
-
- c->x86_cache_alignment = 32;
-
- /* Get vendor name */
- cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx);
- *(u32 *)&c->x86_vendor_id[0] = ebx;
- *(u32 *)&c->x86_vendor_id[8] = ecx;
- *(u32 *)&c->x86_vendor_id[4] = edx;
-
- c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL: this_cpu = &intel_cpu_dev; break;
- case X86_VENDOR_AMD: this_cpu = &amd_cpu_dev; break;
- case X86_VENDOR_CENTAUR: this_cpu = &centaur_cpu_dev; break;
- case X86_VENDOR_SHANGHAI: this_cpu = &shanghai_cpu_dev; break;
- case X86_VENDOR_HYGON: this_cpu = &hygon_cpu_dev; break;
- default:
- printk(XENLOG_ERR
- "Unrecognised or unsupported CPU vendor '%.12s'\n",
- c->x86_vendor_id);
- }
-
- cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
- c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask);
-
- edx &= ~cleared_caps[cpufeat_word(X86_FEATURE_FPU)];
- ecx &= ~cleared_caps[cpufeat_word(X86_FEATURE_SSE3)];
- if (edx & cpufeat_mask(X86_FEATURE_CLFLUSH))
- c->x86_cache_alignment = ((ebx >> 8) & 0xff) * 8;
- /* Leaf 0x1 capabilities filled in early for Xen. */
- c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx;
- c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx;
-
- printk(XENLOG_INFO
- "CPU Vendor: %s, Family %u (%#x), Model %u (%#x), Stepping %u (raw %08x)\n",
- x86_cpuid_vendor_to_str(c->x86_vendor), c->x86, c->x86,
- c->x86_model, c->x86_model, c->x86_mask, eax);
-
- eax = cpuid_eax(0x80000000);
- if ((eax >> 16) == 0x8000 && eax >= 0x80000008) {
- eax = cpuid_eax(0x80000008);
- paddr_bits = eax & 0xff;
- if (paddr_bits > PADDR_BITS)
- paddr_bits = PADDR_BITS;
- vaddr_bits = (eax >> 8) & 0xff;
- if (vaddr_bits > VADDR_BITS)
- vaddr_bits = VADDR_BITS;
- hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
- if (hap_paddr_bits > PADDR_BITS)
- hap_paddr_bits = PADDR_BITS;
- }
-
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
- park_offline_cpus = opt_mce;
-
- initialize_cpu_data(0);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ u32 eax, ebx, ecx, edx;
+
+ c->x86_cache_alignment = 32;
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx);
+ *(u32 *)&c->x86_vendor_id[0] = ebx;
+ *(u32 *)&c->x86_vendor_id[8] = ecx;
+ *(u32 *)&c->x86_vendor_id[4] = edx;
+
+ c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
+ switch (c->x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ this_cpu = &intel_cpu_dev;
+ break;
+ case X86_VENDOR_AMD:
+ this_cpu = &amd_cpu_dev;
+ break;
+ case X86_VENDOR_CENTAUR:
+ this_cpu = &centaur_cpu_dev;
+ break;
+ case X86_VENDOR_SHANGHAI:
+ this_cpu = &shanghai_cpu_dev;
+ break;
+ case X86_VENDOR_HYGON:
+ this_cpu = &hygon_cpu_dev;
+ break;
+ default:
+ printk(XENLOG_ERR
+ "Unrecognised or unsupported CPU vendor '%.12s'\n",
+ c->x86_vendor_id);
+ }
+
+ cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+ c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask);
+
+ edx &= ~cleared_caps[cpufeat_word(X86_FEATURE_FPU)];
+ ecx &= ~cleared_caps[cpufeat_word(X86_FEATURE_SSE3)];
+ if (edx & cpufeat_mask(X86_FEATURE_CLFLUSH))
+ c->x86_cache_alignment = ((ebx >> 8) & 0xff) * 8;
+ /* Leaf 0x1 capabilities filled in early for Xen. */
+ c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx;
+ c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx;
+
+ printk(XENLOG_INFO
+ "CPU Vendor: %s, Family %u (%#x), Model %u (%#x), Stepping %u (raw %08x)\n",
+ x86_cpuid_vendor_to_str(c->x86_vendor), c->x86, c->x86,
+ c->x86_model, c->x86_model, c->x86_mask, eax);
+
+ eax = cpuid_eax(0x80000000);
+ if ((eax >> 16) == 0x8000 && eax >= 0x80000008)
+ {
+ eax = cpuid_eax(0x80000008);
+ paddr_bits = eax & 0xff;
+ if (paddr_bits > PADDR_BITS)
+ paddr_bits = PADDR_BITS;
+ vaddr_bits = (eax >> 8) & 0xff;
+ if (vaddr_bits > VADDR_BITS)
+ vaddr_bits = VADDR_BITS;
+ hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
+ if (hap_paddr_bits > PADDR_BITS)
+ hap_paddr_bits = PADDR_BITS;
+ }
+
+ if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ park_offline_cpus = opt_mce;
+
+ initialize_cpu_data(0);
}
static void generic_identify(struct cpuinfo_x86 *c)
{
- u32 eax, ebx, ecx, edx, tmp;
-
- /* Get vendor name */
- cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx);
- *(u32 *)&c->x86_vendor_id[0] = ebx;
- *(u32 *)&c->x86_vendor_id[8] = ecx;
- *(u32 *)&c->x86_vendor_id[4] = edx;
-
- c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
- if (boot_cpu_data.x86_vendor != c->x86_vendor)
- printk(XENLOG_ERR "CPU%u vendor %u mismatch against BSP %u\n",
- smp_processor_id(), c->x86_vendor,
- boot_cpu_data.x86_vendor);
-
- /* Initialize the standard set of capabilities */
- /* Note that the vendor-specific code below might override */
-
- /* Model and family information. */
- cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
- c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask);
- c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
- c->phys_proc_id = c->apicid;
-
- if (this_cpu->c_early_init)
- this_cpu->c_early_init(c);
-
- /* c_early_init() may have adjusted cpuid levels/features. Reread. */
- c->cpuid_level = cpuid_eax(0);
- cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
- c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx;
- c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx;
-
- if ( cpu_has(c, X86_FEATURE_CLFLUSH) )
- c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-
- if ( (c->cpuid_level >= CPUID_PM_LEAF) &&
- (cpuid_ecx(CPUID_PM_LEAF) & CPUID6_ECX_APERFMPERF_CAPABILITY) )
- set_bit(X86_FEATURE_APERFMPERF, c->x86_capability);
-
- /* AMD-defined flags: level 0x80000001 */
- c->extended_cpuid_level = cpuid_eax(0x80000000);
- if ((c->extended_cpuid_level >> 16) != 0x8000)
- c->extended_cpuid_level = 0;
- if (c->extended_cpuid_level > 0x80000000)
- cpuid(0x80000001, &tmp, &tmp,
- &c->x86_capability[cpufeat_word(X86_FEATURE_LAHF_LM)],
- &c->x86_capability[cpufeat_word(X86_FEATURE_SYSCALL)]);
- if (c == &boot_cpu_data)
- bootsym(cpuid_ext_features) =
- c->x86_capability[cpufeat_word(X86_FEATURE_NX)];
-
- if (c->extended_cpuid_level >= 0x80000004)
- get_model_name(c); /* Default name */
- if (c->extended_cpuid_level >= 0x80000007)
- c->x86_capability[cpufeat_word(X86_FEATURE_ITSC)]
- = cpuid_edx(0x80000007);
- if (c->extended_cpuid_level >= 0x80000008)
- c->x86_capability[cpufeat_word(X86_FEATURE_CLZERO)]
- = cpuid_ebx(0x80000008);
-
- /* Intel-defined flags: level 0x00000007 */
- if ( c->cpuid_level >= 0x00000007 ) {
- cpuid_count(0x00000007, 0, &eax,
- &c->x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)],
- &c->x86_capability[cpufeat_word(X86_FEATURE_PKU)],
- &c->x86_capability[cpufeat_word(X86_FEATURE_AVX512_4VNNIW)]);
- if (eax > 0)
- cpuid_count(0x00000007, 1,
- &c->x86_capability[cpufeat_word(X86_FEATURE_AVX512_BF16)],
- &tmp, &tmp, &tmp);
- }
-
- if (c->cpuid_level >= 0xd)
- cpuid_count(0xd, 1,
- &c->x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)],
- &tmp, &tmp, &tmp);
+ u32 eax, ebx, ecx, edx, tmp;
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level, &ebx, &ecx, &edx);
+ *(u32 *)&c->x86_vendor_id[0] = ebx;
+ *(u32 *)&c->x86_vendor_id[8] = ecx;
+ *(u32 *)&c->x86_vendor_id[4] = edx;
+
+ c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
+ if (boot_cpu_data.x86_vendor != c->x86_vendor)
+ printk(XENLOG_ERR "CPU%u vendor %u mismatch against BSP %u\n",
+ smp_processor_id(), c->x86_vendor,
+ boot_cpu_data.x86_vendor);
+
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
+
+ /* Model and family information. */
+ cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+ c->x86 = get_cpu_family(eax, &c->x86_model, &c->x86_mask);
+ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+ c->phys_proc_id = c->apicid;
+
+ if (this_cpu->c_early_init)
+ this_cpu->c_early_init(c);
+
+ /* c_early_init() may have adjusted cpuid levels/features. Reread. */
+ c->cpuid_level = cpuid_eax(0);
+ cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+ c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx;
+ c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx;
+
+ if ( cpu_has(c, X86_FEATURE_CLFLUSH) )
+ c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
+
+ if ( (c->cpuid_level >= CPUID_PM_LEAF) &&
+ (cpuid_ecx(CPUID_PM_LEAF) & CPUID6_ECX_APERFMPERF_CAPABILITY) )
+ set_bit(X86_FEATURE_APERFMPERF, c->x86_capability);
+
+ /* AMD-defined flags: level 0x80000001 */
+ c->extended_cpuid_level = cpuid_eax(0x80000000);
+ if ((c->extended_cpuid_level >> 16) != 0x8000)
+ c->extended_cpuid_level = 0;
+ if (c->extended_cpuid_level > 0x80000000)
+ cpuid(0x80000001, &tmp, &tmp,
+ &c->x86_capability[cpufeat_word(X86_FEATURE_LAHF_LM)],
+ &c->x86_capability[cpufeat_word(X86_FEATURE_SYSCALL)]);
+ if (c == &boot_cpu_data)
+ bootsym(cpuid_ext_features) =
+ c->x86_capability[cpufeat_word(X86_FEATURE_NX)];
+
+ if (c->extended_cpuid_level >= 0x80000004)
+ get_model_name(c); /* Default name */
+ if (c->extended_cpuid_level >= 0x80000007)
+ c->x86_capability[cpufeat_word(X86_FEATURE_ITSC)]
+ = cpuid_edx(0x80000007);
+ if (c->extended_cpuid_level >= 0x80000008)
+ c->x86_capability[cpufeat_word(X86_FEATURE_CLZERO)]
+ = cpuid_ebx(0x80000008);
+
+ /* Intel-defined flags: level 0x00000007 */
+ if ( c->cpuid_level >= 0x00000007 )
+ {
+ cpuid_count(0x00000007, 0, &eax,
+ &c->x86_capability[cpufeat_word(X86_FEATURE_FSGSBASE)],
+ &c->x86_capability[cpufeat_word(X86_FEATURE_PKU)],
+ &c->x86_capability[cpufeat_word(X86_FEATURE_AVX512_4VNNIW)]);
+ if (eax > 0)
+ cpuid_count(0x00000007, 1,
+ &c->x86_capability[cpufeat_word(X86_FEATURE_AVX512_BF16)],
+ &tmp, &tmp, &tmp);
+ }
+
+ if (c->cpuid_level >= 0xd)
+ cpuid_count(0xd, 1,
+ &c->x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)],
+ &tmp, &tmp, &tmp);
}
/*
@@ -422,99 +441,104 @@ static void generic_identify(struct cpuinfo_x86 *c)
*/
void identify_cpu(struct cpuinfo_x86 *c)
{
- int i;
-
- c->x86_cache_size = -1;
- c->x86_vendor = X86_VENDOR_UNKNOWN;
- c->cpuid_level = -1; /* CPUID not detected */
- c->x86_model = c->x86_mask = 0; /* So far unknown... */
- c->x86_vendor_id[0] = '\0'; /* Unset */
- c->x86_model_id[0] = '\0'; /* Unset */
- c->x86_max_cores = 1;
- c->x86_num_siblings = 1;
- c->x86_clflush_size = 0;
- c->phys_proc_id = XEN_INVALID_SOCKET_ID;
- c->cpu_core_id = XEN_INVALID_CORE_ID;
- c->compute_unit_id = INVALID_CUID;
- memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
- generic_identify(c);
+ int i;
+
+ c->x86_cache_size = -1;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ c->cpuid_level = -1; /* CPUID not detected */
+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_max_cores = 1;
+ c->x86_num_siblings = 1;
+ c->x86_clflush_size = 0;
+ c->phys_proc_id = XEN_INVALID_SOCKET_ID;
+ c->cpu_core_id = XEN_INVALID_CORE_ID;
+ c->compute_unit_id = INVALID_CUID;
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+ generic_identify(c);
#ifdef NOISY_CAPS
- printk(KERN_DEBUG "CPU: After vendor identify, caps:");
- for (i = 0; i < NCAPINTS; i++)
- printk(" %08x", c->x86_capability[i]);
- printk("\n");
+ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08x", c->x86_capability[i]);
+ printk("\n");
#endif
- /*
- * Vendor-specific initialization. In this section we
- * canonicalize the feature flags, meaning if there are
- * features a certain CPU supports which CPUID doesn't
- * tell us, CPUID claiming incorrect flags, or other bugs,
- * we handle them here.
- *
- * At the end of this section, c->x86_capability better
- * indicate the features this CPU genuinely supports!
- */
- if (this_cpu->c_init)
- this_cpu->c_init(c);
-
-
- if ( !opt_pku )
- setup_clear_cpu_cap(X86_FEATURE_PKU);
-
- /*
- * The vendor-specific functions might have changed features. Now
- * we do "generic changes."
- */
- for (i = 0; i < FSCAPINTS; ++i)
- c->x86_capability[i] &= known_features[i];
-
- for (i = 0 ; i < NCAPINTS ; ++i) {
- c->x86_capability[i] |= forced_caps[i];
- c->x86_capability[i] &= ~cleared_caps[i];
- }
-
- /* If the model name is still unset, do table lookup. */
- if ( !c->x86_model_id[0] ) {
- /* Last resort... */
- snprintf(c->x86_model_id, sizeof(c->x86_model_id),
- "%02x/%02x", c->x86_vendor, c->x86_model);
- }
-
- /* Now the feature flags better reflect actual CPU features! */
-
- if ( cpu_has_xsave )
- xstate_init(c);
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ if (this_cpu->c_init)
+ this_cpu->c_init(c);
+
+
+ if ( !opt_pku )
+ setup_clear_cpu_cap(X86_FEATURE_PKU);
+
+ /*
+ * The vendor-specific functions might have changed features. Now
+ * we do "generic changes."
+ */
+ for (i = 0; i < FSCAPINTS; ++i)
+ c->x86_capability[i] &= known_features[i];
+
+ for (i = 0 ; i < NCAPINTS ; ++i)
+ {
+ c->x86_capability[i] |= forced_caps[i];
+ c->x86_capability[i] &= ~cleared_caps[i];
+ }
+
+ /* If the model name is still unset, do table lookup. */
+ if ( !c->x86_model_id[0] )
+ {
+ /* Last resort... */
+ snprintf(c->x86_model_id, sizeof(c->x86_model_id),
+ "%02x/%02x", c->x86_vendor, c->x86_model);
+ }
+
+ /* Now the feature flags better reflect actual CPU features! */
+
+ if ( cpu_has_xsave )
+ xstate_init(c);
#ifdef NOISY_CAPS
- printk(KERN_DEBUG "CPU: After all inits, caps:");
- for (i = 0; i < NCAPINTS; i++)
- printk(" %08x", c->x86_capability[i]);
- printk("\n");
+ printk(KERN_DEBUG "CPU: After all inits, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08x", c->x86_capability[i]);
+ printk("\n");
#endif
- if (system_state == SYS_STATE_resume)
- return;
-
- /*
- * On SMP, boot_cpu_data holds the common feature set between
- * all CPUs; so make sure that we indicate which features are
- * common between the CPUs. The first time this routine gets
- * executed, c == &boot_cpu_data.
- */
- if ( c != &boot_cpu_data ) {
- /* AND the already accumulated flags with these */
- for ( i = 0 ; i < NCAPINTS ; i++ )
- boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-
- mcheck_init(c, false);
- } else {
- mcheck_init(c, true);
-
- mtrr_bp_init();
- }
+ if (system_state == SYS_STATE_resume)
+ return;
+
+ /*
+ * On SMP, boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if ( c != &boot_cpu_data )
+ {
+ /* AND the already accumulated flags with these */
+ for ( i = 0 ; i < NCAPINTS ; i++ )
+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+
+ mcheck_init(c, false);
+ }
+ else
+ {
+ mcheck_init(c, true);
+
+ mtrr_bp_init();
+ }
}
/* leaf 0xb SMT level */
@@ -535,158 +559,166 @@ void identify_cpu(struct cpuinfo_x86 *c)
*/
bool detect_extended_topology(struct cpuinfo_x86 *c)
{
- unsigned int eax, ebx, ecx, edx, sub_index;
- unsigned int ht_mask_width, core_plus_mask_width;
- unsigned int core_select_mask, core_level_siblings;
- unsigned int initial_apicid;
+ unsigned int eax, ebx, ecx, edx, sub_index;
+ unsigned int ht_mask_width, core_plus_mask_width;
+ unsigned int core_select_mask, core_level_siblings;
+ unsigned int initial_apicid;
- if ( c->cpuid_level < 0xb )
- return false;
+ if ( c->cpuid_level < 0xb )
+ return false;
- cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
- /* Check if the cpuid leaf 0xb is actually implemented */
- if ( ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE) )
- return false;
+ /* Check if the cpuid leaf 0xb is actually implemented */
+ if ( ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE) )
+ return false;
- __set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability);
+ __set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability);
- initial_apicid = edx;
+ initial_apicid = edx;
- /* Populate HT related information from sub-leaf level 0 */
- core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
- core_level_siblings = c->x86_num_siblings = 1u << ht_mask_width;
+ /* Populate HT related information from sub-leaf level 0 */
+ core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+ core_level_siblings = c->x86_num_siblings = 1u << ht_mask_width;
- sub_index = 1;
- do {
- cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
+ sub_index = 1;
+ do
+ {
+ cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
- /* Check for the Core type in the implemented sub leaves */
- if ( LEAFB_SUBTYPE(ecx) == CORE_TYPE ) {
- core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
- core_level_siblings = 1u << core_plus_mask_width;
- break;
- }
+ /* Check for the Core type in the implemented sub leaves */
+ if ( LEAFB_SUBTYPE(ecx) == CORE_TYPE )
+ {
+ core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+ core_level_siblings = 1u << core_plus_mask_width;
+ break;
+ }
- sub_index++;
- } while ( LEAFB_SUBTYPE(ecx) != INVALID_TYPE );
+ sub_index++;
+ } while ( LEAFB_SUBTYPE(ecx) != INVALID_TYPE );
- core_select_mask = (~(~0u << core_plus_mask_width)) >> ht_mask_width;
+ core_select_mask = (~(~0u << core_plus_mask_width)) >> ht_mask_width;
- c->cpu_core_id = phys_pkg_id(initial_apicid, ht_mask_width)
- & core_select_mask;
- c->phys_proc_id = phys_pkg_id(initial_apicid, core_plus_mask_width);
+ c->cpu_core_id = phys_pkg_id(initial_apicid, ht_mask_width)
+ & core_select_mask;
+ c->phys_proc_id = phys_pkg_id(initial_apicid, core_plus_mask_width);
- c->apicid = phys_pkg_id(initial_apicid, 0);
- c->x86_max_cores = (core_level_siblings / c->x86_num_siblings);
+ c->apicid = phys_pkg_id(initial_apicid, 0);
+ c->x86_max_cores = (core_level_siblings / c->x86_num_siblings);
- if ( opt_cpu_info )
- {
- printk("CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- if ( c->x86_max_cores > 1 )
- printk("CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- }
+ if ( opt_cpu_info )
+ {
+ printk("CPU: Physical Processor ID: %d\n",
+ c->phys_proc_id);
+ if ( c->x86_max_cores > 1 )
+ printk("CPU: Processor Core ID: %d\n",
+ c->cpu_core_id);
+ }
- return true;
+ return true;
}
void detect_ht(struct cpuinfo_x86 *c)
{
- u32 eax, ebx, ecx, edx;
- int index_msb, core_bits;
+ u32 eax, ebx, ecx, edx;
+ int index_msb, core_bits;
- if (!cpu_has(c, X86_FEATURE_HTT) ||
- cpu_has(c, X86_FEATURE_CMP_LEGACY) ||
- cpu_has(c, X86_FEATURE_XTOPOLOGY))
- return;
+ if (!cpu_has(c, X86_FEATURE_HTT) ||
+ cpu_has(c, X86_FEATURE_CMP_LEGACY) ||
+ cpu_has(c, X86_FEATURE_XTOPOLOGY))
+ return;
- cpuid(1, &eax, &ebx, &ecx, &edx);
- c->x86_num_siblings = (ebx & 0xff0000) >> 16;
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ c->x86_num_siblings = (ebx & 0xff0000) >> 16;
- if (c->x86_num_siblings == 1) {
- printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
- } else if (c->x86_num_siblings > 1 ) {
- index_msb = get_count_order(c->x86_num_siblings);
- c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+ if (c->x86_num_siblings == 1)
+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
- if (opt_cpu_info)
- printk("CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
+    else if (c->x86_num_siblings > 1)
+ {
+ index_msb = get_count_order(c->x86_num_siblings);
+ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
- c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;
+ if (opt_cpu_info)
+ printk("CPU: Physical Processor ID: %d\n",
+ c->phys_proc_id);
- index_msb = get_count_order(c->x86_num_siblings) ;
+ c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;
- core_bits = get_count_order(c->x86_max_cores);
+        index_msb = get_count_order(c->x86_num_siblings);
- c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
- ((1 << core_bits) - 1);
+ core_bits = get_count_order(c->x86_max_cores);
- if (opt_cpu_info && c->x86_max_cores > 1)
- printk("CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- }
+ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+ ((1 << core_bits) - 1);
+
+ if (opt_cpu_info && c->x86_max_cores > 1)
+ printk("CPU: Processor Core ID: %d\n",
+ c->cpu_core_id);
+ }
}
unsigned int __init apicid_to_socket(unsigned int apicid)
{
- unsigned int dummy;
-
- if (boot_cpu_has(X86_FEATURE_XTOPOLOGY)) {
- unsigned int eax, ecx, sub_index = 1, core_plus_mask_width;
-
- cpuid_count(0xb, SMT_LEVEL, &eax, &dummy, &dummy, &dummy);
- core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
- do {
- cpuid_count(0xb, sub_index, &eax, &dummy, &ecx,
- &dummy);
-
- if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
- core_plus_mask_width =
- BITS_SHIFT_NEXT_LEVEL(eax);
- break;
- }
-
- sub_index++;
- } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
-
- return _phys_pkg_id(apicid, core_plus_mask_width);
- }
-
- if (boot_cpu_has(X86_FEATURE_HTT) &&
- !boot_cpu_has(X86_FEATURE_CMP_LEGACY)) {
- unsigned int num_siblings = (cpuid_ebx(1) & 0xff0000) >> 16;
-
- if (num_siblings)
- return _phys_pkg_id(apicid,
- get_count_order(num_siblings));
- }
-
- return apicid;
+ unsigned int dummy;
+
+ if (boot_cpu_has(X86_FEATURE_XTOPOLOGY))
+ {
+ unsigned int eax, ecx, sub_index = 1, core_plus_mask_width;
+
+ cpuid_count(0xb, SMT_LEVEL, &eax, &dummy, &dummy, &dummy);
+ core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+ do
+ {
+ cpuid_count(0xb, sub_index, &eax, &dummy, &ecx,
+ &dummy);
+
+ if (LEAFB_SUBTYPE(ecx) == CORE_TYPE)
+ {
+ core_plus_mask_width =
+ BITS_SHIFT_NEXT_LEVEL(eax);
+ break;
+ }
+
+ sub_index++;
+ } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+
+ return _phys_pkg_id(apicid, core_plus_mask_width);
+ }
+
+ if (boot_cpu_has(X86_FEATURE_HTT) &&
+ !boot_cpu_has(X86_FEATURE_CMP_LEGACY))
+ {
+ unsigned int num_siblings = (cpuid_ebx(1) & 0xff0000) >> 16;
+
+ if (num_siblings)
+ return _phys_pkg_id(apicid,
+ get_count_order(num_siblings));
+ }
+
+ return apicid;
}
void print_cpu_info(unsigned int cpu)
{
- const struct cpuinfo_x86 *c = cpu_data + cpu;
- const char *vendor = NULL;
+ const struct cpuinfo_x86 *c = cpu_data + cpu;
+ const char *vendor = NULL;
- if (!opt_cpu_info)
- return;
+ if (!opt_cpu_info)
+ return;
- printk("CPU%u: ", cpu);
+ printk("CPU%u: ", cpu);
- vendor = x86_cpuid_vendor_to_str(c->x86_vendor);
- if (strncmp(c->x86_model_id, vendor, strlen(vendor)))
- printk("%s ", vendor);
+ vendor = x86_cpuid_vendor_to_str(c->x86_vendor);
+ if (strncmp(c->x86_model_id, vendor, strlen(vendor)))
+ printk("%s ", vendor);
- if (!c->x86_model_id[0])
- printk("%d86", c->x86);
- else
- printk("%s", c->x86_model_id);
+ if (!c->x86_model_id[0])
+ printk("%d86", c->x86);
+ else
+ printk("%s", c->x86_model_id);
- printk(" stepping %02x\n", c->x86_mask);
+ printk(" stepping %02x\n", c->x86_mask);
}
static cpumask_t cpu_initialized;
@@ -701,76 +733,80 @@ static cpumask_t cpu_initialized;
*/
void load_system_tables(void)
{
- unsigned int cpu = smp_processor_id();
- unsigned long stack_bottom = get_stack_bottom(),
- stack_top = stack_bottom & ~(STACK_SIZE - 1);
-
- struct tss_struct *tss = &this_cpu(init_tss);
- seg_desc_t *gdt =
- this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY;
- seg_desc_t *compat_gdt =
- this_cpu(compat_gdt_table) - FIRST_RESERVED_GDT_ENTRY;
-
- const struct desc_ptr gdtr = {
- .base = (unsigned long)gdt,
- .limit = LAST_RESERVED_GDT_BYTE,
- };
- const struct desc_ptr idtr = {
- .base = (unsigned long)idt_tables[cpu],
- .limit = (IDT_ENTRIES * sizeof(idt_entry_t)) - 1,
- };
-
- *tss = (struct tss_struct){
- /* Main stack for interrupts/exceptions. */
- .rsp0 = stack_bottom,
-
- /* Ring 1 and 2 stacks poisoned. */
- .rsp1 = 0x8600111111111111ul,
- .rsp2 = 0x8600111111111111ul,
-
- /*
- * MCE, NMI and Double Fault handlers get their own stacks.
- * All others poisoned.
- */
- .ist = {
- [IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE,
- [IST_DF - 1] = stack_top + IST_DF * PAGE_SIZE,
- [IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE,
- [IST_DB - 1] = stack_top + IST_DB * PAGE_SIZE,
-
- [IST_MAX ... ARRAY_SIZE(tss->ist) - 1] =
- 0x8600111111111111ul,
- },
-
- .bitmap = IOBMP_INVALID_OFFSET,
- };
-
- _set_tssldt_desc(
- gdt + TSS_ENTRY,
- (unsigned long)tss,
- offsetof(struct tss_struct, __cacheline_filler) - 1,
- SYS_DESC_tss_avail);
- _set_tssldt_desc(
- compat_gdt + TSS_ENTRY,
- (unsigned long)tss,
- offsetof(struct tss_struct, __cacheline_filler) - 1,
- SYS_DESC_tss_busy);
-
- lgdt(&gdtr);
- lidt(&idtr);
- ltr(TSS_ENTRY << 3);
- lldt(0);
-
- enable_each_ist(idt_tables[cpu]);
-
- /*
- * Bottom-of-stack must be 16-byte aligned!
- *
- * Defer checks until exception support is sufficiently set up.
- */
- BUILD_BUG_ON((sizeof(struct cpu_info) -
- offsetof(struct cpu_info, guest_cpu_user_regs.es)) & 0xf);
- BUG_ON(system_state != SYS_STATE_early_boot && (stack_bottom & 0xf));
+ unsigned int cpu = smp_processor_id();
+ unsigned long stack_bottom = get_stack_bottom(),
+ stack_top = stack_bottom & ~(STACK_SIZE - 1);
+
+ struct tss_struct *tss = &this_cpu(init_tss);
+ seg_desc_t *gdt =
+ this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY;
+ seg_desc_t *compat_gdt =
+ this_cpu(compat_gdt_table) - FIRST_RESERVED_GDT_ENTRY;
+
+ const struct desc_ptr gdtr =
+ {
+ .base = (unsigned long)gdt,
+ .limit = LAST_RESERVED_GDT_BYTE,
+ };
+ const struct desc_ptr idtr =
+ {
+ .base = (unsigned long)idt_tables[cpu],
+ .limit = (IDT_ENTRIES * sizeof(idt_entry_t)) - 1,
+ };
+
+ *tss = (struct tss_struct)
+ {
+ /* Main stack for interrupts/exceptions. */
+ .rsp0 = stack_bottom,
+
+ /* Ring 1 and 2 stacks poisoned. */
+ .rsp1 = 0x8600111111111111ul,
+ .rsp2 = 0x8600111111111111ul,
+
+ /*
+ * MCE, NMI and Double Fault handlers get their own stacks.
+ * All others poisoned.
+ */
+ .ist =
+ {
+ [IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE,
+ [IST_DF - 1] = stack_top + IST_DF * PAGE_SIZE,
+ [IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE,
+ [IST_DB - 1] = stack_top + IST_DB * PAGE_SIZE,
+
+ [IST_MAX ... ARRAY_SIZE(tss->ist) - 1] =
+ 0x8600111111111111ul,
+ },
+
+ .bitmap = IOBMP_INVALID_OFFSET,
+ };
+
+ _set_tssldt_desc(
+ gdt + TSS_ENTRY,
+ (unsigned long)tss,
+ offsetof(struct tss_struct, __cacheline_filler) - 1,
+ SYS_DESC_tss_avail);
+ _set_tssldt_desc(
+ compat_gdt + TSS_ENTRY,
+ (unsigned long)tss,
+ offsetof(struct tss_struct, __cacheline_filler) - 1,
+ SYS_DESC_tss_busy);
+
+ lgdt(&gdtr);
+ lidt(&idtr);
+ ltr(TSS_ENTRY << 3);
+ lldt(0);
+
+ enable_each_ist(idt_tables[cpu]);
+
+ /*
+ * Bottom-of-stack must be 16-byte aligned!
+ *
+ * Defer checks until exception support is sufficiently set up.
+ */
+ BUILD_BUG_ON((sizeof(struct cpu_info) -
+ offsetof(struct cpu_info, guest_cpu_user_regs.es)) & 0xf);
+ BUG_ON(system_state != SYS_STATE_early_boot && (stack_bottom & 0xf));
}
/*
@@ -781,38 +817,40 @@ void load_system_tables(void)
*/
void cpu_init(void)
{
- int cpu = smp_processor_id();
-
- if (cpumask_test_and_set_cpu(cpu, &cpu_initialized)) {
- printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
- for (;;) local_irq_enable();
- }
- if (opt_cpu_info)
- printk("Initializing CPU#%d\n", cpu);
-
- wrmsrl(MSR_IA32_CR_PAT, XEN_MSR_PAT);
-
- /* Install correct page table. */
- write_ptbase(current);
-
- /* Ensure FPU gets initialised for each domain. */
- stts();
-
- /* Reset debug registers: */
- write_debugreg(0, 0);
- write_debugreg(1, 0);
- write_debugreg(2, 0);
- write_debugreg(3, 0);
- write_debugreg(6, X86_DR6_DEFAULT);
- write_debugreg(7, X86_DR7_DEFAULT);
-
- /* Enable NMIs. Our loader (e.g. Tboot) may have left them disabled. */
- enable_nmis();
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_and_set_cpu(cpu, &cpu_initialized))
+ {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+ for (;;)
+ local_irq_enable();
+ }
+ if (opt_cpu_info)
+ printk("Initializing CPU#%d\n", cpu);
+
+ wrmsrl(MSR_IA32_CR_PAT, XEN_MSR_PAT);
+
+ /* Install correct page table. */
+ write_ptbase(current);
+
+ /* Ensure FPU gets initialised for each domain. */
+ stts();
+
+ /* Reset debug registers: */
+ write_debugreg(0, 0);
+ write_debugreg(1, 0);
+ write_debugreg(2, 0);
+ write_debugreg(3, 0);
+ write_debugreg(6, X86_DR6_DEFAULT);
+ write_debugreg(7, X86_DR7_DEFAULT);
+
+ /* Enable NMIs. Our loader (e.g. Tboot) may have left them disabled. */
+ enable_nmis();
}
void cpu_uninit(unsigned int cpu)
{
- cpumask_clear_cpu(cpu, &cpu_initialized);
+ cpumask_clear_cpu(cpu, &cpu_initialized);
}
/*
@@ -836,19 +874,20 @@ features are
*/
const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[])
{
- const struct x86_cpu_id *m;
- const struct cpuinfo_x86 *c = &boot_cpu_data;
-
- for (m = table; m->vendor | m->family | m->model | m->feature; m++) {
- if (c->x86_vendor != m->vendor)
- continue;
- if (c->x86 != m->family)
- continue;
- if (c->x86_model != m->model)
- continue;
- if (!cpu_has(c, m->feature))
- continue;
- return m;
- }
- return NULL;
+ const struct x86_cpu_id *m;
+ const struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ for (m = table; m->vendor | m->family | m->model | m->feature; m++)
+ {
+ if (c->x86_vendor != m->vendor)
+ continue;
+ if (c->x86 != m->family)
+ continue;
+ if (c->x86_model != m->model)
+ continue;
+ if (!cpu_has(c, m->feature))
+ continue;
+ return m;
+ }
+ return NULL;
}
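
An aside, not part of the patch: detect_extended_topology() above derives the SMT/core/package split by walking CPUID leaf 0xb, taking each level's APIC-ID shift width and masking the x2APIC ID with it. Below is a stand-alone user-space sketch of the same walk using GCC's <cpuid.h>; LEVEL_TYPE()/SHIFT_WIDTH() are local spellings of the LEAFB_SUBTYPE()/BITS_SHIFT_NEXT_LEVEL() helpers used in the hunk.

/* User-space sketch of the CPUID leaf 0xb walk in detect_extended_topology().
 * Build with something like: gcc -O2 topo.c && ./a.out   (x86 only) */
#include <cpuid.h>
#include <stdio.h>

#define LEVEL_TYPE(ecx)  (((ecx) >> 8) & 0xff)   /* 0 invalid, 1 SMT, 2 core */
#define SHIFT_WIDTH(eax) ((eax) & 0x1f)          /* APIC-ID bits for this level */

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned int smt_width, core_width, apicid, sub = 1;

    if (__get_cpuid_max(0, NULL) < 0xb) {
        fprintf(stderr, "CPUID leaf 0xb not supported\n");
        return 1;
    }

    __cpuid_count(0xb, 0, eax, ebx, ecx, edx);
    if (!ebx || LEVEL_TYPE(ecx) != 1) {
        fprintf(stderr, "leaf 0xb not implemented\n");
        return 1;
    }

    apicid = edx;                        /* initial x2APIC ID of this thread */
    smt_width = core_width = SHIFT_WIDTH(eax);

    do {
        __cpuid_count(0xb, sub, eax, ebx, ecx, edx);
        if (LEVEL_TYPE(ecx) == 2) {      /* core level: covers SMT + core bits */
            core_width = SHIFT_WIDTH(eax);
            break;
        }
        sub++;
    } while (LEVEL_TYPE(ecx) != 0);

    printf("x2APIC id %u -> thread %u, core %u, package %u\n",
           apicid,
           apicid & ((1u << smt_width) - 1),
           (apicid >> smt_width) & ((1u << (core_width - smt_width)) - 1),
           apicid >> core_width);
    return 0;
}

Run on the machine itself it reports the split for whichever CPU the process lands on; the masking mirrors the core_select_mask computation in the hunk above.
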
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index c2f4d9a06a..482e4ebe32 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -1,11 +1,12 @@
/* attempt to consolidate cpu attributes */
-struct cpu_dev {
- void (*c_early_init)(struct cpuinfo_x86 *c);
- void (*c_init)(struct cpuinfo_x86 * c);
+struct cpu_dev
+{
+ void (*c_early_init)(struct cpuinfo_x86 *c);
+ void (*c_init)(struct cpuinfo_x86 *c);
};
extern const struct cpu_dev intel_cpu_dev, amd_cpu_dev, centaur_cpu_dev,
- shanghai_cpu_dev, hygon_cpu_dev;
+ shanghai_cpu_dev, hygon_cpu_dev;
extern bool_t opt_arat;
extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
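
Aside (illustrative, not from the patch): struct cpu_dev is effectively a tiny vtable; each vendor file exports one instance and common.c swaps a this_cpu pointer to it after the vendor probe, then calls through the optional c_early_init and the c_init hooks. A minimal model of that dispatch pattern, with fake_* names and an invented vendor constant purely so it compiles on its own:

#include <stdio.h>

/* fake_* names and FAKE_VENDOR_AMD are invented for the example. */
struct fake_cpuinfo { int vendor; };

struct fake_cpu_dev {
    void (*c_early_init)(struct fake_cpuinfo *c);   /* optional hook */
    void (*c_init)(struct fake_cpuinfo *c);         /* main per-vendor init */
};

static void default_init(struct fake_cpuinfo *c) { (void)c; puts("generic init"); }
static void amd_init(struct fake_cpuinfo *c)     { (void)c; puts("AMD-style init"); }

static const struct fake_cpu_dev default_dev = { .c_init = default_init };
static const struct fake_cpu_dev amd_dev     = { .c_init = amd_init };
static const struct fake_cpu_dev *this_dev   = &default_dev;

#define FAKE_VENDOR_AMD 1

int main(void)
{
    struct fake_cpuinfo c = { .vendor = FAKE_VENDOR_AMD };   /* pretend probe */

    if (c.vendor == FAKE_VENDOR_AMD)     /* common.c does this per real vendor */
        this_dev = &amd_dev;

    if (this_dev->c_early_init)          /* may be absent, as for some vendors */
        this_dev->c_early_init(&c);
    this_dev->c_init(&c);
    return 0;
}
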
diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c
index 9ab7aa8622..9fc62c3b8f 100644
--- a/xen/arch/x86/cpu/hygon.c
+++ b/xen/arch/x86/cpu/hygon.c
@@ -9,99 +9,104 @@
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
- unsigned int ebx;
+ unsigned int ebx;
- if (c->x86_max_cores <= 1)
- return;
+ if (c->x86_max_cores <= 1)
+ return;
- /* Socket ID is ApicId[6] for Hygon processors. */
- c->phys_proc_id >>= APICID_SOCKET_ID_BIT;
+ /* Socket ID is ApicId[6] for Hygon processors. */
+ c->phys_proc_id >>= APICID_SOCKET_ID_BIT;
- ebx = cpuid_ebx(0x8000001e);
- c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
- c->x86_max_cores /= c->x86_num_siblings;
- c->cpu_core_id = ebx & 0xff;
+ ebx = cpuid_ebx(0x8000001e);
+ c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
+ c->x86_max_cores /= c->x86_num_siblings;
+ c->cpu_core_id = ebx & 0xff;
- if (opt_cpu_info)
- printk("CPU %d(%d) -> Processor %d, Core %d\n",
- smp_processor_id(), c->x86_max_cores,
- c->phys_proc_id, c->cpu_core_id);
+ if (opt_cpu_info)
+ printk("CPU %d(%d) -> Processor %d, Core %d\n",
+ smp_processor_id(), c->x86_max_cores,
+ c->phys_proc_id, c->cpu_core_id);
}
static void init_hygon(struct cpuinfo_x86 *c)
{
- unsigned long long value;
-
- /*
- * Attempt to set lfence to be Dispatch Serialising. This MSR almost
- * certainly isn't virtualised (and Xen at least will leak the real
- * value in but silently discard writes), as well as being per-core
- * rather than per-thread, so do a full safe read/write/readback cycle
- * in the worst case.
- */
- if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
- /* Unable to read. Assume the safer default. */
- __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
- else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
- /* Already dispatch serialising. */
- __set_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
- else if (wrmsr_safe(MSR_AMD64_DE_CFG,
- value | AMD64_DE_CFG_LFENCE_SERIALISE) ||
- rdmsr_safe(MSR_AMD64_DE_CFG, value) ||
- !(value & AMD64_DE_CFG_LFENCE_SERIALISE))
- /* Attempt to set failed. Assume the safer default. */
- __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
- else
- /* Successfully enabled! */
- __set_bit(X86_FEATURE_LFENCE_DISPATCH,
- c->x86_capability);
-
- /*
- * If the user has explicitly chosen to disable Memory Disambiguation
- * to mitigiate Speculative Store Bypass, poke the appropriate MSR.
- */
- if (opt_ssbd && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
- value |= 1ull << 10;
- wrmsr_safe(MSR_AMD64_LS_CFG, value);
- }
-
- /* MFENCE stops RDTSC speculation */
- if (!cpu_has_lfence_dispatch)
- __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
-
- display_cacheinfo(c);
-
- if (c->extended_cpuid_level >= 0x80000008)
- c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-
- if (c->extended_cpuid_level >= 0x80000007) {
- if (cpu_has(c, X86_FEATURE_ITSC)) {
- __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
- __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
- }
- }
-
- hygon_get_topology(c);
-
- /* Hygon CPUs do not support SYSENTER outside of legacy mode. */
- __clear_bit(X86_FEATURE_SEP, c->x86_capability);
-
- /* Hygon processors have APIC timer running in deep C states. */
- if (opt_arat)
- __set_bit(X86_FEATURE_ARAT, c->x86_capability);
-
- if (cpu_has(c, X86_FEATURE_EFRO)) {
- rdmsrl(MSR_K7_HWCR, value);
- value |= (1 << 27); /* Enable read-only APERF/MPERF bit */
- wrmsrl(MSR_K7_HWCR, value);
- }
+ unsigned long long value;
+
+ /*
+ * Attempt to set lfence to be Dispatch Serialising. This MSR almost
+ * certainly isn't virtualised (and Xen at least will leak the real
+ * value in but silently discard writes), as well as being per-core
+ * rather than per-thread, so do a full safe read/write/readback cycle
+ * in the worst case.
+ */
+ if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
+ /* Unable to read. Assume the safer default. */
+ __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+ else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
+ /* Already dispatch serialising. */
+ __set_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+ else if (wrmsr_safe(MSR_AMD64_DE_CFG,
+ value | AMD64_DE_CFG_LFENCE_SERIALISE) ||
+ rdmsr_safe(MSR_AMD64_DE_CFG, value) ||
+ !(value & AMD64_DE_CFG_LFENCE_SERIALISE))
+ /* Attempt to set failed. Assume the safer default. */
+ __clear_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+ else
+ /* Successfully enabled! */
+ __set_bit(X86_FEATURE_LFENCE_DISPATCH,
+ c->x86_capability);
+
+ /*
+ * If the user has explicitly chosen to disable Memory Disambiguation
+     * to mitigate Speculative Store Bypass, poke the appropriate MSR.
+ */
+ if (opt_ssbd && !rdmsr_safe(MSR_AMD64_LS_CFG, value))
+ {
+ value |= 1ull << 10;
+ wrmsr_safe(MSR_AMD64_LS_CFG, value);
+ }
+
+ /* MFENCE stops RDTSC speculation */
+ if (!cpu_has_lfence_dispatch)
+ __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+
+ display_cacheinfo(c);
+
+ if (c->extended_cpuid_level >= 0x80000008)
+ c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+
+ if (c->extended_cpuid_level >= 0x80000007)
+ {
+ if (cpu_has(c, X86_FEATURE_ITSC))
+ {
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+ }
+ }
+
+ hygon_get_topology(c);
+
+ /* Hygon CPUs do not support SYSENTER outside of legacy mode. */
+ __clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
+ /* Hygon processors have APIC timer running in deep C states. */
+ if (opt_arat)
+ __set_bit(X86_FEATURE_ARAT, c->x86_capability);
+
+ if (cpu_has(c, X86_FEATURE_EFRO))
+ {
+ rdmsrl(MSR_K7_HWCR, value);
+ value |= (1 << 27); /* Enable read-only APERF/MPERF bit */
+ wrmsrl(MSR_K7_HWCR, value);
+ }
}
-const struct cpu_dev hygon_cpu_dev = {
- .c_early_init = early_init_amd,
- .c_init = init_hygon,
+const struct cpu_dev hygon_cpu_dev =
+{
+ .c_early_init = early_init_amd,
+ .c_init = init_hygon,
};
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 0dd8f98607..703a01f837 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -21,21 +21,21 @@
*/
static uint64_t __init _probe_mask_msr(unsigned int *msr, uint64_t caps)
{
- uint64_t val = 0;
+ uint64_t val = 0;
- expected_levelling_cap |= caps;
+ expected_levelling_cap |= caps;
- if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))
- *msr = 0;
- else
- levelling_caps |= caps;
+ if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))
+ *msr = 0;
+ else
+ levelling_caps |= caps;
- return val;
+ return val;
}
/* Indices of the masking MSRs, or 0 if unavailable. */
static unsigned int __read_mostly msr_basic, __read_mostly msr_ext,
- __read_mostly msr_xsave;
+ __read_mostly msr_xsave;
/*
* Probe for the existance of the expected masking MSRs. They might easily
@@ -43,69 +43,70 @@ static unsigned int __read_mostly msr_basic, __read_mostly msr_ext,
*/
static void __init probe_masking_msrs(void)
{
- const struct cpuinfo_x86 *c = &boot_cpu_data;
- unsigned int exp_msr_basic, exp_msr_ext, exp_msr_xsave;
-
- /* Only family 6 supports this feature. */
- if (c->x86 != 6)
- return;
-
- switch (c->x86_model) {
- case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
- case 0x1d: /* Dunnington(MP) */
- msr_basic = MSR_INTEL_MASK_V1_CPUID1;
- break;
-
- case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
- case 0x1e: /* Clarksfield, Lynnfield, Jasper Forest */
- case 0x1f: /* Something Nehalem-based - perhaps Auburndale/Havendale? */
- case 0x25: /* Arrandale, Clarksdale */
- case 0x2c: /* Gulftown, Westmere-EP */
- case 0x2e: /* Nehalem-EX(Beckton) */
- case 0x2f: /* Westmere-EX */
- msr_basic = MSR_INTEL_MASK_V2_CPUID1;
- msr_ext = MSR_INTEL_MASK_V2_CPUID80000001;
- break;
-
- case 0x2a: /* SandyBridge */
- case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
- msr_basic = MSR_INTEL_MASK_V3_CPUID1;
- msr_ext = MSR_INTEL_MASK_V3_CPUID80000001;
- msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
- break;
- }
-
- exp_msr_basic = msr_basic;
- exp_msr_ext = msr_ext;
- exp_msr_xsave = msr_xsave;
-
- if (msr_basic)
- cpuidmask_defaults._1cd = _probe_mask_msr(&msr_basic, LCAP_1cd);
-
- if (msr_ext)
- cpuidmask_defaults.e1cd = _probe_mask_msr(&msr_ext, LCAP_e1cd);
-
- if (msr_xsave)
- cpuidmask_defaults.Da1 = _probe_mask_msr(&msr_xsave, LCAP_Da1);
-
- /*
- * Don't bother warning about a mismatch if virtualised. These MSRs
- * are not architectural and almost never virtualised.
- */
- if ((expected_levelling_cap == levelling_caps) ||
- cpu_has_hypervisor)
- return;
-
- printk(XENLOG_WARNING "Mismatch between expected (%#x) "
- "and real (%#x) levelling caps: missing %#x\n",
- expected_levelling_cap, levelling_caps,
- (expected_levelling_cap ^ levelling_caps) & levelling_caps);
- printk(XENLOG_WARNING "Fam %#x, model %#x expected (%#x/%#x/%#x), "
- "got (%#x/%#x/%#x)\n", c->x86, c->x86_model,
- exp_msr_basic, exp_msr_ext, exp_msr_xsave,
- msr_basic, msr_ext, msr_xsave);
- printk(XENLOG_WARNING
- "If not running virtualised, please report a bug\n");
+ const struct cpuinfo_x86 *c = &boot_cpu_data;
+ unsigned int exp_msr_basic, exp_msr_ext, exp_msr_xsave;
+
+ /* Only family 6 supports this feature. */
+ if (c->x86 != 6)
+ return;
+
+ switch (c->x86_model)
+ {
+ case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
+ case 0x1d: /* Dunnington(MP) */
+ msr_basic = MSR_INTEL_MASK_V1_CPUID1;
+ break;
+
+ case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
+ case 0x1e: /* Clarksfield, Lynnfield, Jasper Forest */
+ case 0x1f: /* Something Nehalem-based - perhaps Auburndale/Havendale? */
+ case 0x25: /* Arrandale, Clarksdale */
+ case 0x2c: /* Gulftown, Westmere-EP */
+ case 0x2e: /* Nehalem-EX(Beckton) */
+ case 0x2f: /* Westmere-EX */
+ msr_basic = MSR_INTEL_MASK_V2_CPUID1;
+ msr_ext = MSR_INTEL_MASK_V2_CPUID80000001;
+ break;
+
+ case 0x2a: /* SandyBridge */
+ case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
+ msr_basic = MSR_INTEL_MASK_V3_CPUID1;
+ msr_ext = MSR_INTEL_MASK_V3_CPUID80000001;
+ msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
+ break;
+ }
+
+ exp_msr_basic = msr_basic;
+ exp_msr_ext = msr_ext;
+ exp_msr_xsave = msr_xsave;
+
+ if (msr_basic)
+ cpuidmask_defaults._1cd = _probe_mask_msr(&msr_basic, LCAP_1cd);
+
+ if (msr_ext)
+ cpuidmask_defaults.e1cd = _probe_mask_msr(&msr_ext, LCAP_e1cd);
+
+ if (msr_xsave)
+ cpuidmask_defaults.Da1 = _probe_mask_msr(&msr_xsave, LCAP_Da1);
+
+ /*
+ * Don't bother warning about a mismatch if virtualised. These MSRs
+ * are not architectural and almost never virtualised.
+ */
+ if ((expected_levelling_cap == levelling_caps) ||
+ cpu_has_hypervisor)
+ return;
+
+ printk(XENLOG_WARNING "Mismatch between expected (%#x) "
+ "and real (%#x) levelling caps: missing %#x\n",
+ expected_levelling_cap, levelling_caps,
+ (expected_levelling_cap ^ levelling_caps) & levelling_caps);
+ printk(XENLOG_WARNING "Fam %#x, model %#x expected (%#x/%#x/%#x), "
+ "got (%#x/%#x/%#x)\n", c->x86, c->x86_model,
+ exp_msr_basic, exp_msr_ext, exp_msr_xsave,
+ msr_basic, msr_ext, msr_xsave);
+ printk(XENLOG_WARNING
+ "If not running virtualised, please report a bug\n");
}
/*
@@ -116,29 +117,31 @@ static void __init probe_masking_msrs(void)
*/
static void intel_ctxt_switch_masking(const struct vcpu *next)
{
- struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
- const struct domain *nextd = next ? next->domain : NULL;
- const struct cpuidmasks *masks =
- (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
- ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
-
- if (msr_basic) {
- uint64_t val = masks->_1cd;
-
- /*
- * OSXSAVE defaults to 1, which causes fast-forwarding of
- * Xen's real setting. Clobber it if disabled by the guest
- * kernel.
- */
- if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
- !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
- val &= ~(uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE);
-
- if (unlikely(these_masks->_1cd != val)) {
- wrmsrl(msr_basic, val);
- these_masks->_1cd = val;
- }
+ struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
+ const struct domain *nextd = next ? next->domain : NULL;
+ const struct cpuidmasks *masks =
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+ ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
+
+ if (msr_basic)
+ {
+ uint64_t val = masks->_1cd;
+
+ /*
+ * OSXSAVE defaults to 1, which causes fast-forwarding of
+ * Xen's real setting. Clobber it if disabled by the guest
+ * kernel.
+ */
+ if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
+ !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
+ val &= ~(uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE);
+
+ if (unlikely(these_masks->_1cd != val))
+ {
+ wrmsrl(msr_basic, val);
+ these_masks->_1cd = val;
}
+ }
#define LAZY(msr, field) \
({ \
@@ -150,8 +153,8 @@ static void intel_ctxt_switch_masking(const struct vcpu *next)
} \
})
- LAZY(msr_ext, e1cd);
- LAZY(msr_xsave, Da1);
+ LAZY(msr_ext, e1cd);
+ LAZY(msr_xsave, Da1);
#undef LAZY
}
@@ -164,102 +167,108 @@ static void intel_ctxt_switch_masking(const struct vcpu *next)
*/
static void __init noinline intel_init_levelling(void)
{
- if (probe_cpuid_faulting())
- return;
+ if (probe_cpuid_faulting())
+ return;
- probe_masking_msrs();
+ probe_masking_msrs();
- if (msr_basic) {
- uint32_t ecx, edx, tmp;
+ if (msr_basic)
+ {
+ uint32_t ecx, edx, tmp;
- cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
+ cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
- ecx &= opt_cpuid_mask_ecx;
- edx &= opt_cpuid_mask_edx;
+ ecx &= opt_cpuid_mask_ecx;
+ edx &= opt_cpuid_mask_edx;
- /* Fast-forward bits - Must be set. */
- if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
- ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
- edx |= cpufeat_mask(X86_FEATURE_APIC);
+ /* Fast-forward bits - Must be set. */
+ if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
+ ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
+ edx |= cpufeat_mask(X86_FEATURE_APIC);
- cpuidmask_defaults._1cd &= ((u64)edx << 32) | ecx;
- }
+ cpuidmask_defaults._1cd &= ((u64)edx << 32) | ecx;
+ }
- if (msr_ext) {
- uint32_t ecx, edx, tmp;
+ if (msr_ext)
+ {
+ uint32_t ecx, edx, tmp;
- cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
+ cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
- ecx &= opt_cpuid_mask_ext_ecx;
- edx &= opt_cpuid_mask_ext_edx;
+ ecx &= opt_cpuid_mask_ext_ecx;
+ edx &= opt_cpuid_mask_ext_edx;
- cpuidmask_defaults.e1cd &= ((u64)edx << 32) | ecx;
- }
+ cpuidmask_defaults.e1cd &= ((u64)edx << 32) | ecx;
+ }
- if (msr_xsave) {
- uint32_t eax, tmp;
+ if (msr_xsave)
+ {
+ uint32_t eax, tmp;
- cpuid_count(0x0000000d, 1, &eax, &tmp, &tmp, &tmp);
+ cpuid_count(0x0000000d, 1, &eax, &tmp, &tmp, &tmp);
- eax &= opt_cpuid_mask_xsave_eax;
+ eax &= opt_cpuid_mask_xsave_eax;
- cpuidmask_defaults.Da1 &= (~0ULL << 32) | eax;
- }
+ cpuidmask_defaults.Da1 &= (~0ULL << 32) | eax;
+ }
- if (opt_cpu_info) {
- printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
+ if (opt_cpu_info)
+ {
+ printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
- if (!cpu_has_cpuid_faulting)
- printk(XENLOG_INFO
- "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
- "e1c 0x%08x, Da1 0x%08x\n",
- (uint32_t)(cpuidmask_defaults._1cd >> 32),
- (uint32_t)cpuidmask_defaults._1cd,
- (uint32_t)(cpuidmask_defaults.e1cd >> 32),
- (uint32_t)cpuidmask_defaults.e1cd,
- (uint32_t)cpuidmask_defaults.Da1);
- }
+ if (!cpu_has_cpuid_faulting)
+ printk(XENLOG_INFO
+ "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
+ "e1c 0x%08x, Da1 0x%08x\n",
+ (uint32_t)(cpuidmask_defaults._1cd >> 32),
+ (uint32_t)cpuidmask_defaults._1cd,
+ (uint32_t)(cpuidmask_defaults.e1cd >> 32),
+ (uint32_t)cpuidmask_defaults.e1cd,
+ (uint32_t)cpuidmask_defaults.Da1);
+ }
- if (levelling_caps)
- ctxt_switch_masking = intel_ctxt_switch_masking;
+ if (levelling_caps)
+ ctxt_switch_masking = intel_ctxt_switch_masking;
}
static void early_init_intel(struct cpuinfo_x86 *c)
{
- u64 misc_enable, disable;
-
- /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
- if (c->x86 == 15 && c->x86_cache_alignment == 64)
- c->x86_cache_alignment = 128;
-
- /* Unmask CPUID levels and NX if masked: */
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
- disable = misc_enable & (MSR_IA32_MISC_ENABLE_LIMIT_CPUID |
- MSR_IA32_MISC_ENABLE_XD_DISABLE);
- if (disable) {
- wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable & ~disable);
- bootsym(trampoline_misc_enable_off) |= disable;
- }
-
- if (disable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)
- printk(KERN_INFO "revised cpuid level: %d\n",
- cpuid_eax(0));
- if (disable & MSR_IA32_MISC_ENABLE_XD_DISABLE) {
- write_efer(read_efer() | EFER_NX);
- printk(KERN_INFO
- "re-enabled NX (Execute Disable) protection\n");
- }
-
- /* CPUID workaround for Intel 0F33/0F34 CPU */
- if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 &&
- (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
- paddr_bits = 36;
-
- if (c == &boot_cpu_data)
- intel_init_levelling();
-
- ctxt_switch_levelling(NULL);
+ u64 misc_enable, disable;
+
+ /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+ if (c->x86 == 15 && c->x86_cache_alignment == 64)
+ c->x86_cache_alignment = 128;
+
+ /* Unmask CPUID levels and NX if masked: */
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+ disable = misc_enable & (MSR_IA32_MISC_ENABLE_LIMIT_CPUID |
+ MSR_IA32_MISC_ENABLE_XD_DISABLE);
+ if (disable)
+ {
+ wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable & ~disable);
+ bootsym(trampoline_misc_enable_off) |= disable;
+ }
+
+ if (disable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)
+ printk(KERN_INFO "revised cpuid level: %d\n",
+ cpuid_eax(0));
+ if (disable & MSR_IA32_MISC_ENABLE_XD_DISABLE)
+ {
+ write_efer(read_efer() | EFER_NX);
+ printk(KERN_INFO
+ "re-enabled NX (Execute Disable) protection\n");
+ }
+
+ /* CPUID workaround for Intel 0F33/0F34 CPU */
+ if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 &&
+ (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
+ paddr_bits = 36;
+
+ if (c == &boot_cpu_data)
+ intel_init_levelling();
+
+ ctxt_switch_levelling(NULL);
}
/*
@@ -271,24 +280,26 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*/
static void Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
- unsigned long lo, hi;
-
- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
- rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
- if ((lo & (1<<9)) == 0) {
- printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
- printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
- lo |= (1<<9); /* Disable hw prefetching */
- wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
- }
- }
-
- if (c->x86 == 6 && cpu_has_clflush &&
- (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
- __set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
-
- if (cpu_has_tsx_force_abort && opt_rtm_abort)
- wrmsrl(MSR_TSX_FORCE_ABORT, TSX_FORCE_ABORT_RTM);
+ unsigned long lo, hi;
+
+ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1))
+ {
+ rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+ if ((lo & (1<<9)) == 0)
+ {
+ printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
+ printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
+ lo |= (1<<9); /* Disable hw prefetching */
+ wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+ }
+ }
+
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+ __set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
+
+ if (cpu_has_tsx_force_abort && opt_rtm_abort)
+ wrmsrl(MSR_TSX_FORCE_ABORT, TSX_FORCE_ABORT_RTM);
}
@@ -297,58 +308,61 @@ static void Intel_errata_workarounds(struct cpuinfo_x86 *c)
*/
static int num_cpu_cores(struct cpuinfo_x86 *c)
{
- unsigned int eax, ebx, ecx, edx;
+ unsigned int eax, ebx, ecx, edx;
- if (c->cpuid_level < 4)
- return 1;
+ if (c->cpuid_level < 4)
+ return 1;
- /* Intel has a non-standard dependency on %ecx for this CPUID level. */
- cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
- if (eax & 0x1f)
- return ((eax >> 26) + 1);
- else
- return 1;
+ /* Intel has a non-standard dependency on %ecx for this CPUID level. */
+ cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
+ if (eax & 0x1f)
+ return ((eax >> 26) + 1);
+ else
+ return 1;
}
static void init_intel(struct cpuinfo_x86 *c)
{
- unsigned int l2 = 0;
-
- /* Detect the extended topology information if available */
- detect_extended_topology(c);
-
- l2 = init_intel_cacheinfo(c);
- if (c->cpuid_level > 9) {
- unsigned eax = cpuid_eax(10);
- /* Check for version and the number of counters */
- if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
- __set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
- }
-
- if ( !cpu_has(c, X86_FEATURE_XTOPOLOGY) )
- {
- c->x86_max_cores = num_cpu_cores(c);
- detect_ht(c);
- }
-
- /* Work around errata */
- Intel_errata_workarounds(c);
-
- if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
- (c->x86 == 0x6 && c->x86_model >= 0x0e))
- __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- if (cpu_has(c, X86_FEATURE_ITSC)) {
- __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
- __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
- }
- if ( opt_arat &&
- ( c->cpuid_level >= 0x00000006 ) &&
- ( cpuid_eax(0x00000006) & (1u<<2) ) )
- __set_bit(X86_FEATURE_ARAT, c->x86_capability);
+ unsigned int l2 = 0;
+
+ /* Detect the extended topology information if available */
+ detect_extended_topology(c);
+
+ l2 = init_intel_cacheinfo(c);
+ if (c->cpuid_level > 9)
+ {
+ unsigned eax = cpuid_eax(10);
+ /* Check for version and the number of counters */
+ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+ __set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+ }
+
+ if ( !cpu_has(c, X86_FEATURE_XTOPOLOGY) )
+ {
+ c->x86_max_cores = num_cpu_cores(c);
+ detect_ht(c);
+ }
+
+ /* Work around errata */
+ Intel_errata_workarounds(c);
+
+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ if (cpu_has(c, X86_FEATURE_ITSC))
+ {
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+ }
+ if ( opt_arat &&
+ ( c->cpuid_level >= 0x00000006 ) &&
+ ( cpuid_eax(0x00000006) & (1u<<2) ) )
+ __set_bit(X86_FEATURE_ARAT, c->x86_capability);
}
-const struct cpu_dev intel_cpu_dev = {
- .c_early_init = early_init_intel,
- .c_init = init_intel,
+const struct cpu_dev intel_cpu_dev =
+{
+ .c_early_init = early_init_intel,
+ .c_init = init_intel,
};
diff --git a/xen/arch/x86/cpu/intel_cacheinfo.c b/xen/arch/x86/cpu/intel_cacheinfo.c
index 88b61fddfe..bc641b98f2 100644
--- a/xen/arch/x86/cpu/intel_cacheinfo.c
+++ b/xen/arch/x86/cpu/intel_cacheinfo.c
@@ -20,248 +20,259 @@
struct _cache_table
{
- unsigned char descriptor;
- char cache_type;
- short size;
+ unsigned char descriptor;
+ char cache_type;
+ short size;
};
/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static const struct _cache_table cache_table[] =
{
- { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
- { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
- { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
- { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
- { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
- { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
- { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
- { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
- { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
- { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
- { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
- { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
- { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
- { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
- { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
- { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
- { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
- { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
- { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
- { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
- { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
- { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
- { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
- { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
- { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
- { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
- { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
- { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
- { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
- { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
- { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
- { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
- { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
- { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
- { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
- { 0x00, 0, 0}
+ { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
+ { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
+ { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
+ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
+ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
+ { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
+ { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
+ { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
+ { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
+ { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
+ { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
+ { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
+ { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
+ { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
+ { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
+ { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
+ { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
+ { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
+ { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
+ { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
+ { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
+ { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
+ { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
+ { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
+ { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
+ { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
+ { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
+ { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
+ { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
+ { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
+ { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
+ { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
+ { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
+ { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
+ { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
+ { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
+ { 0x00, 0, 0}
};
int cpuid4_cache_lookup(int index, struct cpuid4_info *this_leaf)
{
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- unsigned edx;
-
- cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
- if (eax.split.type == CACHE_TYPE_NULL)
- return -EIO; /* better error ? */
-
- this_leaf->eax = eax;
- this_leaf->ebx = ebx;
- this_leaf->ecx = ecx;
- this_leaf->size = (ecx.split.number_of_sets + 1) *
- (ebx.split.coherency_line_size + 1) *
- (ebx.split.physical_line_partition + 1) *
- (ebx.split.ways_of_associativity + 1);
- return 0;
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned edx;
+
+ cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+ if (eax.split.type == CACHE_TYPE_NULL)
+ return -EIO; /* better error ? */
+
+ this_leaf->eax = eax;
+ this_leaf->ebx = ebx;
+ this_leaf->ecx = ecx;
+ this_leaf->size = (ecx.split.number_of_sets + 1) *
+ (ebx.split.coherency_line_size + 1) *
+ (ebx.split.physical_line_partition + 1) *
+ (ebx.split.ways_of_associativity + 1);
+ return 0;
}
static int find_num_cache_leaves(void)
{
- unsigned int eax, ebx, ecx, edx;
- union _cpuid4_leaf_eax cache_eax;
- int i = -1;
-
- do {
- ++i;
- /* Do cpuid(4) loop to find out num_cache_leaves */
- cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
- cache_eax.full = eax;
- } while (cache_eax.split.type != CACHE_TYPE_NULL);
- return i;
+ unsigned int eax, ebx, ecx, edx;
+ union _cpuid4_leaf_eax cache_eax;
+ int i = -1;
+
+ do
+ {
+ ++i;
+ /* Do cpuid(4) loop to find out num_cache_leaves */
+ cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
+ cache_eax.full = eax;
+ } while (cache_eax.split.type != CACHE_TYPE_NULL);
+ return i;
}
unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
- unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
- unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
- unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
- static unsigned int num_cache_leaves;
-
- if (c->cpuid_level > 3) {
- static int is_initialized;
-
- if (is_initialized == 0) {
- /* Init num_cache_leaves from boot CPU */
- num_cache_leaves = find_num_cache_leaves();
- is_initialized++;
- }
-
- /*
- * Whenever possible use cpuid(4), deterministic cache
- * parameters cpuid leaf to find the cache details
- */
- for (i = 0; i < num_cache_leaves; i++) {
- struct cpuid4_info this_leaf;
-
- int retval;
-
- retval = cpuid4_cache_lookup(i, &this_leaf);
- if (retval >= 0) {
- switch(this_leaf.eax.split.level) {
- case 1:
- if (this_leaf.eax.split.type ==
- CACHE_TYPE_DATA)
- new_l1d = this_leaf.size/1024;
- else if (this_leaf.eax.split.type ==
- CACHE_TYPE_INST)
- new_l1i = this_leaf.size/1024;
- break;
- case 2:
- new_l2 = this_leaf.size/1024;
- break;
- case 3:
- new_l3 = this_leaf.size/1024;
- break;
- default:
- break;
- }
- }
- }
- }
- /*
- * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
- * trace cache
- */
- if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1 &&
- c->x86_vendor != X86_VENDOR_SHANGHAI)
- {
- /* supports eax=2 call */
- int i, j, n;
- int regs[4];
- unsigned char *dp = (unsigned char *)regs;
- int only_trace = 0;
-
- if (num_cache_leaves != 0 && c->x86 == 15)
- only_trace = 1;
-
- /* Number of times to iterate */
- n = cpuid_eax(2) & 0xFF;
-
- for ( i = 0 ; i < n ; i++ ) {
- cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
-
- /* If bit 31 is set, this is an unknown format */
- for ( j = 0 ; j < 3 ; j++ ) {
- if ( regs[j] < 0 ) regs[j] = 0;
- }
-
- /* Byte 0 is level count, not a descriptor */
- for ( j = 1 ; j < 16 ; j++ ) {
- unsigned char des = dp[j];
- unsigned char k = 0;
-
- /* look up this descriptor in the table */
- while (cache_table[k].descriptor != 0)
- {
- if (cache_table[k].descriptor == des) {
- if (only_trace && cache_table[k].cache_type != LVL_TRACE)
- break;
- switch (cache_table[k].cache_type) {
- case LVL_1_INST:
- l1i += cache_table[k].size;
- break;
- case LVL_1_DATA:
- l1d += cache_table[k].size;
- break;
- case LVL_2:
- l2 += cache_table[k].size;
- break;
- case LVL_3:
- l3 += cache_table[k].size;
- break;
- case LVL_TRACE:
- trace += cache_table[k].size;
- break;
- }
-
- break;
- }
-
- k++;
- }
- }
- }
- }
-
- if (new_l1d)
- l1d = new_l1d;
-
- if (new_l1i)
- l1i = new_l1i;
-
- if (new_l2) {
- l2 = new_l2;
- }
-
- if (new_l3) {
- l3 = new_l3;
- }
-
- if (opt_cpu_info) {
- if (trace)
- printk("CPU: Trace cache: %dK uops", trace);
- else if ( l1i )
- printk("CPU: L1 I cache: %dK", l1i);
-
- if (l1d)
- printk(", L1 D cache: %dK\n", l1d);
- else
- printk("\n");
-
- if (l2)
- printk("CPU: L2 cache: %dK\n", l2);
-
- if (l3)
- printk("CPU: L3 cache: %dK\n", l3);
- }
-
- c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
-
- return l2;
+ unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+ unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
+ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
+ static unsigned int num_cache_leaves;
+
+ if (c->cpuid_level > 3)
+ {
+ static int is_initialized;
+
+ if (is_initialized == 0)
+ {
+ /* Init num_cache_leaves from boot CPU */
+ num_cache_leaves = find_num_cache_leaves();
+ is_initialized++;
+ }
+
+ /*
+ * Whenever possible use cpuid(4), deterministic cache
+ * parameters cpuid leaf to find the cache details
+ */
+ for (i = 0; i < num_cache_leaves; i++)
+ {
+ struct cpuid4_info this_leaf;
+
+ int retval;
+
+ retval = cpuid4_cache_lookup(i, &this_leaf);
+ if (retval >= 0)
+ {
+ switch (this_leaf.eax.split.level)
+ {
+ case 1:
+ if (this_leaf.eax.split.type ==
+ CACHE_TYPE_DATA)
+ new_l1d = this_leaf.size/1024;
+ else if (this_leaf.eax.split.type ==
+ CACHE_TYPE_INST)
+ new_l1i = this_leaf.size/1024;
+ break;
+ case 2:
+ new_l2 = this_leaf.size/1024;
+ break;
+ case 3:
+ new_l3 = this_leaf.size/1024;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+ /*
+ * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+ * trace cache
+ */
+ if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1 &&
+ c->x86_vendor != X86_VENDOR_SHANGHAI)
+ {
+ /* supports eax=2 call */
+ int i, j, n;
+ int regs[4];
+ unsigned char *dp = (unsigned char *)regs;
+ int only_trace = 0;
+
+ if (num_cache_leaves != 0 && c->x86 == 15)
+ only_trace = 1;
+
+ /* Number of times to iterate */
+ n = cpuid_eax(2) & 0xFF;
+
+ for ( i = 0 ; i < n ; i++ )
+ {
+ cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+ /* If bit 31 is set, this is an unknown format */
+ for ( j = 0 ; j < 3 ; j++ )
+ {
+ if ( regs[j] < 0 )
+ regs[j] = 0;
+ }
+
+ /* Byte 0 is level count, not a descriptor */
+ for ( j = 1 ; j < 16 ; j++ )
+ {
+ unsigned char des = dp[j];
+ unsigned char k = 0;
+
+ /* look up this descriptor in the table */
+ while (cache_table[k].descriptor != 0)
+ {
+ if (cache_table[k].descriptor == des)
+ {
+ if (only_trace && cache_table[k].cache_type != LVL_TRACE)
+ break;
+ switch (cache_table[k].cache_type)
+ {
+ case LVL_1_INST:
+ l1i += cache_table[k].size;
+ break;
+ case LVL_1_DATA:
+ l1d += cache_table[k].size;
+ break;
+ case LVL_2:
+ l2 += cache_table[k].size;
+ break;
+ case LVL_3:
+ l3 += cache_table[k].size;
+ break;
+ case LVL_TRACE:
+ trace += cache_table[k].size;
+ break;
+ }
+
+ break;
+ }
+
+ k++;
+ }
+ }
+ }
+ }
+
+ if (new_l1d)
+ l1d = new_l1d;
+
+ if (new_l1i)
+ l1i = new_l1i;
+
+ if (new_l2)
+ l2 = new_l2;
+
+ if (new_l3)
+ l3 = new_l3;
+
+ if (opt_cpu_info)
+ {
+ if (trace)
+ printk("CPU: Trace cache: %dK uops", trace);
+ else if ( l1i )
+ printk("CPU: L1 I cache: %dK", l1i);
+
+ if (l1d)
+ printk(", L1 D cache: %dK\n", l1d);
+ else
+ printk("\n");
+
+ if (l2)
+ printk("CPU: L2 cache: %dK\n", l2);
+
+ if (l3)
+ printk("CPU: L3 cache: %dK\n", l3);
+ }
+
+ c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
+
+ return l2;
}
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 6e8901530a..4f45b7366a 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -81,43 +81,47 @@ static int variable_period = 1;
*/
static void mce_amd_checkregs(void *info)
{
- mctelem_cookie_t mctc;
- struct mca_summary bs;
-
- mctc = mcheck_mca_logout(MCA_POLLER, mca_allbanks, &bs, NULL);
-
- if (bs.errcnt && mctc != NULL) {
- static uint64_t dumpcount = 0;
-
- /* If Dom0 enabled the VIRQ_MCA event, then notify it.
- * Otherwise, if dom0 has had plenty of time to register
- * the virq handler but still hasn't then dump telemetry
- * to the Xen console. The call count may be incremented
- * on multiple cpus at once and is indicative only - just
- * a simple-minded attempt to avoid spamming the console
- * for corrected errors in early startup. */
-
- if (dom0_vmce_enabled()) {
- mctelem_commit(mctc);
- send_global_virq(VIRQ_MCA);
- } else if (++dumpcount >= 10) {
- x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
- mctelem_dismiss(mctc);
- } else {
- mctelem_dismiss(mctc);
- }
-
- } else if (mctc != NULL) {
- mctelem_dismiss(mctc);
- }
-
- /* adjust is global and all cpus may attempt to increment it without
- * synchronisation, so they race and the final adjust count
- * (number of cpus seeing any error) is approximate. We can
- * guarantee that if any cpu observes an error that the
- * adjust count is at least 1. */
- if (bs.errcnt)
- adjust++;
+ mctelem_cookie_t mctc;
+ struct mca_summary bs;
+
+ mctc = mcheck_mca_logout(MCA_POLLER, mca_allbanks, &bs, NULL);
+
+ if (bs.errcnt && mctc != NULL)
+ {
+ static uint64_t dumpcount = 0;
+
+ /* If Dom0 enabled the VIRQ_MCA event, then notify it.
+ * Otherwise, if dom0 has had plenty of time to register
+ * the virq handler but still hasn't then dump telemetry
+     * the virq handler but still hasn't, then dump telemetry
+ * on multiple cpus at once and is indicative only - just
+ * a simple-minded attempt to avoid spamming the console
+ * for corrected errors in early startup. */
+
+ if (dom0_vmce_enabled())
+ {
+ mctelem_commit(mctc);
+ send_global_virq(VIRQ_MCA);
+ }
+ else if (++dumpcount >= 10)
+ {
+ x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
+ mctelem_dismiss(mctc);
+ }
+ else
+ mctelem_dismiss(mctc);
+
+ }
+ else if (mctc != NULL)
+ mctelem_dismiss(mctc);
+
+ /* adjust is global and all cpus may attempt to increment it without
+ * synchronisation, so they race and the final adjust count
+ * (number of cpus seeing any error) is approximate. We can
+     * guarantee that if any cpu observes an error, the
+ * adjust count is at least 1. */
+ if (bs.errcnt)
+ adjust++;
}
/* polling service routine invoker:
@@ -129,118 +133,135 @@ static void mce_amd_checkregs(void *info)
*/
static void mce_amd_work_fn(void *data)
{
- on_each_cpu(mce_amd_checkregs, data, 1);
-
- if (adjust > 0) {
- if (!dom0_vmce_enabled()) {
- /* Dom0 did not enable VIRQ_MCA, so Xen is reporting. */
- printk("MCE: polling routine found correctable error. "
- " Use mcelog to parse above error output.\n");
- }
- }
-
- if (hw_threshold) {
- uint64_t value;
- uint32_t counter;
-
- value = mca_rdmsr(MSR_IA32_MCx_MISC(4));
- /* Only the error counter field is of interest
- * Bit field is described in AMD K8 BKDG chapter 6.4.5.5
- */
- counter = (value & 0xFFF00000000ULL) >> 32U;
-
- /* HW does not count *all* kinds of correctable errors.
- * Thus it is possible, that the polling routine finds an
- * correctable error even if the HW reports nothing. */
- if (counter > 0) {
- /* HW reported correctable errors,
- * the polling routine did not find...
- */
- if (adjust == 0) {
- printk("CPU counter reports %"PRIu32
- " correctable hardware error%s that %s"
- " not reported by the status MSRs\n",
- counter,
- (counter == 1 ? "" : "s"),
- (counter == 1 ? "was" : "were"));
- }
- /* subtract 1 to not double count the error
- * from the polling service routine */
- adjust += (counter - 1);
-
- /* Restart counter */
- /* No interrupt, reset counter value */
- value &= ~(0x60FFF00000000ULL);
- /* Counter enable */
- value |= (1ULL << 51);
- mca_wrmsr(MSR_IA32_MCx_MISC(4), value);
- }
- }
-
- if (variable_period && adjust > 0) {
- /* Increase polling frequency */
- adjust++; /* adjust == 1 must have an effect */
- period /= adjust;
- } else if (variable_period) {
- /* Decrease polling frequency */
- period *= 2;
- }
- if (variable_period && period > MCE_MAX) {
- /* limit: Poll at least every 30s */
- period = MCE_MAX;
- }
- if (variable_period && period < MCE_MIN) {
- /* limit: Poll every 2s.
- * When this is reached an uncorrectable error
- * is expected to happen, if Dom0 does nothing.
- */
- period = MCE_MIN;
- }
-
- set_timer(&mce_timer, NOW() + period);
- adjust = 0;
+ on_each_cpu(mce_amd_checkregs, data, 1);
+
+ if (adjust > 0)
+ {
+ if (!dom0_vmce_enabled())
+ {
+ /* Dom0 did not enable VIRQ_MCA, so Xen is reporting. */
+ printk("MCE: polling routine found correctable error. "
+ " Use mcelog to parse above error output.\n");
+ }
+ }
+
+ if (hw_threshold)
+ {
+ uint64_t value;
+ uint32_t counter;
+
+ value = mca_rdmsr(MSR_IA32_MCx_MISC(4));
+ /* Only the error counter field is of interest
+ * Bit field is described in AMD K8 BKDG chapter 6.4.5.5
+ */
+ counter = (value & 0xFFF00000000ULL) >> 32U;
+
+ /* HW does not count *all* kinds of correctable errors.
+     * Thus it is possible that the polling routine finds a
+ * correctable error even if the HW reports nothing. */
+ if (counter > 0)
+ {
+ /* HW reported correctable errors,
+ * the polling routine did not find...
+ */
+ if (adjust == 0)
+ {
+ printk("CPU counter reports %"PRIu32
+ " correctable hardware error%s that %s"
+ " not reported by the status MSRs\n",
+ counter,
+ (counter == 1 ? "" : "s"),
+ (counter == 1 ? "was" : "were"));
+ }
+ /* subtract 1 to not double count the error
+ * from the polling service routine */
+ adjust += (counter - 1);
+
+ /* Restart counter */
+ /* No interrupt, reset counter value */
+ value &= ~(0x60FFF00000000ULL);
+ /* Counter enable */
+ value |= (1ULL << 51);
+ mca_wrmsr(MSR_IA32_MCx_MISC(4), value);
+ }
+ }
+
+ if (variable_period && adjust > 0)
+ {
+ /* Increase polling frequency */
+ adjust++; /* adjust == 1 must have an effect */
+ period /= adjust;
+ }
+ else if (variable_period)
+ {
+ /* Decrease polling frequency */
+ period *= 2;
+ }
+ if (variable_period && period > MCE_MAX)
+ {
+ /* limit: Poll at least every 30s */
+ period = MCE_MAX;
+ }
+ if (variable_period && period < MCE_MIN)
+ {
+ /* limit: Poll every 2s.
+ * When this is reached an uncorrectable error
+ * is expected to happen, if Dom0 does nothing.
+ */
+ period = MCE_MIN;
+ }
+
+ set_timer(&mce_timer, NOW() + period);
+ adjust = 0;
}
void __init amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c)
{
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
- return;
-
- /* Assume we are on K8 or newer AMD or Hygon CPU here */
-
- /* The threshold bitfields in MSR_IA32_MC4_MISC has
- * been introduced along with the SVME feature bit. */
- if (variable_period && cpu_has(c, X86_FEATURE_SVM)) {
- uint64_t value;
-
- /* hw threshold registers present */
- hw_threshold = 1;
- rdmsrl(MSR_IA32_MCx_MISC(4), value);
-
- if (value & (1ULL << 61)) { /* Locked bit */
- /* Locked by BIOS. Not available for use */
- hw_threshold = 0;
- }
- if (!(value & (1ULL << 63))) { /* Valid bit */
- /* No CtrP present */
- hw_threshold = 0;
- } else {
- if (!(value & (1ULL << 62))) { /* Counter Bit */
- /* No counter field present */
- hw_threshold = 0;
- }
- }
-
- if (hw_threshold) {
- /* No interrupt, reset counter value */
- value &= ~(0x60FFF00000000ULL);
- /* Counter enable */
- value |= (1ULL << 51);
- wrmsrl(MSR_IA32_MCx_MISC(4), value);
- printk(XENLOG_INFO "MCA: Use hw thresholding to adjust polling frequency\n");
- }
- }
-
- init_timer(&mce_timer, mce_amd_work_fn, NULL, 0);
- set_timer(&mce_timer, NOW() + period);
+ if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ return;
+
+ /* Assume we are on K8 or newer AMD or Hygon CPU here */
+
+    /* The threshold bitfields in MSR_IA32_MC4_MISC have
+ * been introduced along with the SVME feature bit. */
+ if (variable_period && cpu_has(c, X86_FEATURE_SVM))
+ {
+ uint64_t value;
+
+ /* hw threshold registers present */
+ hw_threshold = 1;
+ rdmsrl(MSR_IA32_MCx_MISC(4), value);
+
+ if (value & (1ULL << 61)) /* Locked bit */
+ {
+ /* Locked by BIOS. Not available for use */
+ hw_threshold = 0;
+ }
+ if (!(value & (1ULL << 63))) /* Valid bit */
+ {
+ /* No CtrP present */
+ hw_threshold = 0;
+ }
+ else
+ {
+ if (!(value & (1ULL << 62))) /* Counter Bit */
+ {
+ /* No counter field present */
+ hw_threshold = 0;
+ }
+ }
+
+ if (hw_threshold)
+ {
+ /* No interrupt, reset counter value */
+ value &= ~(0x60FFF00000000ULL);
+ /* Counter enable */
+ value |= (1ULL << 51);
+ wrmsrl(MSR_IA32_MCx_MISC(4), value);
+ printk(XENLOG_INFO "MCA: Use hw thresholding to adjust polling frequency\n");
+ }
+ }
+
+ init_timer(&mce_timer, mce_amd_work_fn, NULL, 0);
+ set_timer(&mce_timer, NOW() + period);
}
diff --git a/xen/arch/x86/cpu/mcheck/barrier.c b/xen/arch/x86/cpu/mcheck/barrier.c
index a7e5b19a44..11a45dfd51 100644
--- a/xen/arch/x86/cpu/mcheck/barrier.c
+++ b/xen/arch/x86/cpu/mcheck/barrier.c
@@ -29,8 +29,8 @@ void mce_barrier_enter(struct mce_softirq_barrier *bar, bool wait)
while ( atomic_read(&bar->val) != num_online_cpus() &&
atomic_read(&bar->outgen) == gen )
{
- smp_mb();
- mce_panic_check();
+ smp_mb();
+ mce_panic_check();
}
}
@@ -47,8 +47,8 @@ void mce_barrier_exit(struct mce_softirq_barrier *bar, bool wait)
while ( atomic_read(&bar->val) != 0 &&
atomic_read(&bar->ingen) == gen )
{
- smp_mb();
- mce_panic_check();
+ smp_mb();
+ mce_panic_check();
}
}
diff --git a/xen/arch/x86/cpu/mcheck/barrier.h b/xen/arch/x86/cpu/mcheck/barrier.h
index c4d52b6192..5f05a8cc4e 100644
--- a/xen/arch/x86/cpu/mcheck/barrier.h
+++ b/xen/arch/x86/cpu/mcheck/barrier.h
@@ -4,7 +4,8 @@
#include <asm/atomic.h>
/* MCE handling */
-struct mce_softirq_barrier {
+struct mce_softirq_barrier
+{
atomic_t val;
atomic_t ingen;
atomic_t outgen;
diff --git a/xen/arch/x86/cpu/mcheck/mce-apei.c b/xen/arch/x86/cpu/mcheck/mce-apei.c
index 53b6735896..1326e36113 100644
--- a/xen/arch/x86/cpu/mcheck/mce-apei.c
+++ b/xen/arch/x86/cpu/mcheck/mce-apei.c
@@ -47,83 +47,85 @@
* CPER specification (in UEFI specification 2.3 appendix N) requires
* byte-packed.
*/
-struct __packed cper_mce_record {
- struct cper_record_header hdr;
- struct cper_section_descriptor sec_hdr;
- struct mce mce;
+struct __packed cper_mce_record
+{
+ struct cper_record_header hdr;
+ struct cper_section_descriptor sec_hdr;
+ struct mce mce;
};
int apei_write_mce(struct mce *m)
{
- struct cper_mce_record rcd;
-
- if (!m)
- return -EINVAL;
-
- memset(&rcd, 0, sizeof(rcd));
- memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
- rcd.hdr.revision = CPER_RECORD_REV;
- rcd.hdr.signature_end = CPER_SIG_END;
- rcd.hdr.section_count = 1;
- rcd.hdr.error_severity = CPER_SER_FATAL;
- /* timestamp, platform_id, partition_id are all invalid */
- rcd.hdr.validation_bits = 0;
- rcd.hdr.record_length = sizeof(rcd);
- rcd.hdr.creator_id = CPER_CREATOR_MCE;
- rcd.hdr.notification_type = CPER_NOTIFY_MCE;
- rcd.hdr.record_id = cper_next_record_id();
- rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
-
- rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
- rcd.sec_hdr.section_length = sizeof(rcd.mce);
- rcd.sec_hdr.revision = CPER_SEC_REV;
- /* fru_id and fru_text is invalid */
- rcd.sec_hdr.validation_bits = 0;
- rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
- rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
- rcd.sec_hdr.section_severity = CPER_SER_FATAL;
-
- memcpy(&rcd.mce, m, sizeof(*m));
-
- return erst_write(&rcd.hdr);
+ struct cper_mce_record rcd;
+
+ if (!m)
+ return -EINVAL;
+
+ memset(&rcd, 0, sizeof(rcd));
+ memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ rcd.hdr.revision = CPER_RECORD_REV;
+ rcd.hdr.signature_end = CPER_SIG_END;
+ rcd.hdr.section_count = 1;
+ rcd.hdr.error_severity = CPER_SER_FATAL;
+ /* timestamp, platform_id, partition_id are all invalid */
+ rcd.hdr.validation_bits = 0;
+ rcd.hdr.record_length = sizeof(rcd);
+ rcd.hdr.creator_id = CPER_CREATOR_MCE;
+ rcd.hdr.notification_type = CPER_NOTIFY_MCE;
+ rcd.hdr.record_id = cper_next_record_id();
+ rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+ rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
+ rcd.sec_hdr.section_length = sizeof(rcd.mce);
+ rcd.sec_hdr.revision = CPER_SEC_REV;
+    /* fru_id and fru_text are invalid */
+ rcd.sec_hdr.validation_bits = 0;
+ rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
+ rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
+ rcd.sec_hdr.section_severity = CPER_SER_FATAL;
+
+ memcpy(&rcd.mce, m, sizeof(*m));
+
+ return erst_write(&rcd.hdr);
}
#ifndef NDEBUG /* currently dead code */
ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
- struct cper_mce_record rcd;
- ssize_t len;
-
- if (!m || !record_id)
- return -EINVAL;
-
- len = erst_read_next(&rcd.hdr, sizeof(rcd));
- if (len <= 0)
- return len;
- /* Can not skip other records in storage via ERST unless clear them */
- else if (len != sizeof(rcd) ||
- uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) {
- printk(KERN_WARNING
- "MCE-APEI: Can not skip the unknown record in ERST");
- return -EIO;
- }
-
- memcpy(m, &rcd.mce, sizeof(*m));
- *record_id = rcd.hdr.record_id;
-
- return sizeof(*m);
+ struct cper_mce_record rcd;
+ ssize_t len;
+
+ if (!m || !record_id)
+ return -EINVAL;
+
+ len = erst_read_next(&rcd.hdr, sizeof(rcd));
+ if (len <= 0)
+ return len;
+    /* Cannot skip other records in storage via ERST unless we clear them */
+ else if (len != sizeof(rcd) ||
+ uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE))
+ {
+ printk(KERN_WARNING
+ "MCE-APEI: Can not skip the unknown record in ERST");
+ return -EIO;
+ }
+
+ memcpy(m, &rcd.mce, sizeof(*m));
+ *record_id = rcd.hdr.record_id;
+
+ return sizeof(*m);
}
/* Check whether there is record in ERST */
bool apei_check_mce(void)
{
- return erst_get_record_count() > 0;
+ return erst_get_record_count() > 0;
}
int apei_clear_mce(u64 record_id)
{
- return erst_clear(record_id);
+ return erst_clear(record_id);
}
#endif /* currently dead code */
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 2a9747ed19..5b66db4ee1 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -604,7 +604,8 @@ int show_mca_info(int inited, struct cpuinfo_x86 *c)
if ( inited != g_type )
{
char prefix[20];
- static const char *const type_str[] = {
+ static const char *const type_str[] =
+ {
[mcheck_amd_famXX] = "AMD",
[mcheck_amd_k8] = "AMD K8",
[mcheck_intel] = "Intel"
@@ -755,7 +756,8 @@ static int cpu_callback(
return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block cpu_nfb =
+{
.notifier_call = cpu_callback
};
@@ -829,7 +831,7 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp)
set_poll_bankmask(c);
return;
- out:
+out:
if ( bsp )
{
cpu_bank_free(smp_processor_id());
@@ -929,7 +931,8 @@ void x86_mcinfo_dump(struct mc_info *mi)
/* then the bank information */
x86_mcinfo_lookup(mic, mi, MC_TYPE_BANK); /* finds the first entry */
- do {
+ do
+ {
if ( mic == NULL )
return;
if ( mic->type != MC_TYPE_BANK )
@@ -949,7 +952,7 @@ void x86_mcinfo_dump(struct mc_info *mi)
if ( is_mc_panic )
x86_mcinfo_apei_save(mc_global, mc_bank);
- next:
+next:
mic = x86_mcinfo_next(mic); /* next entry */
if ( (mic == NULL) || (mic->size == 0) )
break;
@@ -1053,7 +1056,8 @@ void x86_mc_get_cpu_info(unsigned cpu, uint32_t *chipid, uint16_t *coreid,
#define INTPOSE_NENT 50
-static struct intpose_ent {
+static struct intpose_ent
+{
unsigned int cpu_nr;
uint64_t msr;
uint64_t val;
@@ -1335,11 +1339,13 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc)
long ret = 0;
struct xen_mc curop, *op = &curop;
struct vcpu *v = current;
- union {
+ union
+ {
struct xen_mc_fetch *nat;
struct compat_mc_fetch *cmp;
} mc_fetch;
- union {
+ union
+ {
struct xen_mc_physcpuinfo *nat;
struct compat_mc_physcpuinfo *cmp;
} mc_physcpuinfo;
@@ -1590,12 +1596,12 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc)
}
for_each_cpu(target, cpumap)
- if ( cpu_online(target) && !per_cpu(nr_mce_banks, target) )
- {
- ret = x86_mcerr("do_mca #MC: CPU%u has no banks",
- -ENOENT, target);
- break;
- }
+ if ( cpu_online(target) && !per_cpu(nr_mce_banks, target) )
+ {
+ ret = x86_mcerr("do_mca #MC: CPU%u has no banks",
+ -ENOENT, target);
+ break;
+ }
if ( ret )
break;
@@ -1676,8 +1682,8 @@ static void mc_panic_dump(void)
dprintk(XENLOG_ERR, "Begin dump mc_info\n");
for_each_online_cpu(cpu)
- mctelem_process_deferred(cpu, x86_mcinfo_dump_panic,
- mctelem_has_deferred_lmce(cpu));
+ mctelem_process_deferred(cpu, x86_mcinfo_dump_panic,
+ mctelem_has_deferred_lmce(cpu));
dprintk(XENLOG_ERR, "End dump mc_info, %x mcinfo dumped\n", mcinfo_dumpped);
}
@@ -1750,9 +1756,7 @@ static enum mce_result mce_action(const struct cpu_user_regs *regs,
mic = x86_mcinfo_next(mic) )
{
if ( mic->type != MC_TYPE_BANK )
- {
continue;
- }
binfo.mib = (struct mcinfo_bank *)mic;
binfo.bank = binfo.mib->mc_bank;
bank_result = MCER_NOERROR;
@@ -1860,7 +1864,7 @@ static void mce_softirq(void)
mctelem_process_deferred(cpu, mce_delayed_action, true);
else
for_each_online_cpu(workcpu)
- mctelem_process_deferred(workcpu, mce_delayed_action, false);
+ mctelem_process_deferred(workcpu, mce_delayed_action, false);
/* Step2: Send Log to DOM0 through vIRQ */
if ( dom0_vmce_enabled() )
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index f2cebccdd1..b329277b98 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -30,7 +30,8 @@ extern int mce_verbosity;
printk(s, ##a); \
} while (0)
-enum mcheck_type {
+enum mcheck_type
+{
mcheck_unset = -1,
mcheck_none,
mcheck_amd_famXX,
@@ -75,7 +76,7 @@ extern void mce_recoverable_register(mce_recoverable_t);
/* Read an MSR, checking for an interposed value first */
extern struct intpose_ent *intpose_lookup(unsigned int, uint64_t,
- uint64_t *);
+ uint64_t *);
extern bool intpose_inval(unsigned int, uint64_t);
static inline uint64_t mca_rdmsr(unsigned int msr)
@@ -102,14 +103,16 @@ static inline uint64_t mca_rdmsr(unsigned int msr)
* of the MCA data observed in the logout operation.
*/
-enum mca_source {
+enum mca_source
+{
MCA_POLLER,
MCA_CMCI_HANDLER,
MCA_RESET,
MCA_MCE_SCAN
};
-struct mca_summary {
+struct mca_summary
+{
uint32_t errcnt; /* number of banks with valid errors */
int ripv; /* meaningful on #MC */
int eipv; /* meaningful on #MC */
@@ -129,7 +132,7 @@ extern bool mce_broadcast;
extern void mcheck_mca_clearbanks(struct mca_banks *);
extern mctelem_cookie_t mcheck_mca_logout(enum mca_source, struct mca_banks *,
- struct mca_summary *, struct mca_banks *);
+ struct mca_summary *, struct mca_banks *);
/*
* Register callbacks to be made during bank telemetry logout.
@@ -152,7 +155,7 @@ extern void mce_need_clearbank_register(mce_need_clearbank_t);
* MCi_STATUS value for that bank.
*/
typedef struct mcinfo_extended *(*x86_mce_callback_t)
- (struct mc_info *, uint16_t, uint64_t);
+(struct mc_info *, uint16_t, uint64_t);
extern void x86_mce_callback_register(x86_mce_callback_t);
void *x86_mcinfo_reserve(struct mc_info *mi,
@@ -161,7 +164,8 @@ void x86_mcinfo_dump(struct mc_info *mi);
static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
{
- switch (boot_cpu_data.x86_vendor) {
+ switch (boot_cpu_data.x86_vendor)
+ {
case X86_VENDOR_INTEL:
if (msr >= MSR_IA32_MC0_CTL2 &&
msr < MSR_IA32_MCx_CTL2(v->arch.vmce.mcg_cap & MCG_CAP_COUNT) )
@@ -169,7 +173,8 @@ static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
break;
case X86_VENDOR_AMD:
- switch (msr) {
+ switch (msr)
+ {
case MSR_F10_MC4_MISC1:
case MSR_F10_MC4_MISC2:
case MSR_F10_MC4_MISC3:
@@ -183,7 +188,7 @@ static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
static inline int mce_bank_msr(const struct vcpu *v, uint32_t msr)
{
if ( (msr >= MSR_IA32_MC0_CTL &&
- msr < MSR_IA32_MCx_CTL(v->arch.vmce.mcg_cap & MCG_CAP_COUNT)) ||
+ msr < MSR_IA32_MCx_CTL(v->arch.vmce.mcg_cap & MCG_CAP_COUNT)) ||
mce_vendor_bank_msr(v, msr) )
return 1;
return 0;
@@ -198,7 +203,8 @@ extern unsigned int mce_dhandler_num;
extern unsigned int mce_uhandler_num;
/* Fields are zero when not available */
-struct mce {
+struct mce
+{
uint64_t status;
uint64_t misc;
uint64_t addr;
diff --git a/xen/arch/x86/cpu/mcheck/mce_amd.c b/xen/arch/x86/cpu/mcheck/mce_amd.c
index 94a5ba4561..63638d0e97 100644
--- a/xen/arch/x86/cpu/mcheck/mce_amd.c
+++ b/xen/arch/x86/cpu/mcheck/mce_amd.c
@@ -24,7 +24,7 @@
* Issue Date: October 2013
*
* URL:
- * http://support.amd.com/TechDocs/24593.pdf
+ * http://support.amd.com/TechDocs/24593.pdf
*/
/* The related documentation for K8 Revisions A - E is:
@@ -35,7 +35,7 @@
* Issue Date: February 2006
*
* URL:
- * http://support.amd.com/TechDocs/26094.PDF
+ * http://support.amd.com/TechDocs/26094.PDF
*/
/* The related documentation for K8 Revisions F - G is:
@@ -46,7 +46,7 @@
* Issue Date: July 2007
*
* URL:
- * http://support.amd.com/TechDocs/32559.pdf
+ * http://support.amd.com/TechDocs/32559.pdf
*/
/* Family10 MCA documentation published at
@@ -57,7 +57,7 @@
* Isse Date: January 11, 2013
*
* URL:
- * http://support.amd.com/TechDocs/31116.pdf
+ * http://support.amd.com/TechDocs/31116.pdf
*/
#include <xen/init.h>
@@ -74,26 +74,34 @@
#define ANY (~0U)
-enum mcequirk_amd_flags {
+enum mcequirk_amd_flags
+{
MCEQUIRK_NONE,
MCEQUIRK_K8_GART,
MCEQUIRK_F10_GART,
};
-static const struct mce_quirkdata {
+static const struct mce_quirkdata
+{
unsigned int cpu_family;
unsigned int cpu_model;
unsigned int cpu_stepping;
enum mcequirk_amd_flags quirk;
-} mce_amd_quirks[] = {
- { 0xf /* cpu family */, ANY /* all models */, ANY /* all steppings */,
- MCEQUIRK_K8_GART },
- { 0x10 /* cpu family */, ANY /* all models */, ANY /* all steppings */,
- MCEQUIRK_F10_GART },
+} mce_amd_quirks[] =
+{
+ {
+ 0xf /* cpu family */, ANY /* all models */, ANY /* all steppings */,
+ MCEQUIRK_K8_GART
+ },
+ {
+ 0x10 /* cpu family */, ANY /* all models */, ANY /* all steppings */,
+ MCEQUIRK_F10_GART
+ },
};
/* Error Code Types */
-enum mc_ec_type {
+enum mc_ec_type
+{
MC_EC_TLB_TYPE = 0x0010,
MC_EC_MEM_TYPE = 0x0100,
MC_EC_BUS_TYPE = 0x0800,
@@ -183,7 +191,7 @@ mcequirk_lookup_amd_quirkdata(const struct cpuinfo_x86 *c)
continue;
if ( (mce_amd_quirks[i].cpu_stepping != ANY) &&
(mce_amd_quirks[i].cpu_stepping != c->x86_mask) )
- continue;
+ continue;
return mce_amd_quirks[i].quirk;
}
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 5a10744ade..cfb3807d5e 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -78,7 +78,8 @@ static void intel_thermal_interrupt(struct cpu_user_regs *regs)
printk(KERN_EMERG "CPU%u: Temperature above threshold\n", cpu);
printk(KERN_EMERG "CPU%u: Running in modulated clock mode\n", cpu);
add_taint(TAINT_MACHINE_CHECK);
- } else
+ }
+ else
printk(KERN_INFO "CPU%u: Temperature/speed normal\n", cpu);
}
@@ -254,7 +255,8 @@ static enum intel_mce_type intel_check_mce_type(uint64_t status)
return intel_mce_fatal;
else
return intel_mce_ucr_srar;
- } else
+ }
+ else
return intel_mce_ucr_srao;
}
else
@@ -265,9 +267,9 @@ static enum intel_mce_type intel_check_mce_type(uint64_t status)
}
static void intel_memerr_dhandler(
- struct mca_binfo *binfo,
- enum mce_result *result,
- const struct cpu_user_regs *regs)
+ struct mca_binfo *binfo,
+ enum mce_result *result,
+ const struct cpu_user_regs *regs)
{
mce_printk(MCE_VERBOSE, "MCE: Enter UCR recovery action\n");
mc_memerr_dhandler(binfo, result, regs);
@@ -290,9 +292,9 @@ static bool intel_checkaddr(uint64_t status, uint64_t misc, int addrtype)
}
static void intel_srar_dhandler(
- struct mca_binfo *binfo,
- enum mce_result *result,
- const struct cpu_user_regs *regs)
+ struct mca_binfo *binfo,
+ enum mce_result *result,
+ const struct cpu_user_regs *regs)
{
uint64_t status = binfo->mib->mc_status;
@@ -314,9 +316,9 @@ static bool intel_srao_check(uint64_t status)
}
static void intel_srao_dhandler(
- struct mca_binfo *binfo,
- enum mce_result *result,
- const struct cpu_user_regs *regs)
+ struct mca_binfo *binfo,
+ enum mce_result *result,
+ const struct cpu_user_regs *regs)
{
uint64_t status = binfo->mib->mc_status;
@@ -341,9 +343,9 @@ static bool intel_default_check(uint64_t status)
}
static void intel_default_mce_dhandler(
- struct mca_binfo *binfo,
- enum mce_result *result,
- const struct cpu_user_regs * regs)
+ struct mca_binfo *binfo,
+ enum mce_result *result,
+ const struct cpu_user_regs *regs)
{
uint64_t status = binfo->mib->mc_status;
enum intel_mce_type type;
@@ -356,16 +358,17 @@ static void intel_default_mce_dhandler(
*result = MCER_CONTINUE;
}
-static const struct mca_error_handler intel_mce_dhandlers[] = {
+static const struct mca_error_handler intel_mce_dhandlers[] =
+{
{intel_srao_check, intel_srao_dhandler},
{intel_srar_check, intel_srar_dhandler},
{intel_default_check, intel_default_mce_dhandler}
};
static void intel_default_mce_uhandler(
- struct mca_binfo *binfo,
- enum mce_result *result,
- const struct cpu_user_regs *regs)
+ struct mca_binfo *binfo,
+ enum mce_result *result,
+ const struct cpu_user_regs *regs)
{
uint64_t status = binfo->mib->mc_status;
enum intel_mce_type type;
@@ -384,7 +387,8 @@ static void intel_default_mce_uhandler(
}
}
-static const struct mca_error_handler intel_mce_uhandlers[] = {
+static const struct mca_error_handler intel_mce_uhandlers[] =
+{
{intel_default_check, intel_default_mce_uhandler}
};
@@ -415,7 +419,8 @@ static bool intel_need_clearbank_scan(enum mca_source who, u64 status)
&& !(status & MCi_STATUS_AR) )
return true;
/* Only Log, no clear */
- else return false;
+ else
+ return false;
}
else if ( who == MCA_MCE_SCAN )
{
@@ -558,7 +563,7 @@ static void cmci_discover(void)
*/
mctc = mcheck_mca_logout(
- MCA_CMCI_HANDLER, per_cpu(mce_banks_owned, cpu), &bs, NULL);
+ MCA_CMCI_HANDLER, per_cpu(mce_banks_owned, cpu), &bs, NULL);
if ( bs.errcnt && mctc != NULL )
{
@@ -649,7 +654,7 @@ static void cmci_interrupt(struct cpu_user_regs *regs)
ack_APIC_irq();
mctc = mcheck_mca_logout(
- MCA_CMCI_HANDLER, this_cpu(mce_banks_owned), &bs, NULL);
+ MCA_CMCI_HANDLER, this_cpu(mce_banks_owned), &bs, NULL);
if ( bs.errcnt && mctc != NULL )
{
@@ -879,7 +884,7 @@ static int cpu_mcabank_alloc(unsigned int cpu)
per_cpu(last_state, cpu) = -1;
return 0;
- out:
+out:
mcabanks_free(cmci);
mcabanks_free(owned);
return -ENOMEM;
@@ -911,7 +916,8 @@ static int cpu_callback(
return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block cpu_nfb =
+{
.notifier_call = cpu_callback
};
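[Editor's note, not part of the patch] The intel_mce_dhandlers[]/intel_mce_uhandlers[] tables reformatted above follow a check/handler pair pattern: walk the table and run the first handler whose predicate claims the status value, with a catch-all entry last. A minimal sketch of that dispatch idiom, using made-up predicates and handlers:

#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

struct error_handler {
    bool (*owned_error)(uint64_t status);          /* does this entry claim it? */
    void (*recovery_handler)(uint64_t status);     /* action if claimed */
};

static bool check_high(uint64_t s) { return s & (1ULL << 63); }
static void handle_high(uint64_t s) { printf("high-severity: %#" PRIx64 "\n", s); }

static bool check_default(uint64_t s) { (void)s; return true; }
static void handle_default(uint64_t s) { printf("default: %#" PRIx64 "\n", s); }

static const struct error_handler handlers[] = {
    { check_high,    handle_high    },
    { check_default, handle_default },   /* catch-all, kept last */
};

static void dispatch(uint64_t status)
{
    unsigned int i;

    for ( i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++ )
        if ( handlers[i].owned_error(status) )
        {
            handlers[i].recovery_handler(status);
            return;
        }
}

int main(void)
{
    dispatch((1ULL << 63) | 0x1);   /* taken by the high-severity entry */
    dispatch(0x1);                  /* falls through to the catch-all */
    return 0;
}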
diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c
index 3bb13e5265..ec909d623d 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.c
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c
@@ -28,12 +28,13 @@
#include "mce.h"
-struct mctelem_ent {
- struct mctelem_ent *mcte_next; /* next in chronological order */
- struct mctelem_ent *mcte_prev; /* previous in chronological order */
- uint32_t mcte_flags; /* See MCTE_F_* below */
- uint32_t mcte_refcnt; /* Reference count */
- void *mcte_data; /* corresponding data payload */
+struct mctelem_ent
+{
+ struct mctelem_ent *mcte_next; /* next in chronological order */
+ struct mctelem_ent *mcte_prev; /* previous in chronological order */
+ uint32_t mcte_flags; /* See MCTE_F_* below */
+ uint32_t mcte_refcnt; /* Reference count */
+ void *mcte_data; /* corresponding data payload */
};
#define MCTE_F_CLASS_URGENT 0x0001U /* in use - urgent errors */
@@ -70,62 +71,64 @@ struct mctelem_ent {
#define COOKIE2MCTE(c) ((struct mctelem_ent *)(c))
#define MCTE2COOKIE(tep) ((mctelem_cookie_t)(tep))
-static struct mc_telem_ctl {
- /* Linked lists that thread the array members together.
- *
- * The free lists is a bit array where bit 1 means free.
- * This as element number is quite small and is easy to
- * atomically allocate that way.
- *
- * The committed list grows at the head and we do not maintain a
- * tail pointer; insertions are performed atomically. The head
- * thus has the most-recently committed telemetry, i.e. the
- * list is in reverse chronological order. The committed list
- * is singly-linked via mcte_prev pointers, and mcte_next is NULL.
- * When we move telemetry from the committed list to the processing
- * list we atomically unlink the committed list and keep a pointer
- * to the head of that list; we then traverse the list following
- * mcte_prev and fill in mcte_next to doubly-link the list, and then
- * append the tail of the list onto the processing list. If we panic
- * during this manipulation of the committed list we still have
- * the pointer to its head so we can recover all entries during
- * the panic flow (albeit in reverse chronological order).
- *
- * The processing list is updated in a controlled context, and
- * we can lock it for updates. The head of the processing list
- * always has the oldest telemetry, and we append (as above)
- * at the tail of the processing list. */
- DECLARE_BITMAP(mctc_free, MC_NENT);
- struct mctelem_ent *mctc_committed[MC_NCLASSES];
- struct mctelem_ent *mctc_processing_head[MC_NCLASSES];
- struct mctelem_ent *mctc_processing_tail[MC_NCLASSES];
- /*
- * Telemetry array
- */
- struct mctelem_ent *mctc_elems;
+static struct mc_telem_ctl
+{
+ /* Linked lists that thread the array members together.
+ *
+ * The free lists is a bit array where bit 1 means free.
+ * This as element number is quite small and is easy to
+ * atomically allocate that way.
+ *
+ * The committed list grows at the head and we do not maintain a
+ * tail pointer; insertions are performed atomically. The head
+ * thus has the most-recently committed telemetry, i.e. the
+ * list is in reverse chronological order. The committed list
+ * is singly-linked via mcte_prev pointers, and mcte_next is NULL.
+ * When we move telemetry from the committed list to the processing
+ * list we atomically unlink the committed list and keep a pointer
+ * to the head of that list; we then traverse the list following
+ * mcte_prev and fill in mcte_next to doubly-link the list, and then
+ * append the tail of the list onto the processing list. If we panic
+ * during this manipulation of the committed list we still have
+ * the pointer to its head so we can recover all entries during
+ * the panic flow (albeit in reverse chronological order).
+ *
+ * The processing list is updated in a controlled context, and
+ * we can lock it for updates. The head of the processing list
+ * always has the oldest telemetry, and we append (as above)
+ * at the tail of the processing list. */
+ DECLARE_BITMAP(mctc_free, MC_NENT);
+ struct mctelem_ent *mctc_committed[MC_NCLASSES];
+ struct mctelem_ent *mctc_processing_head[MC_NCLASSES];
+ struct mctelem_ent *mctc_processing_tail[MC_NCLASSES];
+ /*
+ * Telemetry array
+ */
+ struct mctelem_ent *mctc_elems;
} mctctl;
-struct mc_telem_cpu_ctl {
- /*
- * Per-CPU processing lists, used for deferred (softirq)
- * processing of telemetry.
- *
- * The two pending lists @lmce_pending and @pending grow at
- * the head in the reverse chronological order.
- *
- * @pending and @lmce_pending on the same CPU are mutually
- * exclusive, i.e. deferred MCE on a CPU are either all in
- * @lmce_pending or all in @pending. In the former case, all
- * deferred MCE are LMCE. In the latter case, both LMCE and
- * non-local MCE can be in @pending, and @pending contains at
- * least one non-local MCE if it's not empty.
- *
- * Changes to @pending and @lmce_pending should be performed
- * via mctelem_process_deferred() and mctelem_defer(), in order
- * to guarantee the above mutual exclusivity.
- */
- struct mctelem_ent *pending, *lmce_pending;
- struct mctelem_ent *processing;
+struct mc_telem_cpu_ctl
+{
+ /*
+ * Per-CPU processing lists, used for deferred (softirq)
+ * processing of telemetry.
+ *
+ * The two pending lists @lmce_pending and @pending grow at
+ * the head in the reverse chronological order.
+ *
+ * @pending and @lmce_pending on the same CPU are mutually
+ * exclusive, i.e. deferred MCE on a CPU are either all in
+ * @lmce_pending or all in @pending. In the former case, all
+ * deferred MCE are LMCE. In the latter case, both LMCE and
+ * non-local MCE can be in @pending, and @pending contains at
+ * least one non-local MCE if it's not empty.
+ *
+ * Changes to @pending and @lmce_pending should be performed
+ * via mctelem_process_deferred() and mctelem_defer(), in order
+ * to guarantee the above mutual exclusivity.
+ */
+ struct mctelem_ent *pending, *lmce_pending;
+ struct mctelem_ent *processing;
};
static DEFINE_PER_CPU(struct mc_telem_cpu_ctl, mctctl);
@@ -133,16 +136,17 @@ static DEFINE_PER_CPU(struct mc_telem_cpu_ctl, mctctl);
static DEFINE_SPINLOCK(processing_lock);
static void mctelem_xchg_head(struct mctelem_ent **headp,
- struct mctelem_ent **linkp,
- struct mctelem_ent *new)
+ struct mctelem_ent **linkp,
+ struct mctelem_ent *new)
{
- for (;;) {
- struct mctelem_ent *old;
-
- *linkp = old = *headp;
- if (cmpxchgptr(headp, old, new) == old)
- break;
- }
+ for (;;)
+ {
+ struct mctelem_ent *old;
+
+ *linkp = old = *headp;
+ if (cmpxchgptr(headp, old, new) == old)
+ break;
+ }
}
/**
@@ -170,30 +174,31 @@ static void mctelem_xchg_head(struct mctelem_ent **headp,
*/
void mctelem_defer(mctelem_cookie_t cookie, bool lmce)
{
- struct mctelem_ent *tep = COOKIE2MCTE(cookie);
- struct mc_telem_cpu_ctl *mctctl = &this_cpu(mctctl);
-
- ASSERT(mctctl->pending == NULL || mctctl->lmce_pending == NULL);
-
- if (mctctl->pending)
- mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep);
- else if (lmce)
- mctelem_xchg_head(&mctctl->lmce_pending, &tep->mcte_next, tep);
- else {
- /*
- * LMCE is supported on Skylake-server and later CPUs, on
- * which mce_broadcast is always true. Therefore, non-empty
- * mctctl->lmce_pending in this branch implies a broadcasting
- * MC# is being handled, every CPU is in the exception
- * context, and no one is consuming mctctl->pending at this
- * moment. As a result, the following two exchanges together
- * can be treated as atomic.
- */
- if (mctctl->lmce_pending)
- mctelem_xchg_head(&mctctl->lmce_pending,
- &mctctl->pending, NULL);
- mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep);
- }
+ struct mctelem_ent *tep = COOKIE2MCTE(cookie);
+ struct mc_telem_cpu_ctl *mctctl = &this_cpu(mctctl);
+
+ ASSERT(mctctl->pending == NULL || mctctl->lmce_pending == NULL);
+
+ if (mctctl->pending)
+ mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep);
+ else if (lmce)
+ mctelem_xchg_head(&mctctl->lmce_pending, &tep->mcte_next, tep);
+ else
+ {
+ /*
+ * LMCE is supported on Skylake-server and later CPUs, on
+ * which mce_broadcast is always true. Therefore, non-empty
+ * mctctl->lmce_pending in this branch implies a broadcasting
+ * MC# is being handled, every CPU is in the exception
+ * context, and no one is consuming mctctl->pending at this
+ * moment. As a result, the following two exchanges together
+ * can be treated as atomic.
+ */
+ if (mctctl->lmce_pending)
+ mctelem_xchg_head(&mctctl->lmce_pending,
+ &mctctl->pending, NULL);
+ mctelem_xchg_head(&mctctl->pending, &tep->mcte_next, tep);
+ }
}
/**
@@ -211,77 +216,79 @@ void mctelem_defer(mctelem_cookie_t cookie, bool lmce)
* @lmce: indicate which pending list on @cpu is handled
*/
void mctelem_process_deferred(unsigned int cpu,
- int (*fn)(mctelem_cookie_t),
- bool lmce)
+ int (*fn)(mctelem_cookie_t),
+ bool lmce)
{
- struct mctelem_ent *tep;
- struct mctelem_ent *head, *prev;
- struct mc_telem_cpu_ctl *mctctl = &per_cpu(mctctl, cpu);
- int ret;
-
- /*
- * First, unhook the list of telemetry structures, and
- * hook it up to the processing list head for this CPU.
- *
- * If @lmce is true and a non-local MC# occurs before the
- * following atomic exchange, @lmce will not hold after
- * resumption, because all telemetries in @lmce_pending on
- * @cpu are moved to @pending on @cpu in mcheck_cmn_handler().
- * In such a case, no telemetries will be handled in this
- * function after resumption. Another round of MCE softirq,
- * which was raised by above mcheck_cmn_handler(), will handle
- * those moved telemetries in @pending on @cpu.
- *
- * Any MC# occurring after the following atomic exchange will be
- * handled by another round of MCE softirq.
- */
- mctelem_xchg_head(lmce ? &mctctl->lmce_pending : &mctctl->pending,
- &this_cpu(mctctl.processing), NULL);
-
- head = this_cpu(mctctl.processing);
-
- /*
- * Then, fix up the list to include prev pointers, to make
- * things a little easier, as the list must be traversed in
- * chronological order, which is backward from the order they
- * are in.
- */
- for (tep = head, prev = NULL; tep != NULL; tep = tep->mcte_next) {
- tep->mcte_prev = prev;
- prev = tep;
- }
-
- /*
- * Now walk the list of telemetry structures, handling each
- * one of them. Unhooking the structure here does not need to
- * be atomic, as this list is only accessed from a softirq
- * context; the MCE handler does not touch it.
- */
- for (tep = prev; tep != NULL; tep = prev) {
- prev = tep->mcte_prev;
- tep->mcte_next = tep->mcte_prev = NULL;
-
- ret = fn(MCTE2COOKIE(tep));
- if (prev != NULL)
- prev->mcte_next = NULL;
- tep->mcte_prev = tep->mcte_next = NULL;
- if (ret != 0)
- mctelem_commit(MCTE2COOKIE(tep));
- else
- mctelem_dismiss(MCTE2COOKIE(tep));
- }
+ struct mctelem_ent *tep;
+ struct mctelem_ent *head, *prev;
+ struct mc_telem_cpu_ctl *mctctl = &per_cpu(mctctl, cpu);
+ int ret;
+
+ /*
+ * First, unhook the list of telemetry structures, and
+ * hook it up to the processing list head for this CPU.
+ *
+ * If @lmce is true and a non-local MC# occurs before the
+ * following atomic exchange, @lmce will not hold after
+ * resumption, because all telemetries in @lmce_pending on
+ * @cpu are moved to @pending on @cpu in mcheck_cmn_handler().
+ * In such a case, no telemetries will be handled in this
+ * function after resumption. Another round of MCE softirq,
+ * which was raised by above mcheck_cmn_handler(), will handle
+ * those moved telemetries in @pending on @cpu.
+ *
+ * Any MC# occurring after the following atomic exchange will be
+ * handled by another round of MCE softirq.
+ */
+ mctelem_xchg_head(lmce ? &mctctl->lmce_pending : &mctctl->pending,
+ &this_cpu(mctctl.processing), NULL);
+
+ head = this_cpu(mctctl.processing);
+
+ /*
+ * Then, fix up the list to include prev pointers, to make
+ * things a little easier, as the list must be traversed in
+ * chronological order, which is backward from the order they
+ * are in.
+ */
+ for (tep = head, prev = NULL; tep != NULL; tep = tep->mcte_next)
+ {
+ tep->mcte_prev = prev;
+ prev = tep;
+ }
+
+ /*
+ * Now walk the list of telemetry structures, handling each
+ * one of them. Unhooking the structure here does not need to
+ * be atomic, as this list is only accessed from a softirq
+ * context; the MCE handler does not touch it.
+ */
+ for (tep = prev; tep != NULL; tep = prev)
+ {
+ prev = tep->mcte_prev;
+ tep->mcte_next = tep->mcte_prev = NULL;
+
+ ret = fn(MCTE2COOKIE(tep));
+ if (prev != NULL)
+ prev->mcte_next = NULL;
+ tep->mcte_prev = tep->mcte_next = NULL;
+ if (ret != 0)
+ mctelem_commit(MCTE2COOKIE(tep));
+ else
+ mctelem_dismiss(MCTE2COOKIE(tep));
+ }
}
bool mctelem_has_deferred(unsigned int cpu)
{
- if (per_cpu(mctctl.pending, cpu) != NULL)
- return true;
- return false;
+ if (per_cpu(mctctl.pending, cpu) != NULL)
+ return true;
+ return false;
}
bool mctelem_has_deferred_lmce(unsigned int cpu)
{
- return per_cpu(mctctl.lmce_pending, cpu) != NULL;
+ return per_cpu(mctctl.lmce_pending, cpu) != NULL;
}
/* Free an entry to its native free list; the entry must not be linked on
@@ -289,14 +296,14 @@ bool mctelem_has_deferred_lmce(unsigned int cpu)
*/
static void mctelem_free(struct mctelem_ent *tep)
{
- BUG_ON(tep->mcte_refcnt != 0);
- BUG_ON(MCTE_STATE(tep) != MCTE_F_STATE_FREE);
+ BUG_ON(tep->mcte_refcnt != 0);
+ BUG_ON(MCTE_STATE(tep) != MCTE_F_STATE_FREE);
- tep->mcte_prev = NULL;
- tep->mcte_next = NULL;
+ tep->mcte_prev = NULL;
+ tep->mcte_next = NULL;
- /* set free in array */
- set_bit(tep - mctctl.mctc_elems, mctctl.mctc_free);
+ /* set free in array */
+ set_bit(tep - mctctl.mctc_elems, mctctl.mctc_free);
}
/* Increment the reference count of an entry that is not linked on to
@@ -304,7 +311,7 @@ static void mctelem_free(struct mctelem_ent *tep)
*/
static void mctelem_hold(struct mctelem_ent *tep)
{
- tep->mcte_refcnt++;
+ tep->mcte_refcnt++;
}
/* Increment the reference count on an entry that is linked at the head of
@@ -312,11 +319,11 @@ static void mctelem_hold(struct mctelem_ent *tep)
*/
static void mctelem_processing_hold(struct mctelem_ent *tep)
{
- int which = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ?
- MC_URGENT : MC_NONURGENT;
+ int which = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ?
+ MC_URGENT : MC_NONURGENT;
- BUG_ON(tep != mctctl.mctc_processing_head[which]);
- tep->mcte_refcnt++;
+ BUG_ON(tep != mctctl.mctc_processing_head[which]);
+ tep->mcte_refcnt++;
}
/* Decrement the reference count on an entry that is linked at the head of
@@ -324,46 +331,49 @@ static void mctelem_processing_hold(struct mctelem_ent *tep)
*/
static void mctelem_processing_release(struct mctelem_ent *tep)
{
- int which = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ?
- MC_URGENT : MC_NONURGENT;
-
- BUG_ON(tep != mctctl.mctc_processing_head[which]);
- if (--tep->mcte_refcnt == 0) {
- MCTE_TRANSITION_STATE(tep, PROCESSING, FREE);
- mctctl.mctc_processing_head[which] = tep->mcte_next;
- mctelem_free(tep);
- }
+ int which = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ?
+ MC_URGENT : MC_NONURGENT;
+
+ BUG_ON(tep != mctctl.mctc_processing_head[which]);
+ if (--tep->mcte_refcnt == 0)
+ {
+ MCTE_TRANSITION_STATE(tep, PROCESSING, FREE);
+ mctctl.mctc_processing_head[which] = tep->mcte_next;
+ mctelem_free(tep);
+ }
}
void __init mctelem_init(unsigned int datasz)
{
- char *datarr;
- unsigned int i;
-
- BUILD_BUG_ON(MC_URGENT != 0 || MC_NONURGENT != 1 || MC_NCLASSES != 2);
-
- datasz = (datasz & ~0xf) + 0x10; /* 16 byte roundup */
-
- if ((mctctl.mctc_elems = xmalloc_array(struct mctelem_ent,
- MC_NENT)) == NULL ||
- (datarr = xmalloc_bytes(MC_NENT * datasz)) == NULL) {
- xfree(mctctl.mctc_elems);
- printk("Allocations for MCA telemetry failed\n");
- return;
- }
-
- for (i = 0; i < MC_NENT; i++) {
- struct mctelem_ent *tep;
-
- tep = mctctl.mctc_elems + i;
- tep->mcte_flags = MCTE_F_STATE_FREE;
- tep->mcte_refcnt = 0;
- tep->mcte_data = datarr + i * datasz;
-
- __set_bit(i, mctctl.mctc_free);
- tep->mcte_next = NULL;
- tep->mcte_prev = NULL;
- }
+ char *datarr;
+ unsigned int i;
+
+ BUILD_BUG_ON(MC_URGENT != 0 || MC_NONURGENT != 1 || MC_NCLASSES != 2);
+
+ datasz = (datasz & ~0xf) + 0x10; /* 16 byte roundup */
+
+ if ((mctctl.mctc_elems = xmalloc_array(struct mctelem_ent,
+ MC_NENT)) == NULL ||
+ (datarr = xmalloc_bytes(MC_NENT * datasz)) == NULL)
+ {
+ xfree(mctctl.mctc_elems);
+ printk("Allocations for MCA telemetry failed\n");
+ return;
+ }
+
+ for (i = 0; i < MC_NENT; i++)
+ {
+ struct mctelem_ent *tep;
+
+ tep = mctctl.mctc_elems + i;
+ tep->mcte_flags = MCTE_F_STATE_FREE;
+ tep->mcte_refcnt = 0;
+ tep->mcte_data = datarr + i * datasz;
+
+ __set_bit(i, mctctl.mctc_free);
+ tep->mcte_next = NULL;
+ tep->mcte_prev = NULL;
+ }
}
/* incremented non-atomically when reserve fails */
@@ -375,40 +385,43 @@ static int mctelem_drop_count;
*/
mctelem_cookie_t mctelem_reserve(mctelem_class_t which)
{
- unsigned bit;
- unsigned start_bit = (which == MC_URGENT) ? 0 : MC_URGENT_NENT;
-
- for (;;) {
- bit = find_next_bit(mctctl.mctc_free, MC_NENT, start_bit);
-
- if (bit >= MC_NENT) {
- mctelem_drop_count++;
- return (NULL);
- }
-
- /* try to allocate, atomically clear free bit */
- if (test_and_clear_bit(bit, mctctl.mctc_free)) {
- /* return element we got */
- struct mctelem_ent *tep = mctctl.mctc_elems + bit;
-
- mctelem_hold(tep);
- MCTE_TRANSITION_STATE(tep, FREE, UNCOMMITTED);
- tep->mcte_next = NULL;
- tep->mcte_prev = NULL;
- if (which == MC_URGENT)
- MCTE_SET_CLASS(tep, URGENT);
- else
- MCTE_SET_CLASS(tep, NONURGENT);
- return MCTE2COOKIE(tep);
- }
- }
+ unsigned bit;
+ unsigned start_bit = (which == MC_URGENT) ? 0 : MC_URGENT_NENT;
+
+ for (;;)
+ {
+ bit = find_next_bit(mctctl.mctc_free, MC_NENT, start_bit);
+
+ if (bit >= MC_NENT)
+ {
+ mctelem_drop_count++;
+ return (NULL);
+ }
+
+ /* try to allocate, atomically clear free bit */
+ if (test_and_clear_bit(bit, mctctl.mctc_free))
+ {
+ /* return element we got */
+ struct mctelem_ent *tep = mctctl.mctc_elems + bit;
+
+ mctelem_hold(tep);
+ MCTE_TRANSITION_STATE(tep, FREE, UNCOMMITTED);
+ tep->mcte_next = NULL;
+ tep->mcte_prev = NULL;
+ if (which == MC_URGENT)
+ MCTE_SET_CLASS(tep, URGENT);
+ else
+ MCTE_SET_CLASS(tep, NONURGENT);
+ return MCTE2COOKIE(tep);
+ }
+ }
}
void *mctelem_dataptr(mctelem_cookie_t cookie)
{
- struct mctelem_ent *tep = COOKIE2MCTE(cookie);
+ struct mctelem_ent *tep = COOKIE2MCTE(cookie);
- return tep->mcte_data;
+ return tep->mcte_data;
}
/* Release a previously reserved entry back to the freelist without
@@ -417,11 +430,11 @@ void *mctelem_dataptr(mctelem_cookie_t cookie)
*/
void mctelem_dismiss(mctelem_cookie_t cookie)
{
- struct mctelem_ent *tep = COOKIE2MCTE(cookie);
+ struct mctelem_ent *tep = COOKIE2MCTE(cookie);
- tep->mcte_refcnt--;
- MCTE_TRANSITION_STATE(tep, UNCOMMITTED, FREE);
- mctelem_free(tep);
+ tep->mcte_refcnt--;
+ MCTE_TRANSITION_STATE(tep, UNCOMMITTED, FREE);
+ mctelem_free(tep);
}
/* Commit an entry with completed telemetry for logging. The caller must
@@ -431,14 +444,14 @@ void mctelem_dismiss(mctelem_cookie_t cookie)
*/
void mctelem_commit(mctelem_cookie_t cookie)
{
- struct mctelem_ent *tep = COOKIE2MCTE(cookie);
- mctelem_class_t target = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ?
- MC_URGENT : MC_NONURGENT;
+ struct mctelem_ent *tep = COOKIE2MCTE(cookie);
+ mctelem_class_t target = MCTE_CLASS(tep) == MCTE_F_CLASS_URGENT ?
+ MC_URGENT : MC_NONURGENT;
- BUG_ON(tep->mcte_next != NULL || tep->mcte_prev != NULL);
- MCTE_TRANSITION_STATE(tep, UNCOMMITTED, COMMITTED);
+ BUG_ON(tep->mcte_next != NULL || tep->mcte_prev != NULL);
+ MCTE_TRANSITION_STATE(tep, UNCOMMITTED, COMMITTED);
- mctelem_xchg_head(&mctctl.mctc_committed[target], &tep->mcte_prev, tep);
+ mctelem_xchg_head(&mctctl.mctc_committed[target], &tep->mcte_prev, tep);
}
/* Move telemetry from committed list to processing list, reversing the
@@ -458,94 +471,99 @@ static struct mctelem_ent *dangling[MC_NCLASSES];
static void mctelem_append_processing(mctelem_class_t which)
{
- mctelem_class_t target = which == MC_URGENT ?
- MC_URGENT : MC_NONURGENT;
- struct mctelem_ent **commlp = &mctctl.mctc_committed[target];
- struct mctelem_ent **proclhp = &mctctl.mctc_processing_head[target];
- struct mctelem_ent **procltp = &mctctl.mctc_processing_tail[target];
- struct mctelem_ent *tep, *ltep;
-
- /* Check for an empty list; no race since we hold the processing lock */
- if (*commlp == NULL)
- return;
-
- /* Atomically unlink the committed list, and keep a pointer to
- * the list we unlink in a well-known location so it can be
- * picked up in panic code should we panic between this unlink
- * and the append to the processing list. */
- mctelem_xchg_head(commlp, &dangling[target], NULL);
-
- if (dangling[target] == NULL)
- return;
-
- /* Traverse the list following the previous pointers (reverse
- * chronological order). For each entry fill in the next pointer
- * and transition the element state. */
- for (tep = dangling[target], ltep = NULL; tep != NULL;
- tep = tep->mcte_prev) {
- MCTE_TRANSITION_STATE(tep, COMMITTED, PROCESSING);
- tep->mcte_next = ltep;
- ltep = tep;
- }
-
- /* ltep points to the head of a chronologically ordered linked
- * list of telemetry entries ending at the most recent entry
- * dangling[target] if mcte_next is followed; tack this on to
- * the processing list.
- */
- if (*proclhp == NULL) {
- *proclhp = ltep;
- *procltp = dangling[target];
- } else {
- (*procltp)->mcte_next = ltep;
- ltep->mcte_prev = *procltp;
- *procltp = dangling[target];
- }
- smp_wmb();
- dangling[target] = NULL;
- smp_wmb();
+ mctelem_class_t target = which == MC_URGENT ?
+ MC_URGENT : MC_NONURGENT;
+ struct mctelem_ent **commlp = &mctctl.mctc_committed[target];
+ struct mctelem_ent **proclhp = &mctctl.mctc_processing_head[target];
+ struct mctelem_ent **procltp = &mctctl.mctc_processing_tail[target];
+ struct mctelem_ent *tep, *ltep;
+
+ /* Check for an empty list; no race since we hold the processing lock */
+ if (*commlp == NULL)
+ return;
+
+ /* Atomically unlink the committed list, and keep a pointer to
+ * the list we unlink in a well-known location so it can be
+ * picked up in panic code should we panic between this unlink
+ * and the append to the processing list. */
+ mctelem_xchg_head(commlp, &dangling[target], NULL);
+
+ if (dangling[target] == NULL)
+ return;
+
+ /* Traverse the list following the previous pointers (reverse
+ * chronological order). For each entry fill in the next pointer
+ * and transition the element state. */
+ for (tep = dangling[target], ltep = NULL; tep != NULL;
+ tep = tep->mcte_prev)
+ {
+ MCTE_TRANSITION_STATE(tep, COMMITTED, PROCESSING);
+ tep->mcte_next = ltep;
+ ltep = tep;
+ }
+
+ /* ltep points to the head of a chronologically ordered linked
+ * list of telemetry entries ending at the most recent entry
+ * dangling[target] if mcte_next is followed; tack this on to
+ * the processing list.
+ */
+ if (*proclhp == NULL)
+ {
+ *proclhp = ltep;
+ *procltp = dangling[target];
+ }
+ else
+ {
+ (*procltp)->mcte_next = ltep;
+ ltep->mcte_prev = *procltp;
+ *procltp = dangling[target];
+ }
+ smp_wmb();
+ dangling[target] = NULL;
+ smp_wmb();
}
mctelem_cookie_t mctelem_consume_oldest_begin(mctelem_class_t which)
{
- mctelem_class_t target = (which == MC_URGENT) ?
- MC_URGENT : MC_NONURGENT;
- struct mctelem_ent *tep;
-
- spin_lock(&processing_lock);
- mctelem_append_processing(target);
- if ((tep = mctctl.mctc_processing_head[target]) == NULL) {
- spin_unlock(&processing_lock);
- return NULL;
- }
-
- mctelem_processing_hold(tep);
- spin_unlock(&processing_lock);
- return MCTE2COOKIE(tep);
+ mctelem_class_t target = (which == MC_URGENT) ?
+ MC_URGENT : MC_NONURGENT;
+ struct mctelem_ent *tep;
+
+ spin_lock(&processing_lock);
+ mctelem_append_processing(target);
+ if ((tep = mctctl.mctc_processing_head[target]) == NULL)
+ {
+ spin_unlock(&processing_lock);
+ return NULL;
+ }
+
+ mctelem_processing_hold(tep);
+ spin_unlock(&processing_lock);
+ return MCTE2COOKIE(tep);
}
void mctelem_consume_oldest_end(mctelem_cookie_t cookie)
{
- struct mctelem_ent *tep = COOKIE2MCTE(cookie);
+ struct mctelem_ent *tep = COOKIE2MCTE(cookie);
- spin_lock(&processing_lock);
- mctelem_processing_release(tep);
- spin_unlock(&processing_lock);
+ spin_lock(&processing_lock);
+ mctelem_processing_release(tep);
+ spin_unlock(&processing_lock);
}
void mctelem_ack(mctelem_class_t which, mctelem_cookie_t cookie)
{
- mctelem_class_t target = (which == MC_URGENT) ?
- MC_URGENT : MC_NONURGENT;
- struct mctelem_ent *tep = COOKIE2MCTE(cookie);
+ mctelem_class_t target = (which == MC_URGENT) ?
+ MC_URGENT : MC_NONURGENT;
+ struct mctelem_ent *tep = COOKIE2MCTE(cookie);
- if (tep == NULL)
- return;
+ if (tep == NULL)
+ return;
- spin_lock(&processing_lock);
- if (tep == mctctl.mctc_processing_head[target])
- mctelem_processing_release(tep);
- spin_unlock(&processing_lock);
+ spin_lock(&processing_lock);
+ if (tep == mctctl.mctc_processing_head[target])
+ mctelem_processing_release(tep);
+ spin_unlock(&processing_lock);
}
/*
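[Editor's note, not part of the patch] The mctelem.c hunks above are built around mctelem_xchg_head(): push an entry onto a list by pointing its link at the current head and retrying a compare-and-swap until the head has not moved underneath us, which keeps the committed/pending lists lock-free. A minimal standalone sketch of that idiom, using C11 atomics in place of Xen's cmpxchgptr(); the node type and values are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

struct node {
    struct node *next;
    int val;
};

static _Atomic(struct node *) head;   /* starts out NULL */

static void push(struct node *n)
{
    struct node *old = atomic_load(&head);

    do {
        n->next = old;   /* link the new node to the head we last observed */
    } while ( !atomic_compare_exchange_weak(&head, &old, n) );
    /* on failure, 'old' is refreshed with the current head and we retry */
}

int main(void)
{
    static struct node a = { .val = 1 }, b = { .val = 2 };
    struct node *n;

    push(&a);
    push(&b);

    /* Most recently pushed entry sits at the head (reverse chronological),
     * just like the committed/pending lists in mctelem.c. */
    for ( n = atomic_load(&head); n; n = n->next )
        printf("%d\n", n->val);

    return 0;
}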
diff --git a/xen/arch/x86/cpu/mcheck/mctelem.h b/xen/arch/x86/cpu/mcheck/mctelem.h
index d4eba53ae0..800e31897c 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.h
+++ b/xen/arch/x86/cpu/mcheck/mctelem.h
@@ -54,9 +54,10 @@
typedef struct mctelem_cookie *mctelem_cookie_t;
-typedef enum mctelem_class {
- MC_URGENT,
- MC_NONURGENT
+typedef enum mctelem_class
+{
+ MC_URGENT,
+ MC_NONURGENT
} mctelem_class_t;
extern void mctelem_init(unsigned int);
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index ec52d37c96..5aab13dcfb 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -15,7 +15,7 @@
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/sched.h>
-#include <asm/processor.h>
+#include <asm/processor.h>
#include <asm/system.h>
#include <asm/msr.h>
@@ -34,86 +34,92 @@ static int variable_period = 1;
static void mce_checkregs (void *info)
{
- mctelem_cookie_t mctc;
- struct mca_summary bs;
- static uint64_t dumpcount = 0;
-
- mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
- &bs, NULL);
-
- if (bs.errcnt && mctc != NULL) {
- adjust++;
-
- /* If Dom0 enabled the VIRQ_MCA event, then notify it.
- * Otherwise, if dom0 has had plenty of time to register
- * the virq handler but still hasn't then dump telemetry
- * to the Xen console. The call count may be incremented
- * on multiple cpus at once and is indicative only - just
- * a simple-minded attempt to avoid spamming the console
- * for corrected errors in early startup.
- */
-
- if (dom0_vmce_enabled()) {
- mctelem_commit(mctc);
- send_global_virq(VIRQ_MCA);
- } else if (++dumpcount >= 10) {
- x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
- mctelem_dismiss(mctc);
- } else {
- mctelem_dismiss(mctc);
- }
- } else if (mctc != NULL) {
- mctelem_dismiss(mctc);
- }
+ mctelem_cookie_t mctc;
+ struct mca_summary bs;
+ static uint64_t dumpcount = 0;
+
+ mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
+ &bs, NULL);
+
+ if (bs.errcnt && mctc != NULL)
+ {
+ adjust++;
+
+ /* If Dom0 enabled the VIRQ_MCA event, then notify it.
+ * Otherwise, if dom0 has had plenty of time to register
+ * the virq handler but still hasn't then dump telemetry
+ * to the Xen console. The call count may be incremented
+ * on multiple cpus at once and is indicative only - just
+ * a simple-minded attempt to avoid spamming the console
+ * for corrected errors in early startup.
+ */
+
+ if (dom0_vmce_enabled())
+ {
+ mctelem_commit(mctc);
+ send_global_virq(VIRQ_MCA);
+ }
+ else if (++dumpcount >= 10)
+ {
+ x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
+ mctelem_dismiss(mctc);
+ }
+ else
+ mctelem_dismiss(mctc);
+ }
+ else if (mctc != NULL)
+ mctelem_dismiss(mctc);
}
static void mce_work_fn(void *data)
-{
- on_each_cpu(mce_checkregs, NULL, 1);
-
- if (variable_period) {
- if (adjust)
- period /= (adjust + 1);
- else
- period *= 2;
- if (period > MCE_PERIOD_MAX)
- period = MCE_PERIOD_MAX;
- if (period < MCE_PERIOD_MIN)
- period = MCE_PERIOD_MIN;
- }
-
- set_timer(&mce_timer, NOW() + period);
- adjust = 0;
+{
+ on_each_cpu(mce_checkregs, NULL, 1);
+
+ if (variable_period)
+ {
+ if (adjust)
+ period /= (adjust + 1);
+ else
+ period *= 2;
+ if (period > MCE_PERIOD_MAX)
+ period = MCE_PERIOD_MAX;
+ if (period < MCE_PERIOD_MIN)
+ period = MCE_PERIOD_MIN;
+ }
+
+ set_timer(&mce_timer, NOW() + period);
+ adjust = 0;
}
static int __init init_nonfatal_mce_checker(void)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- /* Check for MCE support */
- if (!opt_mce || !mce_available(c))
- return -ENODEV;
-
- if (!this_cpu(poll_bankmask))
- return -EINVAL;
-
- /*
- * Check for non-fatal errors every MCE_RATE s
- */
- switch (c->x86_vendor) {
- case X86_VENDOR_AMD:
- case X86_VENDOR_HYGON:
- /* Assume we are on K8 or newer AMD or Hygon CPU here */
- amd_nonfatal_mcheck_init(c);
- break;
-
- case X86_VENDOR_INTEL:
- init_timer(&mce_timer, mce_work_fn, NULL, 0);
- set_timer(&mce_timer, NOW() + MCE_PERIOD);
- break;
- }
-
- printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
- return 0;
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ /* Check for MCE support */
+ if (!opt_mce || !mce_available(c))
+ return -ENODEV;
+
+ if (!this_cpu(poll_bankmask))
+ return -EINVAL;
+
+ /*
+ * Check for non-fatal errors every MCE_RATE s
+ */
+ switch (c->x86_vendor)
+ {
+ case X86_VENDOR_AMD:
+ case X86_VENDOR_HYGON:
+ /* Assume we are on K8 or newer AMD or Hygon CPU here */
+ amd_nonfatal_mcheck_init(c);
+ break;
+
+ case X86_VENDOR_INTEL:
+ init_timer(&mce_timer, mce_work_fn, NULL, 0);
+ set_timer(&mce_timer, NOW() + MCE_PERIOD);
+ break;
+ }
+
+ printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
+ return 0;
}
__initcall(init_nonfatal_mce_checker);
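[Editor's note, not part of the patch] mce_work_fn() above adapts the polling interval: shrink it when the last pass logged corrected errors, double it when the pass was clean, and clamp it to a fixed window. A small sketch of that backoff logic with arbitrary stand-in constants (MCE_PERIOD_MIN/MAX in Xen differ):

#include <stdint.h>
#include <stdio.h>

#define PERIOD_MIN  2000ULL   /* illustrative lower bound */
#define PERIOD_MAX 32000ULL   /* illustrative upper bound */

static uint64_t next_period(uint64_t period, unsigned int errors_seen)
{
    if ( errors_seen )
        period /= errors_seen + 1;   /* back off towards fast polling */
    else
        period *= 2;                 /* relax when nothing was found */

    if ( period > PERIOD_MAX )
        period = PERIOD_MAX;
    if ( period < PERIOD_MIN )
        period = PERIOD_MIN;

    return period;
}

int main(void)
{
    uint64_t p = 8000;

    p = next_period(p, 0);   /* clean pass  -> 16000 */
    printf("%llu\n", (unsigned long long)p);
    p = next_period(p, 3);   /* 3 errors    -> 4000 */
    printf("%llu\n", (unsigned long long)p);
    p = next_period(p, 0);   /* clean again -> 8000 */
    printf("%llu\n", (unsigned long long)p);

    return 0;
}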
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 4f5de07e01..f81cd9b695 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -354,7 +354,8 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
#if CONFIG_HVM
static int vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
{
- struct hvm_vmce_vcpu ctxt = {
+ struct hvm_vmce_vcpu ctxt =
+ {
.caps = v->arch.vmce.mcg_cap,
.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
@@ -489,14 +490,14 @@ int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
mc_bank->mc_addr, mc_bank->mc_misc);
if ( broadcast )
for_each_vcpu ( d, v )
- {
- if ( !v->vcpu_id )
- continue;
- err = vcpu_fill_mc_msrs(v, MCG_STATUS_MCIP | MCG_STATUS_RIPV,
- 0, 0, 0);
- if ( err )
- ret = err;
- }
+ {
+ if ( !v->vcpu_id )
+ continue;
+ err = vcpu_fill_mc_msrs(v, MCG_STATUS_MCIP | MCG_STATUS_RIPV,
+ 0, 0, 0);
+ if ( err )
+ ret = err;
+ }
return ret;
}
@@ -554,7 +555,7 @@ int vmce_enable_mca_cap(struct domain *d, uint64_t cap)
if ( !lmce_support )
return -EINVAL;
for_each_vcpu(d, v)
- v->arch.vmce.mcg_cap |= MCG_LMCE_P;
+ v->arch.vmce.mcg_cap |= MCG_LMCE_P;
}
return 0;
diff --git a/xen/arch/x86/cpu/mcheck/x86_mca.h b/xen/arch/x86/cpu/mcheck/x86_mca.h
index 8f7fced0fe..df70ce1a94 100644
--- a/xen/arch/x86/cpu/mcheck/x86_mca.h
+++ b/xen/arch/x86/cpu/mcheck/x86_mca.h
@@ -1,6 +1,6 @@
/*
* MCA implementation for AMD CPUs
- * Copyright (c) 2007-2012 Advanced Micro Devices, Inc.
+ * Copyright (c) 2007-2012 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -111,14 +111,14 @@ static inline void mcabanks_clear(int bit, struct mca_banks *banks)
clear_bit(bit, banks->bank_map);
}
-static inline void mcabanks_set(int bit, struct mca_banks* banks)
+static inline void mcabanks_set(int bit, struct mca_banks *banks)
{
if (!banks || !banks->bank_map || bit >= banks->num)
return;
set_bit(bit, banks->bank_map);
}
-static inline int mcabanks_test(int bit, struct mca_banks* banks)
+static inline int mcabanks_test(int bit, struct mca_banks *banks)
{
if (!banks || !banks->bank_map || bit >= banks->num)
return 0;
@@ -130,7 +130,8 @@ void mcabanks_free(struct mca_banks *banks);
extern struct mca_banks *mca_allbanks;
/* Keep bank so that we can get status even if mib is NULL */
-struct mca_binfo {
+struct mca_binfo
+{
int bank;
struct mcinfo_global *mig;
struct mcinfo_bank *mib;
@@ -156,7 +157,7 @@ struct mca_error_handler
*/
bool (*owned_error)(uint64_t status);
void (*recovery_handler)(struct mca_binfo *binfo,
- enum mce_result *result, const struct cpu_user_regs *regs);
+ enum mce_result *result, const struct cpu_user_regs *regs);
};
/* Global variables */
diff --git a/xen/arch/x86/cpu/mtrr/generic.c b/xen/arch/x86/cpu/mtrr/generic.c
index 94ee7d61ad..a354d5d776 100644
--- a/xen/arch/x86/cpu/mtrr/generic.c
+++ b/xen/arch/x86/cpu/mtrr/generic.c
@@ -1,5 +1,5 @@
/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
- because MTRRs can span upto 40 bits (36bits on most modern x86) */
+ because MTRRs can span upto 40 bits (36bits on most modern x86) */
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
@@ -13,14 +13,16 @@
#include <asm/cpufeature.h>
#include "mtrr.h"
-static const struct fixed_range_block {
- uint32_t base_msr; /* start address of an MTRR block */
- unsigned int ranges; /* number of MTRRs in this block */
-} fixed_range_blocks[] = {
- { MSR_MTRRfix64K_00000, (0x80000 - 0x00000) >> (16 + 3) },
- { MSR_MTRRfix16K_80000, (0xC0000 - 0x80000) >> (14 + 3) },
- { MSR_MTRRfix4K_C0000, (0x100000 - 0xC0000) >> (12 + 3) },
- {}
+static const struct fixed_range_block
+{
+ uint32_t base_msr; /* start address of an MTRR block */
+ unsigned int ranges; /* number of MTRRs in this block */
+} fixed_range_blocks[] =
+{
+ { MSR_MTRRfix64K_00000, (0x80000 - 0x00000) >> (16 + 3) },
+ { MSR_MTRRfix16K_80000, (0xC0000 - 0x80000) >> (14 + 3) },
+ { MSR_MTRRfix4K_C0000, (0x100000 - 0xC0000) >> (12 + 3) },
+ {}
};
static unsigned long smp_changes_mask;
@@ -30,25 +32,26 @@ struct mtrr_state mtrr_state = {};
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
- rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
- rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
+ rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
+ rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
}
static void
-get_fixed_ranges(mtrr_type * frs)
+get_fixed_ranges(mtrr_type *frs)
{
- uint64_t *p = (uint64_t *) frs;
- const struct fixed_range_block *block;
+ uint64_t *p = (uint64_t *) frs;
+ const struct fixed_range_block *block;
- if (!mtrr_state.have_fixed)
- return;
+ if (!mtrr_state.have_fixed)
+ return;
- for (block = fixed_range_blocks; block->ranges; ++block) {
- unsigned int i;
+ for (block = fixed_range_blocks; block->ranges; ++block)
+ {
+ unsigned int i;
- for (i = 0; i < block->ranges; ++i, ++p)
- rdmsrl(block->base_msr + i, *p);
- }
+ for (i = 0; i < block->ranges; ++i, ++p)
+ rdmsrl(block->base_msr + i, *p);
+ }
}
bool is_var_mtrr_overlapped(const struct mtrr_state *m)
@@ -85,38 +88,39 @@ bool is_var_mtrr_overlapped(const struct mtrr_state *m)
void mtrr_save_fixed_ranges(void *info)
{
- get_fixed_ranges(mtrr_state.fixed_ranges);
+ get_fixed_ranges(mtrr_state.fixed_ranges);
}
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
- unsigned int i;
- struct mtrr_var_range *vrs;
- uint64_t msr_content;
-
- if (!mtrr_state.var_ranges) {
- mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
- num_var_ranges);
- if (!mtrr_state.var_ranges)
- return;
- }
- vrs = mtrr_state.var_ranges;
-
- rdmsrl(MSR_MTRRcap, msr_content);
- mtrr_state.have_fixed = (msr_content >> 8) & 1;
-
- for (i = 0; i < num_var_ranges; i++)
- get_mtrr_var_range(i, &vrs[i]);
- get_fixed_ranges(mtrr_state.fixed_ranges);
-
- rdmsrl(MSR_MTRRdefType, msr_content);
- mtrr_state.def_type = (msr_content & 0xff);
- mtrr_state.enabled = MASK_EXTR(msr_content, MTRRdefType_E);
- mtrr_state.fixed_enabled = MASK_EXTR(msr_content, MTRRdefType_FE);
-
- /* Store mtrr_cap for HVM MTRR virtualisation. */
- rdmsrl(MSR_MTRRcap, mtrr_state.mtrr_cap);
+ unsigned int i;
+ struct mtrr_var_range *vrs;
+ uint64_t msr_content;
+
+ if (!mtrr_state.var_ranges)
+ {
+ mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
+ num_var_ranges);
+ if (!mtrr_state.var_ranges)
+ return;
+ }
+ vrs = mtrr_state.var_ranges;
+
+ rdmsrl(MSR_MTRRcap, msr_content);
+ mtrr_state.have_fixed = (msr_content >> 8) & 1;
+
+ for (i = 0; i < num_var_ranges; i++)
+ get_mtrr_var_range(i, &vrs[i]);
+ get_fixed_ranges(mtrr_state.fixed_ranges);
+
+ rdmsrl(MSR_MTRRdefType, msr_content);
+ mtrr_state.def_type = (msr_content & 0xff);
+ mtrr_state.enabled = MASK_EXTR(msr_content, MTRRdefType_E);
+ mtrr_state.fixed_enabled = MASK_EXTR(msr_content, MTRRdefType_FE);
+
+ /* Store mtrr_cap for HVM MTRR virtualisation. */
+ rdmsrl(MSR_MTRRcap, mtrr_state.mtrr_cap);
}
static bool_t __initdata mtrr_show;
@@ -124,16 +128,16 @@ boolean_param("mtrr.show", mtrr_show);
static const char *__init mtrr_attrib_to_str(mtrr_type x)
{
- static const char __initconst strings[MTRR_NUM_TYPES][16] =
- {
- [MTRR_TYPE_UNCACHABLE] = "uncachable",
- [MTRR_TYPE_WRCOMB] = "write-combining",
- [MTRR_TYPE_WRTHROUGH] = "write-through",
- [MTRR_TYPE_WRPROT] = "write-protect",
- [MTRR_TYPE_WRBACK] = "write-back",
- };
-
- return (x < ARRAY_SIZE(strings) && strings[x][0]) ? strings[x] : "?";
+ static const char __initconst strings[MTRR_NUM_TYPES][16] =
+ {
+ [MTRR_TYPE_UNCACHABLE] = "uncachable",
+ [MTRR_TYPE_WRCOMB] = "write-combining",
+ [MTRR_TYPE_WRTHROUGH] = "write-through",
+ [MTRR_TYPE_WRPROT] = "write-protect",
+ [MTRR_TYPE_WRBACK] = "write-back",
+ };
+
+ return (x < ARRAY_SIZE(strings) && strings[x][0]) ? strings[x] : "?";
}
static unsigned int __initdata last_fixed_start;
@@ -142,114 +146,124 @@ static mtrr_type __initdata last_fixed_type;
static void __init print_fixed_last(const char *level)
{
- if (!last_fixed_end)
- return;
+ if (!last_fixed_end)
+ return;
- printk("%s %05x-%05x %s\n", level, last_fixed_start,
- last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
+ printk("%s %05x-%05x %s\n", level, last_fixed_start,
+ last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
- last_fixed_end = 0;
+ last_fixed_end = 0;
}
static void __init update_fixed_last(unsigned int base, unsigned int end,
- mtrr_type type)
+ mtrr_type type)
{
- last_fixed_start = base;
- last_fixed_end = end;
- last_fixed_type = type;
+ last_fixed_start = base;
+ last_fixed_end = end;
+ last_fixed_type = type;
}
static void __init print_fixed(unsigned int base, unsigned int step,
- const mtrr_type *types, const char *level)
+ const mtrr_type *types, const char *level)
{
- unsigned i;
-
- for (i = 0; i < 8; ++i, ++types, base += step) {
- if (last_fixed_end == 0) {
- update_fixed_last(base, base + step, *types);
- continue;
- }
- if (last_fixed_end == base && last_fixed_type == *types) {
- last_fixed_end = base + step;
- continue;
- }
- /* new segments: gap or different type */
- print_fixed_last(level);
- update_fixed_last(base, base + step, *types);
- }
+ unsigned i;
+
+ for (i = 0; i < 8; ++i, ++types, base += step)
+ {
+ if (last_fixed_end == 0)
+ {
+ update_fixed_last(base, base + step, *types);
+ continue;
+ }
+ if (last_fixed_end == base && last_fixed_type == *types)
+ {
+ last_fixed_end = base + step;
+ continue;
+ }
+ /* new segments: gap or different type */
+ print_fixed_last(level);
+ update_fixed_last(base, base + step, *types);
+ }
}
static void __init print_mtrr_state(const char *level)
{
- unsigned int i;
- int width;
-
- printk("%sMTRR default type: %s\n", level,
- mtrr_attrib_to_str(mtrr_state.def_type));
- if (mtrr_state.have_fixed) {
- const mtrr_type *fr = mtrr_state.fixed_ranges;
- const struct fixed_range_block *block = fixed_range_blocks;
- unsigned int base = 0, step = 0x10000;
-
- printk("%sMTRR fixed ranges %sabled:\n", level,
- mtrr_state.fixed_enabled ? "en" : "dis");
- for (; block->ranges; ++block, step >>= 2) {
- for (i = 0; i < block->ranges; ++i, fr += 8) {
- print_fixed(base, step, fr, level);
- base += 8 * step;
- }
- }
- print_fixed_last(level);
- }
- printk("%sMTRR variable ranges %sabled:\n", level,
- mtrr_state.enabled ? "en" : "dis");
- width = (paddr_bits - PAGE_SHIFT + 3) / 4;
-
- for (i = 0; i < num_var_ranges; ++i) {
- if (mtrr_state.var_ranges[i].mask & MTRR_PHYSMASK_VALID)
- printk("%s %u base %0*"PRIx64"000 mask %0*"PRIx64"000 %s\n",
- level, i,
- width, mtrr_state.var_ranges[i].base >> 12,
- width, mtrr_state.var_ranges[i].mask >> 12,
- mtrr_attrib_to_str(mtrr_state.var_ranges[i].base &
- MTRR_PHYSBASE_TYPE_MASK));
- else
- printk("%s %u disabled\n", level, i);
- }
-
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 >= 0xf) ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- uint64_t syscfg, tom2;
-
- rdmsrl(MSR_K8_SYSCFG, syscfg);
- if (syscfg & (1 << 21)) {
- rdmsrl(MSR_K8_TOP_MEM2, tom2);
- printk("%sTOM2: %012"PRIx64"%s\n", level, tom2,
- syscfg & (1 << 22) ? " (WB)" : "");
- }
- }
+ unsigned int i;
+ int width;
+
+ printk("%sMTRR default type: %s\n", level,
+ mtrr_attrib_to_str(mtrr_state.def_type));
+ if (mtrr_state.have_fixed)
+ {
+ const mtrr_type *fr = mtrr_state.fixed_ranges;
+ const struct fixed_range_block *block = fixed_range_blocks;
+ unsigned int base = 0, step = 0x10000;
+
+ printk("%sMTRR fixed ranges %sabled:\n", level,
+ mtrr_state.fixed_enabled ? "en" : "dis");
+ for (; block->ranges; ++block, step >>= 2)
+ {
+ for (i = 0; i < block->ranges; ++i, fr += 8)
+ {
+ print_fixed(base, step, fr, level);
+ base += 8 * step;
+ }
+ }
+ print_fixed_last(level);
+ }
+ printk("%sMTRR variable ranges %sabled:\n", level,
+ mtrr_state.enabled ? "en" : "dis");
+ width = (paddr_bits - PAGE_SHIFT + 3) / 4;
+
+ for (i = 0; i < num_var_ranges; ++i)
+ {
+ if (mtrr_state.var_ranges[i].mask & MTRR_PHYSMASK_VALID)
+ printk("%s %u base %0*"PRIx64"000 mask %0*"PRIx64"000 %s\n",
+ level, i,
+ width, mtrr_state.var_ranges[i].base >> 12,
+ width, mtrr_state.var_ranges[i].mask >> 12,
+ mtrr_attrib_to_str(mtrr_state.var_ranges[i].base &
+ MTRR_PHYSBASE_TYPE_MASK));
+ else
+ printk("%s %u disabled\n", level, i);
+ }
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 >= 0xf) ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ {
+ uint64_t syscfg, tom2;
+
+ rdmsrl(MSR_K8_SYSCFG, syscfg);
+ if (syscfg & (1 << 21))
+ {
+ rdmsrl(MSR_K8_TOP_MEM2, tom2);
+ printk("%sTOM2: %012"PRIx64"%s\n", level, tom2,
+ syscfg & (1 << 22) ? " (WB)" : "");
+ }
+ }
}
/* Some BIOS's are fucked and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
- unsigned long mask = smp_changes_mask;
-
- if (mtrr_show)
- print_mtrr_state(mask ? KERN_WARNING : "");
- if (!mask)
- return;
- if (mask & MTRR_CHANGE_MASK_FIXED)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
- if (mask & MTRR_CHANGE_MASK_VARIABLE)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
- if (mask & MTRR_CHANGE_MASK_DEFTYPE)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
- printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
- printk(KERN_INFO "mtrr: corrected configuration.\n");
- if (!mtrr_show)
- print_mtrr_state(KERN_INFO);
+ unsigned long mask = smp_changes_mask;
+
+ if (mtrr_show)
+ print_mtrr_state(mask ? KERN_WARNING : "");
+ if (!mask)
+ return;
+ if (mask & MTRR_CHANGE_MASK_FIXED)
+ printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+ if (mask & MTRR_CHANGE_MASK_VARIABLE)
+ printk(KERN_WARNING
+ "mtrr: your CPUs had inconsistent variable MTRR settings\n");
+ if (mask & MTRR_CHANGE_MASK_DEFTYPE)
+ printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+ printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
+ printk(KERN_INFO "mtrr: corrected configuration.\n");
+ if (!mtrr_show)
+ print_mtrr_state(KERN_INFO);
}
/* Doesn't attempt to pass an error out to MTRR users
@@ -257,12 +271,12 @@ void __init mtrr_state_warn(void)
worth it because the best error handling is to ignore it. */
static void mtrr_wrmsr(unsigned int msr, uint64_t msr_content)
{
- if (wrmsr_safe(msr, msr_content) < 0)
- printk(KERN_ERR
- "MTRR: CPU %u: Writing MSR %x to %"PRIx64" failed\n",
- smp_processor_id(), msr, msr_content);
- /* Cache overlap status for efficient HVM MTRR virtualisation. */
- mtrr_state.overlapped = is_var_mtrr_overlapped(&mtrr_state);
+ if (wrmsr_safe(msr, msr_content) < 0)
+ printk(KERN_ERR
+ "MTRR: CPU %u: Writing MSR %x to %"PRIx64" failed\n",
+ smp_processor_id(), msr, msr_content);
+ /* Cache overlap status for efficient HVM MTRR virtualisation. */
+ mtrr_state.overlapped = is_var_mtrr_overlapped(&mtrr_state);
}
/**
@@ -275,63 +289,67 @@ static void mtrr_wrmsr(unsigned int msr, uint64_t msr_content)
*/
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
- uint64_t msr_content, val;
+ uint64_t msr_content, val;
- rdmsrl(msr, msr_content);
- val = ((uint64_t)msrwords[1] << 32) | msrwords[0];
+ rdmsrl(msr, msr_content);
+ val = ((uint64_t)msrwords[1] << 32) | msrwords[0];
- if (msr_content != val) {
- mtrr_wrmsr(msr, val);
- *changed = true;
- }
+ if (msr_content != val)
+ {
+ mtrr_wrmsr(msr, val);
+ *changed = true;
+ }
}
-int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
+int generic_get_free_region(unsigned long base, unsigned long size,
+ int replace_reg)
/* [SUMMARY] Get a free MTRR.
<base> The starting (base) address of the region.
<size> The size (in bytes) of the region.
[RETURNS] The index of the region on success, else -1 on error.
*/
{
- int i, max;
- mtrr_type ltype;
- unsigned long lbase, lsize;
-
- max = num_var_ranges;
- if (replace_reg >= 0 && replace_reg < max)
- return replace_reg;
- for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (lsize == 0)
- return i;
- }
- return -ENOSPC;
+ int i, max;
+ mtrr_type ltype;
+ unsigned long lbase, lsize;
+
+ max = num_var_ranges;
+ if (replace_reg >= 0 && replace_reg < max)
+ return replace_reg;
+ for (i = 0; i < max; ++i)
+ {
+ mtrr_if->get(i, &lbase, &lsize, &ltype);
+ if (lsize == 0)
+ return i;
+ }
+ return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type *type)
+ unsigned long *size, mtrr_type *type)
{
- uint64_t _mask, _base;
-
- rdmsrl(MSR_IA32_MTRR_PHYSMASK(reg), _mask);
- if (!(_mask & MTRR_PHYSMASK_VALID)) {
- /* Invalid (i.e. free) range */
- *base = 0;
- *size = 0;
- *type = 0;
- return;
- }
-
- rdmsrl(MSR_IA32_MTRR_PHYSBASE(reg), _base);
-
- /* Work out the shifted address mask. */
- _mask = size_or_mask | (_mask >> PAGE_SHIFT);
-
- /* This works correctly if size is a power of two, i.e. a
- contiguous range. */
- *size = -(uint32_t)_mask;
- *base = _base >> PAGE_SHIFT;
- *type = _base & 0xff;
+ uint64_t _mask, _base;
+
+ rdmsrl(MSR_IA32_MTRR_PHYSMASK(reg), _mask);
+ if (!(_mask & MTRR_PHYSMASK_VALID))
+ {
+ /* Invalid (i.e. free) range */
+ *base = 0;
+ *size = 0;
+ *type = 0;
+ return;
+ }
+
+ rdmsrl(MSR_IA32_MTRR_PHYSBASE(reg), _base);
+
+ /* Work out the shifted address mask. */
+ _mask = size_or_mask | (_mask >> PAGE_SHIFT);
+
+ /* This works correctly if size is a power of two, i.e. a
+ contiguous range. */
+ *size = -(uint32_t)_mask;
+ *base = _base >> PAGE_SHIFT;
+ *type = _base & 0xff;
}
/**
@@ -340,58 +358,60 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
*/
static bool set_fixed_ranges(mtrr_type *frs)
{
- unsigned long long *saved = (unsigned long long *) frs;
- bool changed = false;
- int block=-1, range;
+ unsigned long long *saved = (unsigned long long *) frs;
+ bool changed = false;
+ int block=-1, range;
- while (fixed_range_blocks[++block].ranges)
- for (range=0; range < fixed_range_blocks[block].ranges; range++)
- set_fixed_range(fixed_range_blocks[block].base_msr + range,
- &changed, (unsigned int *) saved++);
+ while (fixed_range_blocks[++block].ranges)
+ for (range=0; range < fixed_range_blocks[block].ranges; range++)
+ set_fixed_range(fixed_range_blocks[block].base_msr + range,
+ &changed, (unsigned int *) saved++);
- return changed;
+ return changed;
}
/* Set the MSR pair relating to a var range. Returns true if
changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
- uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi;
- uint64_t msr_content;
- bool changed = false;
-
- rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), msr_content);
- lo = (uint32_t)msr_content;
- hi = (uint32_t)(msr_content >> 32);
- base_lo = (uint32_t)vr->base;
- base_hi = (uint32_t)(vr->base >> 32);
-
- lo &= 0xfffff0ffUL;
- base_lo &= 0xfffff0ffUL;
- hi &= size_and_mask >> (32 - PAGE_SHIFT);
- base_hi &= size_and_mask >> (32 - PAGE_SHIFT);
-
- if ((base_lo != lo) || (base_hi != hi)) {
- mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
- changed = true;
- }
-
- rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), msr_content);
- lo = (uint32_t)msr_content;
- hi = (uint32_t)(msr_content >> 32);
- mask_lo = (uint32_t)vr->mask;
- mask_hi = (uint32_t)(vr->mask >> 32);
-
- lo &= 0xfffff800UL;
- mask_lo &= 0xfffff800UL;
- hi &= size_and_mask >> (32 - PAGE_SHIFT);
- mask_hi &= size_and_mask >> (32 - PAGE_SHIFT);
-
- if ((mask_lo != lo) || (mask_hi != hi)) {
- mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
- changed = true;
- }
- return changed;
+ uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi;
+ uint64_t msr_content;
+ bool changed = false;
+
+ rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), msr_content);
+ lo = (uint32_t)msr_content;
+ hi = (uint32_t)(msr_content >> 32);
+ base_lo = (uint32_t)vr->base;
+ base_hi = (uint32_t)(vr->base >> 32);
+
+ lo &= 0xfffff0ffUL;
+ base_lo &= 0xfffff0ffUL;
+ hi &= size_and_mask >> (32 - PAGE_SHIFT);
+ base_hi &= size_and_mask >> (32 - PAGE_SHIFT);
+
+ if ((base_lo != lo) || (base_hi != hi))
+ {
+ mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
+ changed = true;
+ }
+
+ rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), msr_content);
+ lo = (uint32_t)msr_content;
+ hi = (uint32_t)(msr_content >> 32);
+ mask_lo = (uint32_t)vr->mask;
+ mask_hi = (uint32_t)(vr->mask >> 32);
+
+ lo &= 0xfffff800UL;
+ mask_lo &= 0xfffff800UL;
+ hi &= size_and_mask >> (32 - PAGE_SHIFT);
+ mask_hi &= size_and_mask >> (32 - PAGE_SHIFT);
+
+ if ((mask_lo != lo) || (mask_hi != hi))
+ {
+ mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
+ changed = true;
+ }
+ return changed;
}
static uint64_t deftype;
@@ -404,28 +424,29 @@ static unsigned long set_mtrr_state(void)
[RETURNS] 0 if no changes made, else a mask indication what was changed.
*/
{
- unsigned int i;
- unsigned long change_mask = 0;
-
- for (i = 0; i < num_var_ranges; i++)
- if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
- change_mask |= MTRR_CHANGE_MASK_VARIABLE;
-
- if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
- change_mask |= MTRR_CHANGE_MASK_FIXED;
-
- /* Set_mtrr_restore restores the old value of MTRRdefType,
- so to set it we fiddle with the saved value */
- if ((deftype & 0xff) != mtrr_state.def_type
- || MASK_EXTR(deftype, MTRRdefType_E) != mtrr_state.enabled
- || MASK_EXTR(deftype, MTRRdefType_FE) != mtrr_state.fixed_enabled) {
- deftype = (deftype & ~0xcff) | mtrr_state.def_type |
- MASK_INSR(mtrr_state.enabled, MTRRdefType_E) |
- MASK_INSR(mtrr_state.fixed_enabled, MTRRdefType_FE);
- change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
- }
-
- return change_mask;
+ unsigned int i;
+ unsigned long change_mask = 0;
+
+ for (i = 0; i < num_var_ranges; i++)
+ if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
+ change_mask |= MTRR_CHANGE_MASK_VARIABLE;
+
+ if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
+ change_mask |= MTRR_CHANGE_MASK_FIXED;
+
+ /* Set_mtrr_restore restores the old value of MTRRdefType,
+ so to set it we fiddle with the saved value */
+ if ((deftype & 0xff) != mtrr_state.def_type
+ || MASK_EXTR(deftype, MTRRdefType_E) != mtrr_state.enabled
+ || MASK_EXTR(deftype, MTRRdefType_FE) != mtrr_state.fixed_enabled)
+ {
+ deftype = (deftype & ~0xcff) | mtrr_state.def_type |
+ MASK_INSR(mtrr_state.enabled, MTRRdefType_E) |
+ MASK_INSR(mtrr_state.fixed_enabled, MTRRdefType_FE);
+ change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
+ }
+
+ return change_mask;
}
@@ -440,79 +461,80 @@ static DEFINE_SPINLOCK(set_atomicity_lock);
static bool prepare_set(void)
{
- unsigned long cr4;
+ unsigned long cr4;
- /* Note that this is not ideal, since the cache is only flushed/disabled
- for this CPU while the MTRRs are changed, but changing this requires
- more invasive changes to the way the kernel boots */
+ /* Note that this is not ideal, since the cache is only flushed/disabled
+ for this CPU while the MTRRs are changed, but changing this requires
+ more invasive changes to the way the kernel boots */
- spin_lock(&set_atomicity_lock);
+ spin_lock(&set_atomicity_lock);
- /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
- write_cr0(read_cr0() | X86_CR0_CD);
- wbinvd();
+ /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+ write_cr0(read_cr0() | X86_CR0_CD);
+ wbinvd();
- cr4 = read_cr4();
- if (cr4 & X86_CR4_PGE)
- write_cr4(cr4 & ~X86_CR4_PGE);
- else if (use_invpcid)
- invpcid_flush_all();
- else
- write_cr3(read_cr3());
+ cr4 = read_cr4();
+ if (cr4 & X86_CR4_PGE)
+ write_cr4(cr4 & ~X86_CR4_PGE);
+ else if (use_invpcid)
+ invpcid_flush_all();
+ else
+ write_cr3(read_cr3());
- /* Save MTRR state */
- rdmsrl(MSR_MTRRdefType, deftype);
+ /* Save MTRR state */
+ rdmsrl(MSR_MTRRdefType, deftype);
- /* Disable MTRRs, and set the default type to uncached */
- mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff);
+ /* Disable MTRRs, and set the default type to uncached */
+ mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff);
- return cr4 & X86_CR4_PGE;
+ return cr4 & X86_CR4_PGE;
}
static void post_set(bool pge)
{
- /* Intel (P6) standard MTRRs */
- mtrr_wrmsr(MSR_MTRRdefType, deftype);
+ /* Intel (P6) standard MTRRs */
+ mtrr_wrmsr(MSR_MTRRdefType, deftype);
- /* Enable caches */
- write_cr0(read_cr0() & ~X86_CR0_CD);
+ /* Enable caches */
+ write_cr0(read_cr0() & ~X86_CR0_CD);
- /* Reenable CR4.PGE (also flushes the TLB) */
- if (pge)
- write_cr4(read_cr4() | X86_CR4_PGE);
- else if (use_invpcid)
- invpcid_flush_all();
- else
- write_cr3(read_cr3());
+ /* Reenable CR4.PGE (also flushes the TLB) */
+ if (pge)
+ write_cr4(read_cr4() | X86_CR4_PGE);
+ else if (use_invpcid)
+ invpcid_flush_all();
+ else
+ write_cr3(read_cr3());
- spin_unlock(&set_atomicity_lock);
+ spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
{
- unsigned long mask, count;
- unsigned long flags;
- bool pge;
+ unsigned long mask, count;
+ unsigned long flags;
+ bool pge;
- local_irq_save(flags);
- pge = prepare_set();
+ local_irq_save(flags);
+ pge = prepare_set();
- /* Actually set the state */
- mask = set_mtrr_state();
+ /* Actually set the state */
+ mask = set_mtrr_state();
- post_set(pge);
- local_irq_restore(flags);
+ post_set(pge);
+ local_irq_restore(flags);
- /* Use the atomic bitops to update the global mask */
- for (count = 0; count < sizeof mask * 8; ++count) {
- if (mask & 0x01)
- set_bit(count, &smp_changes_mask);
- mask >>= 1;
- }
+ /* Use the atomic bitops to update the global mask */
+ for (count = 0; count < sizeof mask * 8; ++count)
+ {
+ if (mask & 0x01)
+ set_bit(count, &smp_changes_mask);
+ mask >>= 1;
+ }
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
+ unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
<reg> The register to set.
<base> The base address of the region.
@@ -523,88 +545,99 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
[RETURNS] Nothing.
*/
{
- unsigned long flags;
- struct mtrr_var_range *vr;
- bool pge;
-
- vr = &mtrr_state.var_ranges[reg];
-
- local_irq_save(flags);
- pge = prepare_set();
-
- if (size == 0) {
- /* The invalid bit is kept in the mask, so we simply clear the
- relevant mask register to disable a range. */
- memset(vr, 0, sizeof(*vr));
- mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0);
- } else {
- uint32_t base_lo, base_hi, mask_lo, mask_hi;
-
- base_lo = base << PAGE_SHIFT | type;
- base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
- mask_lo = (-size << PAGE_SHIFT) | MTRR_PHYSMASK_VALID;
- mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
- vr->base = ((uint64_t)base_hi << 32) | base_lo;
- vr->mask = ((uint64_t)mask_hi << 32) | mask_lo;
-
- mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(reg), vr->base);
- mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask);
- }
-
- post_set(pge);
- local_irq_restore(flags);
+ unsigned long flags;
+ struct mtrr_var_range *vr;
+ bool pge;
+
+ vr = &mtrr_state.var_ranges[reg];
+
+ local_irq_save(flags);
+ pge = prepare_set();
+
+ if (size == 0)
+ {
+ /* The invalid bit is kept in the mask, so we simply clear the
+ relevant mask register to disable a range. */
+ memset(vr, 0, sizeof(*vr));
+ mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0);
+ }
+ else
+ {
+ uint32_t base_lo, base_hi, mask_lo, mask_hi;
+
+ base_lo = base << PAGE_SHIFT | type;
+ base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
+ mask_lo = (-size << PAGE_SHIFT) | MTRR_PHYSMASK_VALID;
+ mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
+ vr->base = ((uint64_t)base_hi << 32) | base_lo;
+ vr->mask = ((uint64_t)mask_hi << 32) | mask_lo;
+
+ mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(reg), vr->base);
+ mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask);
+ }
+
+ post_set(pge);
+ local_irq_restore(flags);
}
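/*
 * Illustrative sketch, not part of the patch: how a (base, size) pair in
 * pages becomes the PHYSBASE/PHYSMASK register pair written above.  The
 * mask is the two's complement of the size, truncated to the physical
 * address width, with the valid bit (bit 11) set.  The numbers below
 * assume a hypothetical 36-bit physical address space.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t base = 0xf8000;                /* page number of 0xf8000000   */
    uint64_t size = 0x1000;                 /* 0x1000 pages == 16 MiB      */
    uint64_t addr_mask = (1ULL << 36) - 1;  /* 36 physical address bits    */
    uint8_t  type = 1;                      /* MTRR_TYPE_WRCOMB            */

    uint64_t physbase = (base << 12) | type;
    uint64_t physmask = ((-size << 12) & addr_mask) | (1u << 11);

    printf("PHYSBASE=%#llx PHYSMASK=%#llx\n",
           (unsigned long long)physbase, (unsigned long long)physmask);
    /* Prints PHYSBASE=0xf8000001 PHYSMASK=0xfff000800 */
    return 0;
}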
-int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+int generic_validate_add_page(unsigned long base, unsigned long size,
+ unsigned int type)
{
- unsigned long lbase, last;
-
- /* For Intel PPro stepping <= 7, must be 4 MiB aligned
- and not touch 0x70000000->0x7003FFFF */
- if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == 1 &&
- boot_cpu_data.x86_mask <= 7) {
- if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
- printk(KERN_WARNING "mtrr: base(%#lx000) is not 4 MiB aligned\n", base);
- return -EINVAL;
- }
- if (!(base + size < 0x70000 || base > 0x7003F) &&
- (type == MTRR_TYPE_WRCOMB
- || type == MTRR_TYPE_WRBACK)) {
- printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
- return -EINVAL;
- }
- }
-
- /* Check upper bits of base and last are equal and lower bits are 0
- for base and 1 for last */
- last = base + size - 1;
- for (lbase = base; !(lbase & 1) && (last & 1);
- lbase = lbase >> 1, last = last >> 1) ;
- if (lbase != last) {
- printk(KERN_WARNING "mtrr: base(%#lx000) is not aligned on a size(%#lx000) boundary\n",
- base, size);
- return -EINVAL;
- }
- return 0;
+ unsigned long lbase, last;
+
+ /* For Intel PPro stepping <= 7, must be 4 MiB aligned
+ and not touch 0x70000000->0x7003FFFF */
+ if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 1 &&
+ boot_cpu_data.x86_mask <= 7)
+ {
+ if (base & ((1 << (22 - PAGE_SHIFT)) - 1))
+ {
+ printk(KERN_WARNING "mtrr: base(%#lx000) is not 4 MiB aligned\n", base);
+ return -EINVAL;
+ }
+ if (!(base + size < 0x70000 || base > 0x7003F) &&
+ (type == MTRR_TYPE_WRCOMB
+ || type == MTRR_TYPE_WRBACK))
+ {
+ printk(KERN_WARNING
+ "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Check upper bits of base and last are equal and lower bits are 0
+ for base and 1 for last */
+ last = base + size - 1;
+ for (lbase = base; !(lbase & 1) && (last & 1);
+ lbase = lbase >> 1, last = last >> 1) ;
+ if (lbase != last)
+ {
+ printk(KERN_WARNING
+ "mtrr: base(%#lx000) is not aligned on a size(%#lx000) boundary\n",
+ base, size);
+ return -EINVAL;
+ }
+ return 0;
}
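/*
 * Illustrative sketch, not part of the patch: the shift loop above strips
 * matching low bits -- zeroes from base, ones from last -- so the two
 * collapse to the same value exactly when size is a power of two and base
 * is aligned to it.  A hypothetical standalone form of the same check:
 */
#include <stdbool.h>

static bool range_is_power_of_two_aligned(unsigned long base, unsigned long size)
{
    unsigned long last = base + size - 1;

    while (!(base & 1) && (last & 1))
    {
        base >>= 1;
        last >>= 1;
    }
    /* e.g. (0x100, 0x100) -> true; (0x100, 0x180) -> false */
    return base == last;
}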
static int generic_have_wrcomb(void)
{
- unsigned long config;
- rdmsrl(MSR_MTRRcap, config);
- return (config & (1ULL << 10));
+ unsigned long config;
+ rdmsrl(MSR_MTRRcap, config);
+ return (config & (1ULL << 10));
}
/* generic structure...
*/
-const struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = true,
- .set_all = generic_set_all,
- .get = generic_get_mtrr,
- .get_free_region = generic_get_free_region,
- .set = generic_set_mtrr,
- .validate_add_page = generic_validate_add_page,
- .have_wrcomb = generic_have_wrcomb,
+const struct mtrr_ops generic_mtrr_ops =
+{
+ .use_intel_if = true,
+ .set_all = generic_set_all,
+ .get = generic_get_mtrr,
+ .get_free_region = generic_get_free_region,
+ .set = generic_set_mtrr,
+ .validate_add_page = generic_validate_add_page,
+ .have_wrcomb = generic_have_wrcomb,
};
diff --git a/xen/arch/x86/cpu/mtrr/main.c b/xen/arch/x86/cpu/mtrr/main.c
index e9df53f00d..99c953347d 100644
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -24,9 +24,9 @@
Operating System Writer's Guide" (Intel document number 242692),
section 11.11.7
- This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
- on 6-7 March 2002.
- Source: Intel Architecture Software Developers Manual, Volume 3:
+ This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
+ on 6-7 March 2002.
+ Source: Intel Architecture Software Developers Manual, Volume 3:
System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
@@ -60,7 +60,7 @@ u64 __read_mostly size_and_mask;
const struct mtrr_ops *__read_mostly mtrr_if = NULL;
static void set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type);
+ unsigned long size, mtrr_type type);
static const char *const mtrr_strings[MTRR_NUM_TYPES] =
{
@@ -75,58 +75,61 @@ static const char *const mtrr_strings[MTRR_NUM_TYPES] =
static const char *mtrr_attrib_to_str(int x)
{
- return (x <= 6) ? mtrr_strings[x] : "?";
+ return (x <= 6) ? mtrr_strings[x] : "?";
}
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
- return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
+ return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/* This function returns the number of variable MTRRs */
static void __init set_num_var_ranges(void)
{
- unsigned long config = 0;
-
- if (use_intel()) {
- rdmsrl(MSR_MTRRcap, config);
- } else if (is_cpu(AMD))
- config = 2;
- else if (is_cpu(CENTAUR))
- config = 8;
- num_var_ranges = MASK_EXTR(config, MTRRcap_VCNT);
+ unsigned long config = 0;
+
+    if (use_intel())
+        rdmsrl(MSR_MTRRcap, config);
+    else if (is_cpu(AMD))

+ config = 2;
+ else if (is_cpu(CENTAUR))
+ config = 8;
+ num_var_ranges = MASK_EXTR(config, MTRRcap_VCNT);
}
static void __init init_table(void)
{
- int i, max;
-
- max = num_var_ranges;
- if ((usage_table = xmalloc_array(unsigned int, max)) == NULL) {
- printk(KERN_ERR "mtrr: could not allocate\n");
- return;
- }
- for (i = 0; i < max; i++)
- usage_table[i] = 1;
+ int i, max;
+
+ max = num_var_ranges;
+ if ((usage_table = xmalloc_array(unsigned int, max)) == NULL)
+ {
+ printk(KERN_ERR "mtrr: could not allocate\n");
+ return;
+ }
+ for (i = 0; i < max; i++)
+ usage_table[i] = 1;
}
-struct set_mtrr_data {
- atomic_t count;
- atomic_t gate;
- unsigned long smp_base;
- unsigned long smp_size;
- unsigned int smp_reg;
- mtrr_type smp_type;
+struct set_mtrr_data
+{
+ atomic_t count;
+ atomic_t gate;
+ unsigned long smp_base;
+ unsigned long smp_size;
+ unsigned int smp_reg;
+ mtrr_type smp_type;
};
/* As per the IA32 SDM vol-3: 10.11.8 MTRR Considerations in MP Systems section
* MTRRs updates must to be synchronized across all the processors.
* This flags avoids multiple cpu synchronization while booting each cpu.
* At the boot & resume time, this flag is turned on in mtrr_aps_sync_begin().
- * Using this flag the mtrr initialization (and the all cpus sync up) in the
- * mtrr_ap_init() is avoided while booting each cpu.
- * After all the cpus have came up, then mtrr_aps_sync_end() synchronizes all
+ * Using this flag, the mtrr initialization (and the all-cpus sync-up) in
+ * mtrr_ap_init() is avoided while booting each cpu.
+ * After all the cpus have come up, mtrr_aps_sync_end() synchronizes all
* the cpus and updates mtrrs on all of them. Then this flag is turned off.
*/
int hold_mtrr_updates_on_aps;
@@ -136,37 +139,38 @@ static void ipi_handler(void *info)
[RETURNS] Nothing.
*/
{
- struct set_mtrr_data *data = info;
- unsigned long flags;
-
- local_irq_save(flags);
-
- atomic_dec(&data->count);
- while(!atomic_read(&data->gate))
- cpu_relax();
-
- /* The master has cleared me to execute */
- if (data->smp_reg == ~0U) /* update all mtrr registers */
- /* At the cpu hot-add time this will reinitialize mtrr
- * registres on the existing cpus. It is ok. */
- mtrr_if->set_all();
- else /* single mtrr register update */
- mtrr_if->set(data->smp_reg, data->smp_base,
- data->smp_size, data->smp_type);
-
- atomic_dec(&data->count);
- while(atomic_read(&data->gate))
- cpu_relax();
-
- atomic_dec(&data->count);
- local_irq_restore(flags);
+ struct set_mtrr_data *data = info;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ atomic_dec(&data->count);
+ while (!atomic_read(&data->gate))
+ cpu_relax();
+
+ /* The master has cleared me to execute */
+ if (data->smp_reg == ~0U) /* update all mtrr registers */
+        /* At cpu hot-add time this will reinitialize the mtrr
+         * registers on the existing cpus. It is ok. */
+ mtrr_if->set_all();
+ else /* single mtrr register update */
+ mtrr_if->set(data->smp_reg, data->smp_base,
+ data->smp_size, data->smp_type);
+
+ atomic_dec(&data->count);
+ while (atomic_read(&data->gate))
+ cpu_relax();
+
+ atomic_dec(&data->count);
+ local_irq_restore(flags);
}
-static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
- return type1 == MTRR_TYPE_UNCACHABLE ||
- type2 == MTRR_TYPE_UNCACHABLE ||
- (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
- (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
+static inline int types_compatible(mtrr_type type1, mtrr_type type2)
+{
+ return type1 == MTRR_TYPE_UNCACHABLE ||
+ type2 == MTRR_TYPE_UNCACHABLE ||
+ (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
+ (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
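/*
 * Illustrative sketch, not part of the patch: the only overlapping type
 * pairs tolerated above are UC against anything (UC wins when variable
 * ranges overlap) and the WT/WB pair (WT wins).  A hypothetical self-test:
 */
static void __init types_compatible_selftest(void)
{
    BUG_ON(!types_compatible(MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRCOMB));
    BUG_ON(!types_compatible(MTRR_TYPE_WRTHROUGH, MTRR_TYPE_WRBACK));
    BUG_ON(types_compatible(MTRR_TYPE_WRCOMB, MTRR_TYPE_WRBACK));
}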
/**
@@ -177,10 +181,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
* @type: mtrr type
*
* This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
- *
+ *
* 1. Send IPI to do the following:
* 2. Disable Interrupts
- * 3. Wait for all procs to do so
+ * 3. Wait for all procs to do so
* 4. Enter no-fill cache mode
* 5. Flush caches
* 6. Clear PGE bit
@@ -190,18 +194,18 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
* 10. Enable all range registers
* 11. Flush all TLBs and caches again
* 12. Enter normal cache mode and reenable caching
- * 13. Set PGE
+ * 13. Set PGE
* 14. Wait for buddies to catch up
* 15. Enable interrupts.
- *
+ *
* What does that mean for us? Well, first we set data.count to the number
* of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
* until it hits 0 and proceed. We set the data.gate flag and reset data.count.
- * Meanwhile, they are waiting for that flag to be set. Once it's set, each
- * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
+ * Meanwhile, they are waiting for that flag to be set. Once it's set, each
+ * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
* differently, so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate to
- * be reset.
+ * When they're done, they again decrement data->count and wait for data.gate to
+ * be reset.
* When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
* Everyone then enables interrupts and we all continue on.
*
@@ -209,68 +213,68 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
* becomes nops.
*/
static void set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
+ unsigned long size, mtrr_type type)
{
- cpumask_t allbutself;
- unsigned int nr_cpus;
- struct set_mtrr_data data;
- unsigned long flags;
-
- cpumask_andnot(&allbutself, &cpu_online_map,
- cpumask_of(smp_processor_id()));
- nr_cpus = cpumask_weight(&allbutself);
-
- data.smp_reg = reg;
- data.smp_base = base;
- data.smp_size = size;
- data.smp_type = type;
- atomic_set(&data.count, nr_cpus);
- atomic_set(&data.gate,0);
-
- /* Start the ball rolling on other CPUs */
- on_selected_cpus(&allbutself, ipi_handler, &data, 0);
-
- local_irq_save(flags);
-
- while (atomic_read(&data.count))
- cpu_relax();
-
- /* ok, reset count and toggle gate */
- atomic_set(&data.count, nr_cpus);
- smp_wmb();
- atomic_set(&data.gate,1);
-
- /* do our MTRR business */
-
- /* HACK!
- * We use this same function to initialize the mtrrs on boot.
- * The state of the boot cpu's mtrrs has been saved, and we want
- * to replicate across all the APs.
- * If we're doing that @reg is set to something special...
- */
- if (reg == ~0U) /* update all mtrr registers */
- /* at boot or resume time, this will reinitialize the mtrrs on
- * the bp. It is ok. */
- mtrr_if->set_all();
- else /* update the single mtrr register */
- mtrr_if->set(reg,base,size,type);
-
- /* wait for the others */
- while (atomic_read(&data.count))
- cpu_relax();
-
- atomic_set(&data.count, nr_cpus);
- smp_wmb();
- atomic_set(&data.gate,0);
-
- /*
- * Wait here for everyone to have seen the gate change
- * So we're the last ones to touch 'data'
- */
- while (atomic_read(&data.count))
- cpu_relax();
-
- local_irq_restore(flags);
+ cpumask_t allbutself;
+ unsigned int nr_cpus;
+ struct set_mtrr_data data;
+ unsigned long flags;
+
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
+ nr_cpus = cpumask_weight(&allbutself);
+
+ data.smp_reg = reg;
+ data.smp_base = base;
+ data.smp_size = size;
+ data.smp_type = type;
+ atomic_set(&data.count, nr_cpus);
+ atomic_set(&data.gate, 0);
+
+ /* Start the ball rolling on other CPUs */
+ on_selected_cpus(&allbutself, ipi_handler, &data, 0);
+
+ local_irq_save(flags);
+
+ while (atomic_read(&data.count))
+ cpu_relax();
+
+ /* ok, reset count and toggle gate */
+ atomic_set(&data.count, nr_cpus);
+ smp_wmb();
+ atomic_set(&data.gate, 1);
+
+ /* do our MTRR business */
+
+ /* HACK!
+ * We use this same function to initialize the mtrrs on boot.
+     * The state of the boot cpu's mtrrs has been saved, and we want
+     * to replicate it across all the APs.
+ * If we're doing that @reg is set to something special...
+ */
+ if (reg == ~0U) /* update all mtrr registers */
+ /* at boot or resume time, this will reinitialize the mtrrs on
+ * the bp. It is ok. */
+ mtrr_if->set_all();
+ else /* update the single mtrr register */
+ mtrr_if->set(reg, base, size, type);
+
+ /* wait for the others */
+ while (atomic_read(&data.count))
+ cpu_relax();
+
+ atomic_set(&data.count, nr_cpus);
+ smp_wmb();
+ atomic_set(&data.gate, 0);
+
+ /*
+ * Wait here for everyone to have seen the gate change
+ * So we're the last ones to touch 'data'
+ */
+ while (atomic_read(&data.count))
+ cpu_relax();
+
+ local_irq_restore(flags);
}
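/*
 * Illustrative sketch, not part of the patch: the count/gate handshake
 * used above, reduced to its bare shape.  Names are hypothetical; the real
 * code additionally disables interrupts and repeats the rendezvous twice
 * more so the initiator is the last CPU to touch the shared data.
 */
static atomic_t rendezvous_count;   /* CPUs still to arrive */
static atomic_t rendezvous_gate;    /* 0 = hold, 1 = go     */

static void follower(void)
{
    atomic_dec(&rendezvous_count);          /* "I have arrived"       */
    while (!atomic_read(&rendezvous_gate))
        cpu_relax();                        /* spin until released    */
    /* ... perform the local MTRR update here ... */
}

static void initiator(unsigned int nr_followers)
{
    atomic_set(&rendezvous_count, nr_followers);
    atomic_set(&rendezvous_gate, 0);
    /* kick the followers, e.g. via an IPI, then: */
    while (atomic_read(&rendezvous_count))
        cpu_relax();                        /* wait for all arrivals  */
    atomic_set(&rendezvous_count, nr_followers);
    smp_wmb();
    atomic_set(&rendezvous_gate, 1);        /* release everyone       */
}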
/**
@@ -283,7 +287,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
* Memory type region registers control the caching on newer Intel and
* non Intel processors. This function allows drivers to request an
* MTRR is added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
+ * implementation are hidden from the caller, but nevertheless the
* caller should expect to need to provide a power of two size on an
* equivalent power of two boundary.
*
@@ -309,113 +313,127 @@ static void set_mtrr(unsigned int reg, unsigned long base,
* failures and do not wish system log messages to be sent.
*/
-int mtrr_add_page(unsigned long base, unsigned long size,
- unsigned int type, char increment)
+int mtrr_add_page(unsigned long base, unsigned long size,
+ unsigned int type, char increment)
{
- int i, replace, error;
- mtrr_type ltype;
- unsigned long lbase, lsize;
-
- if (!mtrr_if)
- return -ENXIO;
-
- if ((error = mtrr_if->validate_add_page(base,size,type)))
- return error;
-
- if (type >= MTRR_NUM_TYPES) {
- printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
- return -EINVAL;
- }
-
- /* If the type is WC, check that this processor supports it */
- if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
- printk(KERN_WARNING
- "mtrr: your processor doesn't support write-combining\n");
- return -EOPNOTSUPP;
- }
-
- if (!size) {
- printk(KERN_WARNING "mtrr: zero sized request\n");
- return -EINVAL;
- }
-
- if ((base | (base + size - 1)) >> (paddr_bits - PAGE_SHIFT)) {
- printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
- return -EINVAL;
- }
-
- error = -EINVAL;
- replace = -1;
-
- /* Search for existing MTRR */
- mutex_lock(&mtrr_mutex);
- for (i = 0; i < num_var_ranges; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
- continue;
- /* At this point we know there is some kind of overlap/enclosure */
- if (base < lbase || base + size - 1 > lbase + lsize - 1) {
- if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
- /* New region encloses an existing region */
- if (type == ltype) {
- replace = replace == -1 ? i : -2;
- continue;
- }
- else if (types_compatible(type, ltype))
- continue;
- }
- printk(KERN_WARNING
- "mtrr: %#lx000,%#lx000 overlaps existing"
- " %#lx000,%#lx000\n", base, size, lbase,
- lsize);
- goto out;
- }
- /* New region is enclosed by an existing region */
- if (ltype != type) {
- if (types_compatible(type, ltype))
- continue;
- printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
- base, size, mtrr_attrib_to_str(ltype),
- mtrr_attrib_to_str(type));
- goto out;
- }
- if (increment)
- ++usage_table[i];
- error = i;
- goto out;
- }
- /* Search for an empty MTRR */
- i = mtrr_if->get_free_region(base, size, replace);
- if (i >= 0) {
- set_mtrr(i, base, size, type);
- if (likely(replace < 0))
- usage_table[i] = 1;
- else {
- usage_table[i] = usage_table[replace] + !!increment;
- if (unlikely(replace != i)) {
- set_mtrr(replace, 0, 0, 0);
- usage_table[replace] = 0;
- }
- }
- } else
- printk(KERN_INFO "mtrr: no more MTRRs available\n");
- error = i;
- out:
- mutex_unlock(&mtrr_mutex);
- return error;
+ int i, replace, error;
+ mtrr_type ltype;
+ unsigned long lbase, lsize;
+
+ if (!mtrr_if)
+ return -ENXIO;
+
+ if ((error = mtrr_if->validate_add_page(base, size, type)))
+ return error;
+
+ if (type >= MTRR_NUM_TYPES)
+ {
+ printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
+ return -EINVAL;
+ }
+
+ /* If the type is WC, check that this processor supports it */
+ if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb())
+ {
+ printk(KERN_WARNING
+ "mtrr: your processor doesn't support write-combining\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!size)
+ {
+ printk(KERN_WARNING "mtrr: zero sized request\n");
+ return -EINVAL;
+ }
+
+ if ((base | (base + size - 1)) >> (paddr_bits - PAGE_SHIFT))
+ {
+ printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
+ return -EINVAL;
+ }
+
+ error = -EINVAL;
+ replace = -1;
+
+ /* Search for existing MTRR */
+ mutex_lock(&mtrr_mutex);
+ for (i = 0; i < num_var_ranges; ++i)
+ {
+ mtrr_if->get(i, &lbase, &lsize, &ltype);
+ if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
+ continue;
+ /* At this point we know there is some kind of overlap/enclosure */
+ if (base < lbase || base + size - 1 > lbase + lsize - 1)
+ {
+ if (base <= lbase && base + size - 1 >= lbase + lsize - 1)
+ {
+ /* New region encloses an existing region */
+ if (type == ltype)
+ {
+ replace = replace == -1 ? i : -2;
+ continue;
+ }
+ else if (types_compatible(type, ltype))
+ continue;
+ }
+ printk(KERN_WARNING
+ "mtrr: %#lx000,%#lx000 overlaps existing"
+ " %#lx000,%#lx000\n", base, size, lbase,
+ lsize);
+ goto out;
+ }
+ /* New region is enclosed by an existing region */
+ if (ltype != type)
+ {
+ if (types_compatible(type, ltype))
+ continue;
+            printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+ base, size, mtrr_attrib_to_str(ltype),
+ mtrr_attrib_to_str(type));
+ goto out;
+ }
+ if (increment)
+ ++usage_table[i];
+ error = i;
+ goto out;
+ }
+ /* Search for an empty MTRR */
+ i = mtrr_if->get_free_region(base, size, replace);
+ if (i >= 0)
+ {
+ set_mtrr(i, base, size, type);
+ if (likely(replace < 0))
+ usage_table[i] = 1;
+ else
+ {
+ usage_table[i] = usage_table[replace] + !!increment;
+ if (unlikely(replace != i))
+ {
+ set_mtrr(replace, 0, 0, 0);
+ usage_table[replace] = 0;
+ }
+ }
+ }
+ else
+ printk(KERN_INFO "mtrr: no more MTRRs available\n");
+ error = i;
+out:
+ mutex_unlock(&mtrr_mutex);
+ return error;
}
static int mtrr_check(unsigned long base, unsigned long size)
{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_WARNING
- "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG
- "mtrr: size: %#lx base: %#lx\n", size, base);
- dump_stack();
- return -1;
- }
- return 0;
+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
+ {
+ printk(KERN_WARNING
+ "mtrr: size and base must be multiples of 4 kiB\n");
+ printk(KERN_DEBUG
+ "mtrr: size: %#lx base: %#lx\n", size, base);
+ dump_stack();
+ return -1;
+ }
+ return 0;
}
/**
@@ -428,7 +446,7 @@ static int mtrr_check(unsigned long base, unsigned long size)
* Memory type region registers control the caching on newer Intel and
* non Intel processors. This function allows drivers to request an
* MTRR is added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
+ * implementation are hidden from the caller, but nevertheless the
* caller should expect to need to provide a power of two size on an
* equivalent power of two boundary.
*
@@ -456,12 +474,12 @@ static int mtrr_check(unsigned long base, unsigned long size)
int __init
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
- char increment)
+ char increment)
{
- if (mtrr_check(base, size))
- return -EINVAL;
- return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
- increment);
+ if (mtrr_check(base, size))
+ return -EINVAL;
+ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
+ increment);
}
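/*
 * Illustrative usage, not part of the patch: marking a hypothetical 16 MiB
 * frame buffer at 0xf8000000 write-combining.  base and size are given in
 * bytes here; mtrr_add() converts them to pages for mtrr_add_page().
 */
static void __init example_wc_mapping(void)
{
    int reg = mtrr_add(0xf8000000UL, 0x1000000UL, MTRR_TYPE_WRCOMB, 1);

    if (reg < 0)
        printk("mtrr_add failed: %d\n", reg);
    /* ... later, release it with mtrr_del(reg, 0xf8000000UL, 0x1000000UL); */
}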
/**
@@ -473,7 +491,7 @@ mtrr_add(unsigned long base, unsigned long size, unsigned int type,
* If register is supplied then base and size are ignored. This is
* how drivers should call it.
*
- * Releases an MTRR region. If the usage count drops to zero the
+ * Releases an MTRR region. If the usage count drops to zero the
* register is freed and the region returns to default state.
* On success the register is returned, on failure a negative error
* code.
@@ -481,50 +499,57 @@ mtrr_add(unsigned long base, unsigned long size, unsigned int type,
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
- int i, max;
- mtrr_type ltype;
- unsigned long lbase, lsize;
- int error = -EINVAL;
-
- if (!mtrr_if)
- return -ENXIO;
-
- max = num_var_ranges;
- mutex_lock(&mtrr_mutex);
- if (reg < 0) {
- /* Search for existing MTRR */
- for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (lbase == base && lsize == size) {
- reg = i;
- break;
- }
- }
- if (reg < 0) {
- printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
- size);
- goto out;
- }
- }
- if (reg >= max) {
- printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
- goto out;
- }
- mtrr_if->get(reg, &lbase, &lsize, &ltype);
- if (lsize < 1) {
- printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
- goto out;
- }
- if (usage_table[reg] < 1) {
- printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
- goto out;
- }
- if (--usage_table[reg] < 1)
- set_mtrr(reg, 0, 0, 0);
- error = reg;
- out:
- mutex_unlock(&mtrr_mutex);
- return error;
+ int i, max;
+ mtrr_type ltype;
+ unsigned long lbase, lsize;
+ int error = -EINVAL;
+
+ if (!mtrr_if)
+ return -ENXIO;
+
+ max = num_var_ranges;
+ mutex_lock(&mtrr_mutex);
+ if (reg < 0)
+ {
+ /* Search for existing MTRR */
+ for (i = 0; i < max; ++i)
+ {
+ mtrr_if->get(i, &lbase, &lsize, &ltype);
+ if (lbase == base && lsize == size)
+ {
+ reg = i;
+ break;
+ }
+ }
+ if (reg < 0)
+ {
+ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
+ size);
+ goto out;
+ }
+ }
+ if (reg >= max)
+ {
+ printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
+ goto out;
+ }
+ mtrr_if->get(reg, &lbase, &lsize, &ltype);
+ if (lsize < 1)
+ {
+ printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
+ goto out;
+ }
+ if (usage_table[reg] < 1)
+ {
+ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
+ goto out;
+ }
+ if (--usage_table[reg] < 1)
+ set_mtrr(reg, 0, 0, 0);
+ error = reg;
+out:
+ mutex_unlock(&mtrr_mutex);
+ return error;
}
/**
* mtrr_del - delete a memory type region
@@ -535,7 +560,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
* If register is supplied then base and size are ignored. This is
* how drivers should call it.
*
- * Releases an MTRR region. If the usage count drops to zero the
+ * Releases an MTRR region. If the usage count drops to zero the
* register is freed and the region returns to default state.
* On success the register is returned, on failure a negative error
* code.
@@ -544,56 +569,59 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
int __init
mtrr_del(int reg, unsigned long base, unsigned long size)
{
- if (mtrr_check(base, size))
- return -EINVAL;
- return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
+ if (mtrr_check(base, size))
+ return -EINVAL;
+ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
* MTRR driver doesn't require this
*/
-struct mtrr_value {
- mtrr_type ltype;
- unsigned long lbase;
- unsigned long lsize;
+struct mtrr_value
+{
+ mtrr_type ltype;
+ unsigned long lbase;
+ unsigned long lsize;
};
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
*
- * This needs to be called early; before any of the other CPUs are
+ * This needs to be called early, before any of the other CPUs are
* initialized (i.e. before smp_init()).
- *
+ *
*/
void __init mtrr_bp_init(void)
{
- if (cpu_has_mtrr) {
- mtrr_if = &generic_mtrr_ops;
- size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
- size_and_mask = ~size_or_mask & 0xfffff00000ULL;
- }
-
- if (mtrr_if) {
- set_num_var_ranges();
- init_table();
- if (use_intel())
- get_mtrr_state();
- }
+ if (cpu_has_mtrr)
+ {
+ mtrr_if = &generic_mtrr_ops;
+ size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
+ size_and_mask = ~size_or_mask & 0xfffff00000ULL;
+ }
+
+ if (mtrr_if)
+ {
+ set_num_var_ranges();
+ init_table();
+ if (use_intel())
+ get_mtrr_state();
+ }
}
void mtrr_ap_init(void)
{
- if (!mtrr_if || !use_intel() || hold_mtrr_updates_on_aps)
- return;
- /*
- * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
- * but this routine will be called in cpu boot time, holding the lock
- * breaks it. This routine is called in two cases: 1.very earily time
- * of software resume, when there absolutely isn't mtrr entry changes;
- * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
- * prevent mtrr entry changes
- */
- set_mtrr(~0U, 0, 0, 0);
+ if (!mtrr_if || !use_intel() || hold_mtrr_updates_on_aps)
+ return;
+ /*
+     * Ideally we should hold mtrr_mutex here to avoid mtrr entries being
+     * changed, but this routine is called at cpu boot time and holding the
+     * lock there breaks it. This routine is called in two cases: 1. very
+     * early during software resume, when there are absolutely no mtrr entry
+     * changes; 2. at cpu hotadd time. We let mtrr_add/del_page hold the
+     * cpuhotplug lock to prevent mtrr entry changes
+ */
+ set_mtrr(~0U, 0, 0, 0);
}
/**
@@ -601,43 +629,43 @@ void mtrr_ap_init(void)
*/
void mtrr_save_state(void)
{
- int cpu = get_cpu();
+ int cpu = get_cpu();
- if (cpu == 0)
- mtrr_save_fixed_ranges(NULL);
- else
- on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1);
- put_cpu();
+ if (cpu == 0)
+ mtrr_save_fixed_ranges(NULL);
+ else
+ on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1);
+ put_cpu();
}
void mtrr_aps_sync_begin(void)
{
- if (!use_intel())
- return;
- hold_mtrr_updates_on_aps = 1;
+ if (!use_intel())
+ return;
+ hold_mtrr_updates_on_aps = 1;
}
void mtrr_aps_sync_end(void)
{
- if (!use_intel())
- return;
- set_mtrr(~0U, 0, 0, 0);
- hold_mtrr_updates_on_aps = 0;
+ if (!use_intel())
+ return;
+ set_mtrr(~0U, 0, 0, 0);
+ hold_mtrr_updates_on_aps = 0;
}
void mtrr_bp_restore(void)
{
- if (!use_intel())
- return;
- mtrr_if->set_all();
+ if (!use_intel())
+ return;
+ mtrr_if->set_all();
}
static int __init mtrr_init_finialize(void)
{
- if (!mtrr_if)
- return 0;
- if (use_intel())
- mtrr_state_warn();
- return 0;
+ if (!mtrr_if)
+ return 0;
+ if (use_intel())
+ mtrr_state_warn();
+ return 0;
}
__initcall(mtrr_init_finialize);
diff --git a/xen/arch/x86/cpu/mtrr/mtrr.h b/xen/arch/x86/cpu/mtrr/mtrr.h
index 9a406e6f61..118f4add40 100644
--- a/xen/arch/x86/cpu/mtrr/mtrr.h
+++ b/xen/arch/x86/cpu/mtrr/mtrr.h
@@ -7,27 +7,28 @@
#define MTRR_CHANGE_MASK_DEFTYPE 0x04
-struct mtrr_ops {
- u32 vendor;
- bool use_intel_if;
+struct mtrr_ops
+{
+ u32 vendor;
+ bool use_intel_if;
// void (*init)(void);
- void (*set)(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type);
- void (*set_all)(void);
-
- void (*get)(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type);
- int (*get_free_region)(unsigned long base, unsigned long size,
- int replace_reg);
- int (*validate_add_page)(unsigned long base, unsigned long size,
- unsigned int type);
- int (*have_wrcomb)(void);
+ void (*set)(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type);
+ void (*set_all)(void);
+
+ void (*get)(unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type);
+ int (*get_free_region)(unsigned long base, unsigned long size,
+ int replace_reg);
+ int (*validate_add_page)(unsigned long base, unsigned long size,
+ unsigned int type);
+ int (*have_wrcomb)(void);
};
extern int generic_get_free_region(unsigned long base, unsigned long size,
- int replace_reg);
+ int replace_reg);
extern int generic_validate_add_page(unsigned long base, unsigned long size,
- unsigned int type);
+ unsigned int type);
extern const struct mtrr_ops generic_mtrr_ops;
diff --git a/xen/arch/x86/cpu/mwait-idle.c b/xen/arch/x86/cpu/mwait-idle.c
index 24b757b8c1..21d1a03a45 100644
--- a/xen/arch/x86/cpu/mwait-idle.c
+++ b/xen/arch/x86/cpu/mwait-idle.c
@@ -80,25 +80,27 @@ static unsigned int mwait_substates;
/* Reliable LAPIC Timer States, bit 1 for C1 etc. Default to only C1. */
static unsigned int lapic_timer_reliable_states = (1 << 1);
-struct idle_cpu {
- const struct cpuidle_state *state_table;
-
- /*
- * Hardware C-state auto-demotion may not always be optimal.
- * Indicate which enable bits to clear here.
- */
- unsigned long auto_demotion_disable_flags;
- bool_t byt_auto_demotion_disable_flag;
- bool_t disable_promotion_to_c1e;
+struct idle_cpu
+{
+ const struct cpuidle_state *state_table;
+
+ /*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+ unsigned long auto_demotion_disable_flags;
+ bool_t byt_auto_demotion_disable_flag;
+ bool_t disable_promotion_to_c1e;
};
static const struct idle_cpu *icpu;
-static const struct cpuidle_state {
- char name[16];
- unsigned int flags;
- unsigned int exit_latency; /* in US */
- unsigned int target_residency; /* in US */
+static const struct cpuidle_state
+{
+ char name[16];
+ unsigned int flags;
+ unsigned int exit_latency; /* in US */
+ unsigned int target_residency; /* in US */
} *cpuidle_state_table;
#define CPUIDLE_FLAG_DISABLED 0x1
@@ -127,836 +129,876 @@ static const struct cpuidle_state {
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static const struct cpuidle_state nehalem_cstates[] = {
- {
- .name = "C1-NHM",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 3,
- .target_residency = 6,
- },
- {
- .name = "C1E-NHM",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C3-NHM",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 20,
- .target_residency = 80,
- },
- {
- .name = "C6-NHM",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 200,
- .target_residency = 800,
- },
- {}
+static const struct cpuidle_state nehalem_cstates[] =
+{
+ {
+ .name = "C1-NHM",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 3,
+ .target_residency = 6,
+ },
+ {
+ .name = "C1E-NHM",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C3-NHM",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 20,
+ .target_residency = 80,
+ },
+ {
+ .name = "C6-NHM",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 200,
+ .target_residency = 800,
+ },
+ {}
};
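/*
 * Illustrative sketch, not part of the patch: each .flags value above embeds
 * the raw MWAIT hint via MWAIT2flg().  Assuming the usual 4-bit sub-state
 * field (MWAIT_SUBSTATE_SIZE == 4, MWAIT_CSTATE_MASK == 0xf), mwait_idle()
 * further down recovers a C-state index from the hint as shown below; note
 * the index need not match the label in .name (hint 0x20 yields index 3
 * even though the table calls it C6).
 */
static unsigned int hint_to_cstate(unsigned int hint)
{
    return ((hint >> 4) & 0xf) + 1;    /* 0x00 -> 1, 0x10 -> 2, 0x20 -> 3 */
}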
-static const struct cpuidle_state snb_cstates[] = {
- {
- .name = "C1-SNB",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C1E-SNB",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C3-SNB",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 80,
- .target_residency = 211,
- },
- {
- .name = "C6-SNB",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 104,
- .target_residency = 345,
- },
- {
- .name = "C7-SNB",
- .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 109,
- .target_residency = 345,
- },
- {}
+static const struct cpuidle_state snb_cstates[] =
+{
+ {
+ .name = "C1-SNB",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C1E-SNB",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C3-SNB",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 80,
+ .target_residency = 211,
+ },
+ {
+ .name = "C6-SNB",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 104,
+ .target_residency = 345,
+ },
+ {
+ .name = "C7-SNB",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 109,
+ .target_residency = 345,
+ },
+ {}
};
-static const struct cpuidle_state byt_cstates[] = {
- {
- .name = "C1-BYT",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 1,
- },
- {
- .name = "C6N-BYT",
- .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 300,
- .target_residency = 275,
- },
- {
- .name = "C6S-BYT",
- .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 500,
- .target_residency = 560,
- },
- {
- .name = "C7-BYT",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 1200,
- .target_residency = 4000,
- },
- {
- .name = "C7S-BYT",
- .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 10000,
- .target_residency = 20000,
- },
- {}
+static const struct cpuidle_state byt_cstates[] =
+{
+ {
+ .name = "C1-BYT",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ },
+ {
+ .name = "C6N-BYT",
+ .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 300,
+ .target_residency = 275,
+ },
+ {
+ .name = "C6S-BYT",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 500,
+ .target_residency = 560,
+ },
+ {
+ .name = "C7-BYT",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ },
+ {
+ .name = "C7S-BYT",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ },
+ {}
};
-static const struct cpuidle_state cht_cstates[] = {
- {
- .name = "C1-CHT",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 1,
- },
- {
- .name = "C6N-CHT",
- .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 80,
- .target_residency = 275,
- },
- {
- .name = "C6S-CHT",
- .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 200,
- .target_residency = 560,
- },
- {
- .name = "C7-CHT",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 1200,
- .target_residency = 4000,
- },
- {
- .name = "C7S-CHT",
- .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 10000,
- .target_residency = 20000,
- },
- {}
+static const struct cpuidle_state cht_cstates[] =
+{
+ {
+ .name = "C1-CHT",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ },
+ {
+ .name = "C6N-CHT",
+ .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 80,
+ .target_residency = 275,
+ },
+ {
+ .name = "C6S-CHT",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 200,
+ .target_residency = 560,
+ },
+ {
+ .name = "C7-CHT",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ },
+ {
+ .name = "C7S-CHT",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ },
+ {}
};
-static const struct cpuidle_state ivb_cstates[] = {
- {
- .name = "C1-IVB",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 1,
- },
- {
- .name = "C1E-IVB",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C3-IVB",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 59,
- .target_residency = 156,
- },
- {
- .name = "C6-IVB",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 80,
- .target_residency = 300,
- },
- {
- .name = "C7-IVB",
- .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 87,
- .target_residency = 300,
- },
- {}
+static const struct cpuidle_state ivb_cstates[] =
+{
+ {
+ .name = "C1-IVB",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ },
+ {
+ .name = "C1E-IVB",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C3-IVB",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 156,
+ },
+ {
+ .name = "C6-IVB",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 80,
+ .target_residency = 300,
+ },
+ {
+ .name = "C7-IVB",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 87,
+ .target_residency = 300,
+ },
+ {}
};
-static const struct cpuidle_state ivt_cstates[] = {
- {
- .name = "C1-IVT",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 1,
- },
- {
- .name = "C1E-IVT",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 80,
- },
- {
- .name = "C3-IVT",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 59,
- .target_residency = 156,
- },
- {
- .name = "C6-IVT",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 82,
- .target_residency = 300,
- },
- {}
+static const struct cpuidle_state ivt_cstates[] =
+{
+ {
+ .name = "C1-IVT",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ },
+ {
+ .name = "C1E-IVT",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 80,
+ },
+ {
+ .name = "C3-IVT",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 156,
+ },
+ {
+ .name = "C6-IVT",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 82,
+ .target_residency = 300,
+ },
+ {}
};
-static const struct cpuidle_state ivt_cstates_4s[] = {
- {
- .name = "C1-IVT-4S",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 1,
- },
- {
- .name = "C1E-IVT-4S",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 250,
- },
- {
- .name = "C3-IVT-4S",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 59,
- .target_residency = 300,
- },
- {
- .name = "C6-IVT-4S",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 84,
- .target_residency = 400,
- },
- {}
+static const struct cpuidle_state ivt_cstates_4s[] =
+{
+ {
+ .name = "C1-IVT-4S",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ },
+ {
+ .name = "C1E-IVT-4S",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 250,
+ },
+ {
+ .name = "C3-IVT-4S",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 300,
+ },
+ {
+ .name = "C6-IVT-4S",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 84,
+ .target_residency = 400,
+ },
+ {}
};
-static const struct cpuidle_state ivt_cstates_8s[] = {
- {
- .name = "C1-IVT-8S",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 1,
- },
- {
- .name = "C1E-IVT-8S",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 500,
- },
- {
- .name = "C3-IVT-8S",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 59,
- .target_residency = 600,
- },
- {
- .name = "C6-IVT-8S",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 88,
- .target_residency = 700,
- },
- {}
+static const struct cpuidle_state ivt_cstates_8s[] =
+{
+ {
+ .name = "C1-IVT-8S",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ },
+ {
+ .name = "C1E-IVT-8S",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 500,
+ },
+ {
+ .name = "C3-IVT-8S",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 600,
+ },
+ {
+ .name = "C6-IVT-8S",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 88,
+ .target_residency = 700,
+ },
+ {}
};
-static const struct cpuidle_state hsw_cstates[] = {
- {
- .name = "C1-HSW",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C1E-HSW",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C3-HSW",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 33,
- .target_residency = 100,
- },
- {
- .name = "C6-HSW",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 133,
- .target_residency = 400,
- },
- {
- .name = "C7s-HSW",
- .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 166,
- .target_residency = 500,
- },
- {
- .name = "C8-HSW",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 300,
- .target_residency = 900,
- },
- {
- .name = "C9-HSW",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 600,
- .target_residency = 1800,
- },
- {
- .name = "C10-HSW",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 2600,
- .target_residency = 7700,
- },
- {}
+static const struct cpuidle_state hsw_cstates[] =
+{
+ {
+ .name = "C1-HSW",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C1E-HSW",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C3-HSW",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 33,
+ .target_residency = 100,
+ },
+ {
+ .name = "C6-HSW",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 400,
+ },
+ {
+ .name = "C7s-HSW",
+ .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 166,
+ .target_residency = 500,
+ },
+ {
+ .name = "C8-HSW",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 300,
+ .target_residency = 900,
+ },
+ {
+ .name = "C9-HSW",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 600,
+ .target_residency = 1800,
+ },
+ {
+ .name = "C10-HSW",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 2600,
+ .target_residency = 7700,
+ },
+ {}
};
-static const struct cpuidle_state bdw_cstates[] = {
- {
- .name = "C1-BDW",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C1E-BDW",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C3-BDW",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 40,
- .target_residency = 100,
- },
- {
- .name = "C6-BDW",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 133,
- .target_residency = 400,
- },
- {
- .name = "C7s-BDW",
- .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 166,
- .target_residency = 500,
- },
- {
- .name = "C8-BDW",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 300,
- .target_residency = 900,
- },
- {
- .name = "C9-BDW",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 600,
- .target_residency = 1800,
- },
- {
- .name = "C10-BDW",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 2600,
- .target_residency = 7700,
- },
- {}
+static const struct cpuidle_state bdw_cstates[] =
+{
+ {
+ .name = "C1-BDW",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C1E-BDW",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C3-BDW",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 40,
+ .target_residency = 100,
+ },
+ {
+ .name = "C6-BDW",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 400,
+ },
+ {
+ .name = "C7s-BDW",
+ .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 166,
+ .target_residency = 500,
+ },
+ {
+ .name = "C8-BDW",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 300,
+ .target_residency = 900,
+ },
+ {
+ .name = "C9-BDW",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 600,
+ .target_residency = 1800,
+ },
+ {
+ .name = "C10-BDW",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 2600,
+ .target_residency = 7700,
+ },
+ {}
};
-static struct cpuidle_state skl_cstates[] = {
- {
- .name = "C1-SKL",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C1E-SKL",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C3-SKL",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 70,
- .target_residency = 100,
- },
- {
- .name = "C6-SKL",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 85,
- .target_residency = 200,
- },
- {
- .name = "C7s-SKL",
- .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 124,
- .target_residency = 800,
- },
- {
- .name = "C8-SKL",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 200,
- .target_residency = 800,
- },
- {
- .name = "C9-SKL",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 480,
- .target_residency = 5000,
- },
- {
- .name = "C10-SKL",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 890,
- .target_residency = 5000,
- },
- {}
+static struct cpuidle_state skl_cstates[] =
+{
+ {
+ .name = "C1-SKL",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C1E-SKL",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C3-SKL",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 70,
+ .target_residency = 100,
+ },
+ {
+ .name = "C6-SKL",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 85,
+ .target_residency = 200,
+ },
+ {
+ .name = "C7s-SKL",
+ .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 124,
+ .target_residency = 800,
+ },
+ {
+ .name = "C8-SKL",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 200,
+ .target_residency = 800,
+ },
+ {
+ .name = "C9-SKL",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 480,
+ .target_residency = 5000,
+ },
+ {
+ .name = "C10-SKL",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 890,
+ .target_residency = 5000,
+ },
+ {}
};
-static const struct cpuidle_state skx_cstates[] = {
- {
- .name = "C1-SKX",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C1E-SKX",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C6-SKX",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 133,
- .target_residency = 600,
- },
- {}
+static const struct cpuidle_state skx_cstates[] =
+{
+ {
+ .name = "C1-SKX",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C1E-SKX",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C6-SKX",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 600,
+ },
+ {}
};
-static const struct cpuidle_state atom_cstates[] = {
- {
- .name = "C1E-ATM",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C2-ATM",
- .flags = MWAIT2flg(0x10),
- .exit_latency = 20,
- .target_residency = 80,
- },
- {
- .name = "C4-ATM",
- .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 100,
- .target_residency = 400,
- },
- {
- .name = "C6-ATM",
- .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 140,
- .target_residency = 560,
- },
- {}
+static const struct cpuidle_state atom_cstates[] =
+{
+ {
+ .name = "C1E-ATM",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C2-ATM",
+ .flags = MWAIT2flg(0x10),
+ .exit_latency = 20,
+ .target_residency = 80,
+ },
+ {
+ .name = "C4-ATM",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 100,
+ .target_residency = 400,
+ },
+ {
+ .name = "C6-ATM",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ },
+ {}
};
-static const struct cpuidle_state tangier_cstates[] = {
- {
- .name = "C1-TNG",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 4,
- },
- {
- .name = "C4-TNG",
- .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 100,
- .target_residency = 400,
- },
- {
- .name = "C6-TNG",
- .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 140,
- .target_residency = 560,
- },
- {
- .name = "C7-TNG",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 1200,
- .target_residency = 4000,
- },
- {
- .name = "C9-TNG",
- .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 10000,
- .target_residency = 20000,
- },
- {}
+static const struct cpuidle_state tangier_cstates[] =
+{
+ {
+ .name = "C1-TNG",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 4,
+ },
+ {
+ .name = "C4-TNG",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 100,
+ .target_residency = 400,
+ },
+ {
+ .name = "C6-TNG",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ },
+ {
+ .name = "C7-TNG",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ },
+ {
+ .name = "C9-TNG",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ },
+ {}
};
-static const struct cpuidle_state avn_cstates[] = {
- {
- .name = "C1-AVN",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C6-AVN",
- .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 15,
- .target_residency = 45,
- },
- {}
+static const struct cpuidle_state avn_cstates[] =
+{
+ {
+ .name = "C1-AVN",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C6-AVN",
+ .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 15,
+ .target_residency = 45,
+ },
+ {}
};
-static const struct cpuidle_state knl_cstates[] = {
- {
- .name = "C1-KNL",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 1,
- .target_residency = 2,
- },
- {
- .name = "C6-KNL",
- .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 120,
- .target_residency = 500,
- },
- {}
+static const struct cpuidle_state knl_cstates[] =
+{
+ {
+ .name = "C1-KNL",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 2,
+ },
+ {
+ .name = "C6-KNL",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 120,
+ .target_residency = 500,
+ },
+ {}
};
-static struct cpuidle_state bxt_cstates[] = {
- {
- .name = "C1-BXT",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C1E-BXT",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C6-BXT",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 133,
- .target_residency = 133,
- },
- {
- .name = "C7s-BXT",
- .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 155,
- .target_residency = 155,
- },
- {
- .name = "C8-BXT",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 1000,
- .target_residency = 1000,
- },
- {
- .name = "C9-BXT",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 2000,
- .target_residency = 2000,
- },
- {
- .name = "C10-BXT",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 10000,
- .target_residency = 10000,
- },
- {}
+static struct cpuidle_state bxt_cstates[] =
+{
+ {
+ .name = "C1-BXT",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C1E-BXT",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C6-BXT",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 133,
+ },
+ {
+ .name = "C7s-BXT",
+ .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 155,
+ .target_residency = 155,
+ },
+ {
+ .name = "C8-BXT",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1000,
+ .target_residency = 1000,
+ },
+ {
+ .name = "C9-BXT",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 2000,
+ .target_residency = 2000,
+ },
+ {
+ .name = "C10-BXT",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 10000,
+ },
+ {}
};
-static const struct cpuidle_state dnv_cstates[] = {
- {
- .name = "C1-DNV",
- .flags = MWAIT2flg(0x00),
- .exit_latency = 2,
- .target_residency = 2,
- },
- {
- .name = "C1E-DNV",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 10,
- .target_residency = 20,
- },
- {
- .name = "C6-DNV",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 50,
- .target_residency = 500,
- },
- {}
+static const struct cpuidle_state dnv_cstates[] =
+{
+ {
+ .name = "C1-DNV",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ },
+ {
+ .name = "C1E-DNV",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ },
+ {
+ .name = "C6-DNV",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 50,
+ .target_residency = 500,
+ },
+ {}
};
static void mwait_idle(void)
{
- unsigned int cpu = smp_processor_id();
- struct acpi_processor_power *power = processor_powers[cpu];
- struct acpi_processor_cx *cx = NULL;
- unsigned int eax, next_state, cstate;
- u64 before, after;
- u32 exp = 0, pred = 0, irq_traced[4] = { 0 };
-
- if (max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
- (next_state = cpuidle_current_governor->select(power)) > 0) {
- do {
- cx = &power->states[next_state];
- } while (cx->type > max_cstate && --next_state);
- if (!next_state)
- cx = NULL;
- menu_get_trace_data(&exp, &pred);
- }
- if (!cx) {
- if (pm_idle_save)
- pm_idle_save();
- else
- {
- struct cpu_info *info = get_cpu_info();
-
- spec_ctrl_enter_idle(info);
- safe_halt();
- spec_ctrl_exit_idle(info);
- }
- return;
- }
-
- cpufreq_dbs_timer_suspend();
-
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
- process_pending_softirqs();
-
- /* Interrupts must be disabled for C2 and higher transitions. */
- local_irq_disable();
-
- if (!cpu_is_haltable(cpu)) {
- local_irq_enable();
- sched_tick_resume();
- cpufreq_dbs_timer_resume();
- return;
- }
-
- eax = cx->address;
- cstate = ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+ unsigned int cpu = smp_processor_id();
+ struct acpi_processor_power *power = processor_powers[cpu];
+ struct acpi_processor_cx *cx = NULL;
+ unsigned int eax, next_state, cstate;
+ u64 before, after;
+ u32 exp = 0, pred = 0, irq_traced[4] = { 0 };
+
+ if (max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
+ (next_state = cpuidle_current_governor->select(power)) > 0)
+ {
+ do
+ {
+ cx = &power->states[next_state];
+ } while (cx->type > max_cstate && --next_state);
+ if (!next_state)
+ cx = NULL;
+ menu_get_trace_data(&exp, &pred);
+ }
+ if (!cx)
+ {
+ if (pm_idle_save)
+ pm_idle_save();
+ else
+ {
+ struct cpu_info *info = get_cpu_info();
+
+ spec_ctrl_enter_idle(info);
+ safe_halt();
+ spec_ctrl_exit_idle(info);
+ }
+ return;
+ }
+
+ cpufreq_dbs_timer_suspend();
+
+ sched_tick_suspend();
+ /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ process_pending_softirqs();
+
+ /* Interrupts must be disabled for C2 and higher transitions. */
+ local_irq_disable();
+
+ if (!cpu_is_haltable(cpu))
+ {
+ local_irq_enable();
+ sched_tick_resume();
+ cpufreq_dbs_timer_resume();
+ return;
+ }
+
+ eax = cx->address;
+ cstate = ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
#if 0 /* XXX Can we/do we need to do something similar on Xen? */
- /*
- * leave_mm() to avoid costly and often unnecessary wakeups
- * for flushing the user TLB's associated with the active mm.
- */
- if (cpuidle_state_table[].flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(cpu);
+ /*
+ * leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLB's associated with the active mm.
+ */
+ if (cpuidle_state_table[].flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(cpu);
#endif
- if (!(lapic_timer_reliable_states & (1 << cstate)))
- lapic_timer_off();
+ if (!(lapic_timer_reliable_states & (1 << cstate)))
+ lapic_timer_off();
- before = alternative_call(cpuidle_get_tick);
- TRACE_4D(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred);
+ before = alternative_call(cpuidle_get_tick);
+ TRACE_4D(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred);
- update_last_cx_stat(power, cx, before);
+ update_last_cx_stat(power, cx, before);
- if (cpu_is_haltable(cpu))
- mwait_idle_with_hints(eax, MWAIT_ECX_INTERRUPT_BREAK);
+ if (cpu_is_haltable(cpu))
+ mwait_idle_with_hints(eax, MWAIT_ECX_INTERRUPT_BREAK);
- after = alternative_call(cpuidle_get_tick);
+ after = alternative_call(cpuidle_get_tick);
- cstate_restore_tsc();
- trace_exit_reason(irq_traced);
- TRACE_6D(TRC_PM_IDLE_EXIT, cx->type, after,
- irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
+ cstate_restore_tsc();
+ trace_exit_reason(irq_traced);
+ TRACE_6D(TRC_PM_IDLE_EXIT, cx->type, after,
+ irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
- /* Now back in C0. */
- update_idle_stats(power, cx, before, after);
- local_irq_enable();
+ /* Now back in C0. */
+ update_idle_stats(power, cx, before, after);
+ local_irq_enable();
- if (!(lapic_timer_reliable_states & (1 << cstate)))
- lapic_timer_on();
+ if (!(lapic_timer_reliable_states & (1 << cstate)))
+ lapic_timer_on();
- sched_tick_resume();
- cpufreq_dbs_timer_resume();
+ sched_tick_resume();
+ cpufreq_dbs_timer_resume();
- if ( cpuidle_current_governor->reflect )
- cpuidle_current_governor->reflect(power);
+ if ( cpuidle_current_governor->reflect )
+ cpuidle_current_governor->reflect(power);
}
static void auto_demotion_disable(void *dummy)
{
- u64 msr_bits;
+ u64 msr_bits;
- rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
- msr_bits &= ~(icpu->auto_demotion_disable_flags);
- wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+ msr_bits &= ~(icpu->auto_demotion_disable_flags);
+ wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
static void byt_auto_demotion_disable(void *dummy)
{
- wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
- wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
static void c1e_promotion_disable(void *dummy)
{
- u64 msr_bits;
+ u64 msr_bits;
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
- msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits &= ~0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
}
-static const struct idle_cpu idle_cpu_nehalem = {
- .state_table = nehalem_cstates,
- .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_nehalem =
+{
+ .state_table = nehalem_cstates,
+ .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_atom = {
- .state_table = atom_cstates,
+static const struct idle_cpu idle_cpu_atom =
+{
+ .state_table = atom_cstates,
};
-static const struct idle_cpu idle_cpu_tangier = {
- .state_table = tangier_cstates,
+static const struct idle_cpu idle_cpu_tangier =
+{
+ .state_table = tangier_cstates,
};
-static const struct idle_cpu idle_cpu_lincroft = {
- .state_table = atom_cstates,
- .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
+static const struct idle_cpu idle_cpu_lincroft =
+{
+ .state_table = atom_cstates,
+ .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};
-static const struct idle_cpu idle_cpu_snb = {
- .state_table = snb_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_snb =
+{
+ .state_table = snb_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_byt = {
- .state_table = byt_cstates,
- .disable_promotion_to_c1e = 1,
- .byt_auto_demotion_disable_flag = 1,
+static const struct idle_cpu idle_cpu_byt =
+{
+ .state_table = byt_cstates,
+ .disable_promotion_to_c1e = 1,
+ .byt_auto_demotion_disable_flag = 1,
};
-static const struct idle_cpu idle_cpu_cht = {
- .state_table = cht_cstates,
- .disable_promotion_to_c1e = 1,
- .byt_auto_demotion_disable_flag = 1,
+static const struct idle_cpu idle_cpu_cht =
+{
+ .state_table = cht_cstates,
+ .disable_promotion_to_c1e = 1,
+ .byt_auto_demotion_disable_flag = 1,
};
-static const struct idle_cpu idle_cpu_ivb = {
- .state_table = ivb_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_ivb =
+{
+ .state_table = ivb_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_ivt = {
- .state_table = ivt_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_ivt =
+{
+ .state_table = ivt_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_hsw = {
- .state_table = hsw_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_hsw =
+{
+ .state_table = hsw_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_bdw = {
- .state_table = bdw_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_bdw =
+{
+ .state_table = bdw_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_skl = {
- .state_table = skl_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_skl =
+{
+ .state_table = skl_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_skx = {
- .state_table = skx_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_skx =
+{
+ .state_table = skx_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_avn = {
- .state_table = avn_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_avn =
+{
+ .state_table = avn_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_knl = {
- .state_table = knl_cstates,
+static const struct idle_cpu idle_cpu_knl =
+{
+ .state_table = knl_cstates,
};
-static const struct idle_cpu idle_cpu_bxt = {
- .state_table = bxt_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_bxt =
+{
+ .state_table = bxt_cstates,
+ .disable_promotion_to_c1e = 1,
};
-static const struct idle_cpu idle_cpu_dnv = {
- .state_table = dnv_cstates,
- .disable_promotion_to_c1e = 1,
+static const struct idle_cpu idle_cpu_dnv =
+{
+ .state_table = dnv_cstates,
+ .disable_promotion_to_c1e = 1,
};
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ALWAYS, &idle_cpu_##cpu}
-static const struct x86_cpu_id intel_idle_ids[] __initconstrel = {
- ICPU(0x1a, nehalem),
- ICPU(0x1e, nehalem),
- ICPU(0x1f, nehalem),
- ICPU(0x25, nehalem),
- ICPU(0x2c, nehalem),
- ICPU(0x2e, nehalem),
- ICPU(0x2f, nehalem),
- ICPU(0x1c, atom),
- ICPU(0x26, lincroft),
- ICPU(0x2a, snb),
- ICPU(0x2d, snb),
- ICPU(0x36, atom),
- ICPU(0x37, byt),
- ICPU(0x4a, tangier),
- ICPU(0x4c, cht),
- ICPU(0x3a, ivb),
- ICPU(0x3e, ivt),
- ICPU(0x3c, hsw),
- ICPU(0x3f, hsw),
- ICPU(0x45, hsw),
- ICPU(0x46, hsw),
- ICPU(0x4d, avn),
- ICPU(0x3d, bdw),
- ICPU(0x47, bdw),
- ICPU(0x4f, bdw),
- ICPU(0x56, bdw),
- ICPU(0x4e, skl),
- ICPU(0x5e, skl),
- ICPU(0x8e, skl),
- ICPU(0x9e, skl),
- ICPU(0x55, skx),
- ICPU(0x57, knl),
- ICPU(0x85, knl),
- ICPU(0x5c, bxt),
- ICPU(0x7a, bxt),
- ICPU(0x5f, dnv),
- {}
+static const struct x86_cpu_id intel_idle_ids[] __initconstrel =
+{
+ ICPU(0x1a, nehalem),
+ ICPU(0x1e, nehalem),
+ ICPU(0x1f, nehalem),
+ ICPU(0x25, nehalem),
+ ICPU(0x2c, nehalem),
+ ICPU(0x2e, nehalem),
+ ICPU(0x2f, nehalem),
+ ICPU(0x1c, atom),
+ ICPU(0x26, lincroft),
+ ICPU(0x2a, snb),
+ ICPU(0x2d, snb),
+ ICPU(0x36, atom),
+ ICPU(0x37, byt),
+ ICPU(0x4a, tangier),
+ ICPU(0x4c, cht),
+ ICPU(0x3a, ivb),
+ ICPU(0x3e, ivt),
+ ICPU(0x3c, hsw),
+ ICPU(0x3f, hsw),
+ ICPU(0x45, hsw),
+ ICPU(0x46, hsw),
+ ICPU(0x4d, avn),
+ ICPU(0x3d, bdw),
+ ICPU(0x47, bdw),
+ ICPU(0x4f, bdw),
+ ICPU(0x56, bdw),
+ ICPU(0x4e, skl),
+ ICPU(0x5e, skl),
+ ICPU(0x8e, skl),
+ ICPU(0x9e, skl),
+ ICPU(0x55, skx),
+ ICPU(0x57, knl),
+ ICPU(0x85, knl),
+ ICPU(0x5c, bxt),
+ ICPU(0x7a, bxt),
+ ICPU(0x5f, dnv),
+ {}
};
/*
@@ -967,42 +1009,47 @@ static const struct x86_cpu_id intel_idle_ids[] __initconstrel = {
*/
static void __init ivt_idle_state_table_update(void)
{
- /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
- unsigned int cpu, max_apicid = boot_cpu_physical_apicid;
-
- for_each_present_cpu(cpu)
- if (max_apicid < x86_cpu_to_apicid[cpu])
- max_apicid = x86_cpu_to_apicid[cpu];
- switch (apicid_to_socket(max_apicid)) {
- case 0: case 1:
- /* 1 and 2 socket systems use default ivt_cstates */
- break;
- case 2: case 3:
- cpuidle_state_table = ivt_cstates_4s;
- break;
- default:
- cpuidle_state_table = ivt_cstates_8s;
- break;
- }
+ /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
+ unsigned int cpu, max_apicid = boot_cpu_physical_apicid;
+
+ for_each_present_cpu(cpu)
+ if (max_apicid < x86_cpu_to_apicid[cpu])
+ max_apicid = x86_cpu_to_apicid[cpu];
+ switch (apicid_to_socket(max_apicid))
+ {
+ case 0:
+ case 1:
+ /* 1 and 2 socket systems use default ivt_cstates */
+ break;
+ case 2:
+ case 3:
+ cpuidle_state_table = ivt_cstates_4s;
+ break;
+ default:
+ cpuidle_state_table = ivt_cstates_8s;
+ break;
+ }
}
/*
* Translate IRTL (Interrupt Response Time Limit) MSR to usec
*/
-static const unsigned int __initconst irtl_ns_units[] = {
- 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+static const unsigned int __initconst irtl_ns_units[] =
+{
+ 1, 32, 1024, 32768, 1048576, 33554432, 0, 0
+};
static unsigned long long __init irtl_2_usec(unsigned long long irtl)
{
- unsigned long long ns;
+ unsigned long long ns;
- if (!irtl)
- return 0;
+ if (!irtl)
+ return 0;
- ns = irtl_ns_units[(irtl >> 10) & 0x7];
+ ns = irtl_ns_units[(irtl >> 10) & 0x7];
- return (irtl & 0x3FF) * ns / 1000;
+ return (irtl & 0x3FF) * ns / 1000;
}
/*
* bxt_idle_state_table_update(void)
@@ -1012,43 +1059,48 @@ static unsigned long long __init irtl_2_usec(unsigned long long irtl)
*/
static void __init bxt_idle_state_table_update(void)
{
- unsigned long long msr;
- unsigned int usec;
-
- rdmsrl(MSR_PKGC6_IRTL, msr);
- usec = irtl_2_usec(msr);
- if (usec) {
- bxt_cstates[2].exit_latency = usec;
- bxt_cstates[2].target_residency = usec;
- }
-
- rdmsrl(MSR_PKGC7_IRTL, msr);
- usec = irtl_2_usec(msr);
- if (usec) {
- bxt_cstates[3].exit_latency = usec;
- bxt_cstates[3].target_residency = usec;
- }
-
- rdmsrl(MSR_PKGC8_IRTL, msr);
- usec = irtl_2_usec(msr);
- if (usec) {
- bxt_cstates[4].exit_latency = usec;
- bxt_cstates[4].target_residency = usec;
- }
-
- rdmsrl(MSR_PKGC9_IRTL, msr);
- usec = irtl_2_usec(msr);
- if (usec) {
- bxt_cstates[5].exit_latency = usec;
- bxt_cstates[5].target_residency = usec;
- }
-
- rdmsrl(MSR_PKGC10_IRTL, msr);
- usec = irtl_2_usec(msr);
- if (usec) {
- bxt_cstates[6].exit_latency = usec;
- bxt_cstates[6].target_residency = usec;
- }
+ unsigned long long msr;
+ unsigned int usec;
+
+ rdmsrl(MSR_PKGC6_IRTL, msr);
+ usec = irtl_2_usec(msr);
+ if (usec)
+ {
+ bxt_cstates[2].exit_latency = usec;
+ bxt_cstates[2].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC7_IRTL, msr);
+ usec = irtl_2_usec(msr);
+ if (usec)
+ {
+ bxt_cstates[3].exit_latency = usec;
+ bxt_cstates[3].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC8_IRTL, msr);
+ usec = irtl_2_usec(msr);
+ if (usec)
+ {
+ bxt_cstates[4].exit_latency = usec;
+ bxt_cstates[4].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC9_IRTL, msr);
+ usec = irtl_2_usec(msr);
+ if (usec)
+ {
+ bxt_cstates[5].exit_latency = usec;
+ bxt_cstates[5].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC10_IRTL, msr);
+ usec = irtl_2_usec(msr);
+ if (usec)
+ {
+ bxt_cstates[6].exit_latency = usec;
+ bxt_cstates[6].target_residency = usec;
+ }
}
/*
@@ -1059,33 +1111,34 @@ static void __init bxt_idle_state_table_update(void)
*/
static void __init sklh_idle_state_table_update(void)
{
- u64 msr;
+ u64 msr;
- /* if PC10 disabled via cmdline max_cstate=7 or shallower */
- if (max_cstate <= 7)
- return;
+ /* if PC10 disabled via cmdline max_cstate=7 or shallower */
+ if (max_cstate <= 7)
+ return;
- /* if PC10 not present in CPUID.MWAIT.EDX */
- if ((mwait_substates & (MWAIT_CSTATE_MASK << 28)) == 0)
- return;
+ /* if PC10 not present in CPUID.MWAIT.EDX */
+ if ((mwait_substates & (MWAIT_CSTATE_MASK << 28)) == 0)
+ return;
- rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
- /* PC10 is not enabled in PKG C-state limit */
- if ((msr & 0xF) != 8)
- return;
+ /* PC10 is not enabled in PKG C-state limit */
+ if ((msr & 0xF) != 8)
+ return;
- /* if SGX is present */
- if (boot_cpu_has(X86_FEATURE_SGX)) {
- rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+ /* if SGX is present */
+ if (boot_cpu_has(X86_FEATURE_SGX))
+ {
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
- /* if SGX is enabled */
- if (msr & IA32_FEATURE_CONTROL_SGX_ENABLE)
- return;
- }
+ /* if SGX is enabled */
+ if (msr & IA32_FEATURE_CONTROL_SGX_ENABLE)
+ return;
+ }
- skl_cstates[5].flags |= CPUIDLE_FLAG_DISABLED; /* C8-SKL */
- skl_cstates[6].flags |= CPUIDLE_FLAG_DISABLED; /* C9-SKL */
+ skl_cstates[5].flags |= CPUIDLE_FLAG_DISABLED; /* C8-SKL */
+ skl_cstates[6].flags |= CPUIDLE_FLAG_DISABLED; /* C9-SKL */
}
/*
@@ -1095,180 +1148,191 @@ static void __init sklh_idle_state_table_update(void)
*/
static void __init mwait_idle_state_table_update(void)
{
- switch (boot_cpu_data.x86_model) {
- case 0x3e: /* IVT */
- ivt_idle_state_table_update();
- break;
- case 0x5c: /* BXT */
- case 0x7a:
- bxt_idle_state_table_update();
- break;
- case 0x5e: /* SKL-H */
- sklh_idle_state_table_update();
- break;
- }
+ switch (boot_cpu_data.x86_model)
+ {
+ case 0x3e: /* IVT */
+ ivt_idle_state_table_update();
+ break;
+ case 0x5c: /* BXT */
+ case 0x7a:
+ bxt_idle_state_table_update();
+ break;
+ case 0x5e: /* SKL-H */
+ sklh_idle_state_table_update();
+ break;
+ }
}
static int __init mwait_idle_probe(void)
{
- unsigned int eax, ebx, ecx;
- const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids);
+ unsigned int eax, ebx, ecx;
+ const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids);
- if (!id) {
- pr_debug(PREFIX "does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- return -ENODEV;
- }
+ if (!id)
+ {
+ pr_debug(PREFIX "does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
- if (!boot_cpu_has(X86_FEATURE_MONITOR)) {
- pr_debug(PREFIX "Please enable MWAIT in BIOS SETUP\n");
- return -ENODEV;
- }
+ if (!boot_cpu_has(X86_FEATURE_MONITOR))
+ {
+ pr_debug(PREFIX "Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return -ENODEV;
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
- !mwait_substates)
- return -ENODEV;
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
+ return -ENODEV;
- if (!max_cstate || !opt_mwait_idle) {
- pr_debug(PREFIX "disabled\n");
- return -EPERM;
- }
+ if (!max_cstate || !opt_mwait_idle)
+ {
+ pr_debug(PREFIX "disabled\n");
+ return -EPERM;
+ }
- pr_debug(PREFIX "MWAIT substates: %#x\n", mwait_substates);
+ pr_debug(PREFIX "MWAIT substates: %#x\n", mwait_substates);
- icpu = id->driver_data;
- cpuidle_state_table = icpu->state_table;
+ icpu = id->driver_data;
+ cpuidle_state_table = icpu->state_table;
- if (boot_cpu_has(X86_FEATURE_ARAT))
- lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ if (boot_cpu_has(X86_FEATURE_ARAT))
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- pr_debug(PREFIX "v" MWAIT_IDLE_VERSION " model %#x\n",
- boot_cpu_data.x86_model);
+ pr_debug(PREFIX "v" MWAIT_IDLE_VERSION " model %#x\n",
+ boot_cpu_data.x86_model);
- pr_debug(PREFIX "lapic_timer_reliable_states %#x\n",
- lapic_timer_reliable_states);
+ pr_debug(PREFIX "lapic_timer_reliable_states %#x\n",
+ lapic_timer_reliable_states);
- mwait_idle_state_table_update();
+ mwait_idle_state_table_update();
- return 0;
+ return 0;
}
static int mwait_idle_cpu_init(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+ unsigned long action, void *hcpu)
{
- unsigned int cpu = (unsigned long)hcpu, cstate;
- struct acpi_processor_power *dev = processor_powers[cpu];
-
- switch (action) {
- int rc;
-
- default:
- return NOTIFY_DONE;
-
- case CPU_UP_PREPARE:
- rc = cpuidle_init_cpu(cpu);
- dev = processor_powers[cpu];
- if (!rc && cpuidle_current_governor->enable)
- rc = cpuidle_current_governor->enable(dev);
- return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
-
- case CPU_ONLINE:
- if (!dev)
- return NOTIFY_DONE;
- break;
- }
-
- dev->count = 1;
-
- for (cstate = 0; cpuidle_state_table[cstate].target_residency; ++cstate) {
- unsigned int num_substates, hint, state;
- struct acpi_processor_cx *cx;
-
- hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- state = MWAIT_HINT2CSTATE(hint) + 1;
-
- if (state > max_cstate) {
- printk(PREFIX "max C-state %u reached\n", max_cstate);
- break;
- }
-
- /* Number of sub-states for this state in CPUID.MWAIT. */
- num_substates = (mwait_substates >> (state * 4))
- & MWAIT_SUBSTATE_MASK;
- /* If NO sub-states for this state in CPUID, skip it. */
- if (num_substates == 0)
- continue;
-
- /* if state marked as disabled, skip it */
- if (cpuidle_state_table[cstate].flags &
- CPUIDLE_FLAG_DISABLED) {
- printk(XENLOG_DEBUG PREFIX "state %s is disabled",
- cpuidle_state_table[cstate].name);
- continue;
- }
-
- if (dev->count >= ACPI_PROCESSOR_MAX_POWER) {
- printk(PREFIX "max C-state count of %u reached\n",
- ACPI_PROCESSOR_MAX_POWER);
- break;
- }
-
- if (state > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
- !pm_idle_save)
- setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
- cx = dev->states + dev->count;
- cx->type = state;
- cx->address = hint;
- cx->entry_method = ACPI_CSTATE_EM_FFH;
- cx->latency = cpuidle_state_table[cstate].exit_latency;
- cx->target_residency =
- cpuidle_state_table[cstate].target_residency;
-
- dev->count++;
- }
-
- if (icpu->auto_demotion_disable_flags)
- on_selected_cpus(cpumask_of(cpu), auto_demotion_disable, NULL, 1);
-
- if (icpu->byt_auto_demotion_disable_flag)
- on_selected_cpus(cpumask_of(cpu), byt_auto_demotion_disable, NULL, 1);
-
- if (icpu->disable_promotion_to_c1e)
- on_selected_cpus(cpumask_of(cpu), c1e_promotion_disable, NULL, 1);
-
- return NOTIFY_DONE;
+ unsigned int cpu = (unsigned long)hcpu, cstate;
+ struct acpi_processor_power *dev = processor_powers[cpu];
+
+ switch (action)
+ {
+ int rc;
+
+ default:
+ return NOTIFY_DONE;
+
+ case CPU_UP_PREPARE:
+ rc = cpuidle_init_cpu(cpu);
+ dev = processor_powers[cpu];
+ if (!rc && cpuidle_current_governor->enable)
+ rc = cpuidle_current_governor->enable(dev);
+ return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
+
+ case CPU_ONLINE:
+ if (!dev)
+ return NOTIFY_DONE;
+ break;
+ }
+
+ dev->count = 1;
+
+ for (cstate = 0; cpuidle_state_table[cstate].target_residency; ++cstate)
+ {
+ unsigned int num_substates, hint, state;
+ struct acpi_processor_cx *cx;
+
+ hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ state = MWAIT_HINT2CSTATE(hint) + 1;
+
+ if (state > max_cstate)
+ {
+ printk(PREFIX "max C-state %u reached\n", max_cstate);
+ break;
+ }
+
+ /* Number of sub-states for this state in CPUID.MWAIT. */
+ num_substates = (mwait_substates >> (state * 4))
+ & MWAIT_SUBSTATE_MASK;
+ /* If NO sub-states for this state in CPUID, skip it. */
+ if (num_substates == 0)
+ continue;
+
+ /* if state marked as disabled, skip it */
+ if (cpuidle_state_table[cstate].flags &
+ CPUIDLE_FLAG_DISABLED)
+ {
+ printk(XENLOG_DEBUG PREFIX "state %s is disabled",
+ cpuidle_state_table[cstate].name);
+ continue;
+ }
+
+ if (dev->count >= ACPI_PROCESSOR_MAX_POWER)
+ {
+ printk(PREFIX "max C-state count of %u reached\n",
+ ACPI_PROCESSOR_MAX_POWER);
+ break;
+ }
+
+ if (state > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+ !pm_idle_save)
+ setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+
+ cx = dev->states + dev->count;
+ cx->type = state;
+ cx->address = hint;
+ cx->entry_method = ACPI_CSTATE_EM_FFH;
+ cx->latency = cpuidle_state_table[cstate].exit_latency;
+ cx->target_residency =
+ cpuidle_state_table[cstate].target_residency;
+
+ dev->count++;
+ }
+
+ if (icpu->auto_demotion_disable_flags)
+ on_selected_cpus(cpumask_of(cpu), auto_demotion_disable, NULL, 1);
+
+ if (icpu->byt_auto_demotion_disable_flag)
+ on_selected_cpus(cpumask_of(cpu), byt_auto_demotion_disable, NULL, 1);
+
+ if (icpu->disable_promotion_to_c1e)
+ on_selected_cpus(cpumask_of(cpu), c1e_promotion_disable, NULL, 1);
+
+ return NOTIFY_DONE;
}
int __init mwait_idle_init(struct notifier_block *nfb)
{
- int err;
-
- if (pm_idle_save)
- return -ENODEV;
-
- err = mwait_idle_probe();
- if (!err && !boot_cpu_has(X86_FEATURE_ARAT)) {
- hpet_broadcast_init();
- if (xen_cpuidle < 0 && !hpet_broadcast_is_available())
- err = -ENODEV;
- else if(!lapic_timer_init())
- err = -EINVAL;
- if (err)
- pr_debug(PREFIX "not used (%d)\n", err);
- }
- if (!err) {
- nfb->notifier_call = mwait_idle_cpu_init;
- pm_idle_save = pm_idle;
- pm_idle = mwait_idle;
- dead_idle = acpi_dead_idle;
- }
-
- return err;
+ int err;
+
+ if (pm_idle_save)
+ return -ENODEV;
+
+ err = mwait_idle_probe();
+ if (!err && !boot_cpu_has(X86_FEATURE_ARAT))
+ {
+ hpet_broadcast_init();
+ if (xen_cpuidle < 0 && !hpet_broadcast_is_available())
+ err = -ENODEV;
+ else if (!lapic_timer_init())
+ err = -EINVAL;
+ if (err)
+ pr_debug(PREFIX "not used (%d)\n", err);
+ }
+ if (!err)
+ {
+ nfb->notifier_call = mwait_idle_cpu_init;
+ pm_idle_save = pm_idle;
+ pm_idle = mwait_idle;
+ dead_idle = acpi_dead_idle;
+ }
+
+ return err;
}
diff --git a/xen/arch/x86/cpu/shanghai.c b/xen/arch/x86/cpu/shanghai.c
index 08a81f0f0c..0d7dbb2e4d 100644
--- a/xen/arch/x86/cpu/shanghai.c
+++ b/xen/arch/x86/cpu/shanghai.c
@@ -15,6 +15,7 @@ static void init_shanghai(struct cpuinfo_x86 *c)
init_intel_cacheinfo(c);
}
-const struct cpu_dev shanghai_cpu_dev = {
+const struct cpu_dev shanghai_cpu_dev =
+{
.c_init = init_shanghai,
};
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 375599aca5..479e0f294c 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -57,7 +57,8 @@ static int __init parse_vpmu_params(const char *s)
const char *ss;
int rc = 0, val;
- do {
+ do
+ {
ss = strchr(s, ',');
if ( !ss )
ss = strchr(s, '\0');
@@ -126,7 +127,7 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
((vpmu_mode & XENPMU_MODE_ALL) &&
!is_hardware_domain(curr->domain)) )
- goto nop;
+ goto nop;
vpmu = vcpu_vpmu(curr);
ops = vpmu->arch_vpmu_ops;
@@ -146,7 +147,7 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
* (and unload) it again.
*/
if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data &&
- vpmu_is_set(vpmu, VPMU_CACHED) )
+ vpmu_is_set(vpmu, VPMU_CACHED) )
{
vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
ops->arch_vpmu_save(curr, 0);
@@ -155,7 +156,7 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
return ret;
- nop:
+nop:
if ( !is_write && (msr != MSR_IA32_MISC_ENABLE) )
*msr_content = 0;
@@ -357,7 +358,7 @@ void vpmu_save(struct vcpu *v)
int pcpu = smp_processor_id();
if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) )
- return;
+ return;
vpmu->last_pcpu = pcpu;
per_cpu(last_vcpu, pcpu) = v;
@@ -394,7 +395,7 @@ int vpmu_load(struct vcpu *v, bool_t from_guest)
vpmu_save_force, (void *)v, 1);
vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
}
- }
+ }
/* Prevent forced context save from remote CPU */
local_irq_disable();
@@ -417,7 +418,7 @@ int vpmu_load(struct vcpu *v, bool_t from_guest)
/* Only when PMU is counting, we load PMU context immediately. */
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ||
(!has_vlapic(vpmu_vcpu(vpmu)->domain) &&
- vpmu_is_set(vpmu, VPMU_CACHED)) )
+ vpmu_is_set(vpmu, VPMU_CACHED)) )
return 0;
if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
@@ -494,7 +495,7 @@ static void get_vpmu(struct vcpu *v)
* so we don't need to include it in the count.
*/
if ( !is_hardware_domain(v->domain) &&
- (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
+ (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
{
vpmu_count++;
vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);
@@ -523,7 +524,7 @@ static void put_vpmu(struct vcpu *v)
(vpmu_mode != XENPMU_MODE_OFF) )
vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);
- out:
+out:
spin_unlock(&vpmu_lock);
}
@@ -574,7 +575,7 @@ static void vpmu_arch_destroy(struct vcpu *v)
on_selected_cpus(cpumask_of(vcpu_vpmu(v)->last_pcpu),
vpmu_save_force, v, 1);
- vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
+ vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
}
}
@@ -848,7 +849,8 @@ static int cpu_callback(
return NOTIFY_DONE;
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block cpu_nfb =
+{
.notifier_call = cpu_callback
};
@@ -875,17 +877,17 @@ static int __init vpmu_init(void)
{
case X86_VENDOR_AMD:
if ( amd_vpmu_init() )
- vpmu_mode = XENPMU_MODE_OFF;
+ vpmu_mode = XENPMU_MODE_OFF;
break;
case X86_VENDOR_HYGON:
if ( hygon_vpmu_init() )
- vpmu_mode = XENPMU_MODE_OFF;
+ vpmu_mode = XENPMU_MODE_OFF;
break;
case X86_VENDOR_INTEL:
if ( core2_vpmu_init() )
- vpmu_mode = XENPMU_MODE_OFF;
+ vpmu_mode = XENPMU_MODE_OFF;
break;
default:
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 3c6799b42c..decde53c8b 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -52,7 +52,8 @@ static unsigned int __read_mostly regs_sz;
#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS
/* PMU Counter MSRs. */
-static const u32 AMD_F10H_COUNTERS[] = {
+static const u32 AMD_F10H_COUNTERS[] =
+{
MSR_K7_PERFCTR0,
MSR_K7_PERFCTR1,
MSR_K7_PERFCTR2,
@@ -60,14 +61,16 @@ static const u32 AMD_F10H_COUNTERS[] = {
};
/* PMU Control MSRs. */
-static const u32 AMD_F10H_CTRLS[] = {
+static const u32 AMD_F10H_CTRLS[] =
+{
MSR_K7_EVNTSEL0,
MSR_K7_EVNTSEL1,
MSR_K7_EVNTSEL2,
MSR_K7_EVNTSEL3
};
-static const u32 AMD_F15H_COUNTERS[] = {
+static const u32 AMD_F15H_COUNTERS[] =
+{
MSR_AMD_FAM15H_PERFCTR0,
MSR_AMD_FAM15H_PERFCTR1,
MSR_AMD_FAM15H_PERFCTR2,
@@ -76,7 +79,8 @@ static const u32 AMD_F15H_COUNTERS[] = {
MSR_AMD_FAM15H_PERFCTR5
};
-static const u32 AMD_F15H_CTRLS[] = {
+static const u32 AMD_F15H_CTRLS[] =
+{
MSR_AMD_FAM15H_EVNTSEL0,
MSR_AMD_FAM15H_EVNTSEL1,
MSR_AMD_FAM15H_EVNTSEL2,
@@ -332,18 +336,16 @@ static void context_update(unsigned int msr, u64 msr_content)
uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
if ( k7_counters_mirrored &&
- ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
- {
+ ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
msr = get_fam15h_addr(msr);
- }
for ( i = 0; i < num_counters; i++ )
{
- if ( msr == ctrls[i] )
- {
- ctrl_regs[i] = msr_content;
- return;
- }
+ if ( msr == ctrls[i] )
+ {
+ ctrl_regs[i] = msr_content;
+ return;
+ }
else if (msr == counters[i] )
{
counter_regs[i] = msr_content;
@@ -369,34 +371,32 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
/* For all counters, enable guest only mode for HVM guest */
if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
!is_guest_mode(msr_content) )
- {
set_guest_mode(msr_content);
- }
/* check if the first counter is enabled */
if ( (type == MSR_TYPE_CTRL) &&
- is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
+ is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
{
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
return 0;
vpmu_set(vpmu, VPMU_RUNNING);
if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
- amd_vpmu_set_msr_bitmap(v);
+ amd_vpmu_set_msr_bitmap(v);
}
/* stop saving & restore if guest stops first counter */
if ( (type == MSR_TYPE_CTRL) &&
- (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
+ (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
{
vpmu_reset(vpmu, VPMU_RUNNING);
if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
- amd_vpmu_unset_msr_bitmap(v);
+ amd_vpmu_unset_msr_bitmap(v);
release_pmu_ownership(PMU_OWNER_HVM);
}
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
- || vpmu_is_set(vpmu, VPMU_FROZEN) )
+ || vpmu_is_set(vpmu, VPMU_FROZEN) )
{
context_load(v);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
@@ -417,7 +417,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
struct vpmu_struct *vpmu = vcpu_vpmu(v);
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
- || vpmu_is_set(vpmu, VPMU_FROZEN) )
+ || vpmu_is_set(vpmu, VPMU_FROZEN) )
{
context_load(v);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
@@ -458,8 +458,8 @@ static void amd_vpmu_dump(const struct vcpu *v)
printk(" VPMU state: 0x%x ", vpmu->flags);
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
{
- printk("\n");
- return;
+ printk("\n");
+ return;
}
printk("(");
@@ -487,7 +487,8 @@ static void amd_vpmu_dump(const struct vcpu *v)
}
}
-static const struct arch_vpmu_ops amd_vpmu_ops = {
+static const struct arch_vpmu_ops amd_vpmu_ops =
+{
.do_wrmsr = amd_vpmu_do_wrmsr,
.do_rdmsr = amd_vpmu_do_rdmsr,
.do_interrupt = amd_vpmu_do_interrupt,
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 6e27f6ec8e..38fb46510a 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -90,8 +90,8 @@ static uint64_t __read_mostly global_ovf_ctrl_mask, global_ctrl_mask;
static unsigned int __read_mostly regs_sz;
/* Offset into context of the beginning of PMU register block */
static const unsigned int regs_off =
- sizeof(((struct xen_pmu_intel_ctxt *)0)->fixed_counters) +
- sizeof(((struct xen_pmu_intel_ctxt *)0)->arch_counters);
+ sizeof(((struct xen_pmu_intel_ctxt *)0)->fixed_counters) +
+ sizeof(((struct xen_pmu_intel_ctxt *)0)->arch_counters);
/*
* QUIRK to workaround an issue on various family 6 cpus.
@@ -110,7 +110,7 @@ static void check_pmc_quirk(void)
if ( current_cpu_data.x86 == 6 )
is_pmc_quirk = 1;
else
- is_pmc_quirk = 0;
+ is_pmc_quirk = 0;
}
static void handle_pmc_quirk(u64 msr_content)
@@ -569,7 +569,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
{
if ( !(is_hvm_vcpu(v) ? is_canonical_address(msr_content)
- : __addr_ok(msr_content)) )
+ : __addr_ok(msr_content)) )
{
gdprintk(XENLOG_WARNING,
"Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
@@ -653,7 +653,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
case 0x00c5: /* All Branch Mispredict Retired */
blocked = 0;
break;
- }
+ }
}
if ( blocked )
@@ -743,7 +743,7 @@ static void core2_vpmu_dump(const struct vcpu *v)
struct xen_pmu_cntr_pair *cntr_pair;
if ( !core2_vpmu_cxt || !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
- return;
+ return;
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) )
{
@@ -762,7 +762,7 @@ static void core2_vpmu_dump(const struct vcpu *v)
/* Print the contents of the counter and its configuration msr. */
for ( i = 0; i < arch_pmc_cnt; i++ )
printk(" general_%d: 0x%016lx ctrl: 0x%016lx\n",
- i, cntr_pair[i].counter, cntr_pair[i].control);
+ i, cntr_pair[i].counter, cntr_pair[i].control);
/*
* The configuration of the fixed counter is 4 bits each in the
@@ -819,7 +819,8 @@ static void core2_vpmu_destroy(struct vcpu *v)
vpmu_clear(vpmu);
}
-static const struct arch_vpmu_ops core2_vpmu_ops = {
+static const struct arch_vpmu_ops core2_vpmu_ops =
+{
.do_wrmsr = core2_vpmu_do_wrmsr,
.do_rdmsr = core2_vpmu_do_rdmsr,
.do_interrupt = core2_vpmu_do_interrupt,
@@ -887,7 +888,7 @@ int vmx_vpmu_initialise(struct vcpu *v)
break;
}
ds_warned = 1;
- func_out:
+func_out:
/* PV domains can allocate resources immediately */
if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) )
@@ -912,7 +913,7 @@ int __init core2_vpmu_init(void)
case 4:
printk(XENLOG_INFO "VPMU: PMU version 4 is not fully supported. "
"Emulating version 3\n");
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case 2:
case 3:
@@ -937,9 +938,9 @@ int __init core2_vpmu_init(void)
fixed_ctrl_mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
/* mask .AnyThread bits for all fixed counters */
- for( i = 0; i < fixed_pmc_cnt; i++ )
- fixed_ctrl_mask |=
- (FIXED_CTR_CTRL_ANYTHREAD_MASK << (FIXED_CTR_CTRL_BITS * i));
+ for ( i = 0; i < fixed_pmc_cnt; i++ )
+ fixed_ctrl_mask |=
+ (FIXED_CTR_CTRL_ANYTHREAD_MASK << (FIXED_CTR_CTRL_BITS * i));
fixed_counters_mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
global_ctrl_mask = ~((((1ULL << fixed_pmc_cnt) - 1) << 32) |
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index ab1a48ff90..f906848691 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -24,7 +24,8 @@ static int __init parse_xen_cpuid(const char *s)
const char *ss;
int val, rc = 0;
- do {
+ do
+ {
ss = strchr(s, ',');
if ( !ss )
ss = strchr(s, '\0');
@@ -77,15 +78,15 @@ static void zero_leaves(struct cpuid_leaf *l,
}
struct cpuid_policy __read_mostly raw_cpuid_policy,
- __read_mostly host_cpuid_policy,
- __read_mostly pv_max_cpuid_policy,
- __read_mostly hvm_max_cpuid_policy;
+ __read_mostly host_cpuid_policy,
+ __read_mostly pv_max_cpuid_policy,
+ __read_mostly hvm_max_cpuid_policy;
static void sanitise_featureset(uint32_t *fs)
{
/* for_each_set_bit() uses unsigned longs. Extend with zeroes. */
uint32_t disabled_features[
- ROUNDUP(FSCAPINTS, sizeof(unsigned long)/sizeof(uint32_t))] = {};
+ ROUNDUP(FSCAPINTS, sizeof(unsigned long)/sizeof(uint32_t))] = {};
unsigned int i;
for ( i = 0; i < FSCAPINTS; ++i )
@@ -372,7 +373,7 @@ static void __init calculate_hvm_max_policy(void)
cpuid_policy_to_featureset(p, hvm_featureset);
hvm_featuremask = hvm_hap_supported() ?
- hvm_hap_featuremask : hvm_shadow_featuremask;
+ hvm_hap_featuremask : hvm_shadow_featuremask;
for ( i = 0; i < ARRAY_SIZE(hvm_featureset); ++i )
hvm_featureset[i] &= hvm_featuremask[i];
@@ -458,7 +459,7 @@ void recalculate_cpuid_policy(struct domain *d)
unsigned int i;
p->x86_vendor = x86_cpuid_lookup_vendor(
- p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx);
+ p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx);
p->basic.max_leaf = min(p->basic.max_leaf, max->basic.max_leaf);
p->feat.max_subleaf = min(p->feat.max_subleaf, max->feat.max_subleaf);
@@ -580,7 +581,7 @@ int init_domain_cpuid_policy(struct domain *d)
{
struct cpuid_policy *p =
xmemdup(is_pv_domain(d) ? &pv_max_cpuid_policy
- : &hvm_max_cpuid_policy);
+ : &hvm_max_cpuid_policy);
if ( !p )
return -ENOMEM;
@@ -660,12 +661,12 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
if ( is_viridian_domain(d) )
return cpuid_viridian_leaves(v, leaf, subleaf, res);
- /*
- * Fallthrough.
- *
- * Intel reserve up until 0x4fffffff for hypervisor use. AMD reserve
- * only until 0x400000ff, but we already use double that.
- */
+ /*
+ * Fallthrough.
+ *
+ * Intel reserve up until 0x4fffffff for hypervisor use. AMD reserve
+ * only until 0x400000ff, but we already use double that.
+ */
case 0x40000100 ... 0x400001ff:
return cpuid_hypervisor_leaves(v, leaf, subleaf, res);
@@ -904,11 +905,11 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
*/
BUILD_BUG_ON(XSTATE_XSAVES_ONLY != 0);
- /*
- * Read CPUID[0xD,0/1].EBX from hardware. They vary with
- * enabled XSTATE, and appropraite XCR0|XSS are in context.
- */
- case 0:
+ /*
+ * Read CPUID[0xD,0/1].EBX from hardware. They vary with
+     * enabled XSTATE, and appropriate XCR0|XSS are in context.
+ */
+ case 0:
res->b = cpuid_count_ebx(leaf, subleaf);
}
break;
@@ -921,7 +922,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
is_hvm_domain(d) && !hvm_long_mode_active(v) )
res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
- common_leaf1_adjustments:
+common_leaf1_adjustments:
if ( is_hvm_domain(d) )
{
/* Fast-forward MSR_APIC_BASE.EN. */
diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index a500df01ac..73ec5aa7d6 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -22,7 +22,7 @@
#include <asm/debugger.h>
#include <asm/p2m.h>
-/*
+/*
* This file for general routines common to more than one debugger, like kdb,
* gdbsx, etc..
*/
@@ -77,18 +77,18 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)
return mfn;
}
-/*
+/*
* pgd3val: this is the value of init_mm.pgd[3] in a PV guest. It is optional.
- * This to assist debug of modules in the guest. The kernel address
- * space seems is always mapped, but modules are not necessarily
- * mapped in any arbitraty guest cr3 that we pick if pgd3val is 0.
- * Modules should always be addressible if we use cr3 from init_mm.
- * Since pgd3val is already a pgd value, cr3->pgd[3], we just need to
+ * This is to assist debugging of modules in the guest. The kernel address
+ * space seems to always be mapped, but modules are not necessarily
+ * mapped in any arbitrary guest cr3 that we pick if pgd3val is 0.
+ * Modules should always be addressable if we use cr3 from init_mm.
+ * Since pgd3val is already a pgd value, cr3->pgd[3], we just need to
* do 2 level lookups.
*
* NOTE: 4 level paging works for 32 PAE guests also because cpu runs in IA32-e
* mode.
- * Returns: mfn for the given (pv guest) vaddr
+ * Returns: mfn for the given (pv guest) vaddr
*/
static mfn_t
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
@@ -100,7 +100,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
mfn_t mfn = maddr_to_mfn(cr3_pa(cr3));
- DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
+ DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
cr3, pgd3val);
if ( pgd3val == 0 )
@@ -154,8 +154,8 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
}
/* Returns: number of bytes remaining to be copied */
-static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
- void * __user buf, unsigned int len,
+static unsigned int dbg_rw_guest_mem(struct domain *dp, void *__user gaddr,
+ void *__user buf, unsigned int len,
bool toaddr, uint64_t pgd3)
{
while ( len > 0 )
@@ -199,14 +199,14 @@ static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
return len;
}
-/*
+/*
* addr is hypervisor addr if domid == DOMID_IDLE, else it's guest addr
* buf is debugger buffer.
* if toaddr, then addr = buf (write to addr), else buf = addr (rd from guest)
* pgd3: value of init_mm.pgd[3] in guest. see above.
- * Returns: number of bytes remaining to be copied.
+ * Returns: number of bytes remaining to be copied.
*/
-unsigned int dbg_rw_mem(void * __user addr, void * __user buf,
+unsigned int dbg_rw_mem(void *__user addr, void *__user buf,
unsigned int len, domid_t domid, bool toaddr,
uint64_t pgd3)
{
diff --git a/xen/arch/x86/dmi_scan.c b/xen/arch/x86/dmi_scan.c
index fcdf2d3952..4d636a90c1 100644
--- a/xen/arch/x86/dmi_scan.c
+++ b/xen/arch/x86/dmi_scan.c
@@ -17,90 +17,94 @@
#define memcpy_fromio memcpy
#define alloc_bootmem(l) xmalloc_bytes(l)
-struct __packed dmi_eps {
- char anchor[5]; /* "_DMI_" */
- u8 checksum;
- u16 size;
- u32 address;
- u16 num_structures;
- u8 revision;
+struct __packed dmi_eps
+{
+ char anchor[5]; /* "_DMI_" */
+ u8 checksum;
+ u16 size;
+ u32 address;
+ u16 num_structures;
+ u8 revision;
};
-struct __packed smbios_eps {
- char anchor[4]; /* "_SM_" */
- u8 checksum;
- u8 length;
- u8 major, minor;
- u16 max_size;
- u8 revision;
- u8 _rsrvd_[5];
- struct dmi_eps dmi;
+struct __packed smbios_eps
+{
+ char anchor[4]; /* "_SM_" */
+ u8 checksum;
+ u8 length;
+ u8 major, minor;
+ u16 max_size;
+ u8 revision;
+ u8 _rsrvd_[5];
+ struct dmi_eps dmi;
};
-struct __packed smbios3_eps {
- char anchor[5]; /* "_SM3_" */
- u8 checksum;
- u8 length;
- u8 major, minor;
- u8 docrev;
- u8 revision;
- u8 _rsrvd_;
- u32 max_size;
- u64 address;
+struct __packed smbios3_eps
+{
+ char anchor[5]; /* "_SM3_" */
+ u8 checksum;
+ u8 length;
+ u8 major, minor;
+ u8 docrev;
+ u8 revision;
+ u8 _rsrvd_;
+ u32 max_size;
+ u64 address;
};
struct dmi_header
{
- u8 type;
- u8 length;
- u16 handle;
+ u8 type;
+ u8 length;
+ u16 handle;
};
-enum dmi_entry_type {
- DMI_ENTRY_BIOS = 0,
- DMI_ENTRY_SYSTEM,
- DMI_ENTRY_BASEBOARD,
- DMI_ENTRY_CHASSIS,
- DMI_ENTRY_PROCESSOR,
- DMI_ENTRY_MEM_CONTROLLER,
- DMI_ENTRY_MEM_MODULE,
- DMI_ENTRY_CACHE,
- DMI_ENTRY_PORT_CONNECTOR,
- DMI_ENTRY_SYSTEM_SLOT,
- DMI_ENTRY_ONBOARD_DEVICE,
- DMI_ENTRY_OEMSTRINGS,
- DMI_ENTRY_SYSCONF,
- DMI_ENTRY_BIOS_LANG,
- DMI_ENTRY_GROUP_ASSOC,
- DMI_ENTRY_SYSTEM_EVENT_LOG,
- DMI_ENTRY_PHYS_MEM_ARRAY,
- DMI_ENTRY_MEM_DEVICE,
- DMI_ENTRY_32_MEM_ERROR,
- DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR,
- DMI_ENTRY_MEM_DEV_MAPPED_ADDR,
- DMI_ENTRY_BUILTIN_POINTING_DEV,
- DMI_ENTRY_PORTABLE_BATTERY,
- DMI_ENTRY_SYSTEM_RESET,
- DMI_ENTRY_HW_SECURITY,
- DMI_ENTRY_SYSTEM_POWER_CONTROLS,
- DMI_ENTRY_VOLTAGE_PROBE,
- DMI_ENTRY_COOLING_DEV,
- DMI_ENTRY_TEMP_PROBE,
- DMI_ENTRY_ELECTRICAL_CURRENT_PROBE,
- DMI_ENTRY_OOB_REMOTE_ACCESS,
- DMI_ENTRY_BIS_ENTRY,
- DMI_ENTRY_SYSTEM_BOOT,
- DMI_ENTRY_MGMT_DEV,
- DMI_ENTRY_MGMT_DEV_COMPONENT,
- DMI_ENTRY_MGMT_DEV_THRES,
- DMI_ENTRY_MEM_CHANNEL,
- DMI_ENTRY_IPMI_DEV,
- DMI_ENTRY_SYS_POWER_SUPPLY,
- DMI_ENTRY_ADDITIONAL,
- DMI_ENTRY_ONBOARD_DEV_EXT,
- DMI_ENTRY_MGMT_CONTROLLER_HOST,
- DMI_ENTRY_INACTIVE = 126,
- DMI_ENTRY_END_OF_TABLE = 127,
+enum dmi_entry_type
+{
+ DMI_ENTRY_BIOS = 0,
+ DMI_ENTRY_SYSTEM,
+ DMI_ENTRY_BASEBOARD,
+ DMI_ENTRY_CHASSIS,
+ DMI_ENTRY_PROCESSOR,
+ DMI_ENTRY_MEM_CONTROLLER,
+ DMI_ENTRY_MEM_MODULE,
+ DMI_ENTRY_CACHE,
+ DMI_ENTRY_PORT_CONNECTOR,
+ DMI_ENTRY_SYSTEM_SLOT,
+ DMI_ENTRY_ONBOARD_DEVICE,
+ DMI_ENTRY_OEMSTRINGS,
+ DMI_ENTRY_SYSCONF,
+ DMI_ENTRY_BIOS_LANG,
+ DMI_ENTRY_GROUP_ASSOC,
+ DMI_ENTRY_SYSTEM_EVENT_LOG,
+ DMI_ENTRY_PHYS_MEM_ARRAY,
+ DMI_ENTRY_MEM_DEVICE,
+ DMI_ENTRY_32_MEM_ERROR,
+ DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR,
+ DMI_ENTRY_MEM_DEV_MAPPED_ADDR,
+ DMI_ENTRY_BUILTIN_POINTING_DEV,
+ DMI_ENTRY_PORTABLE_BATTERY,
+ DMI_ENTRY_SYSTEM_RESET,
+ DMI_ENTRY_HW_SECURITY,
+ DMI_ENTRY_SYSTEM_POWER_CONTROLS,
+ DMI_ENTRY_VOLTAGE_PROBE,
+ DMI_ENTRY_COOLING_DEV,
+ DMI_ENTRY_TEMP_PROBE,
+ DMI_ENTRY_ELECTRICAL_CURRENT_PROBE,
+ DMI_ENTRY_OOB_REMOTE_ACCESS,
+ DMI_ENTRY_BIS_ENTRY,
+ DMI_ENTRY_SYSTEM_BOOT,
+ DMI_ENTRY_MGMT_DEV,
+ DMI_ENTRY_MGMT_DEV_COMPONENT,
+ DMI_ENTRY_MGMT_DEV_THRES,
+ DMI_ENTRY_MEM_CHANNEL,
+ DMI_ENTRY_IPMI_DEV,
+ DMI_ENTRY_SYS_POWER_SUPPLY,
+ DMI_ENTRY_ADDITIONAL,
+ DMI_ENTRY_ONBOARD_DEV_EXT,
+ DMI_ENTRY_MGMT_CONTROLLER_HOST,
+ DMI_ENTRY_INACTIVE = 126,
+ DMI_ENTRY_END_OF_TABLE = 127,
};
#undef DMI_DEBUG
@@ -111,88 +115,88 @@ enum dmi_entry_type {
#define dmi_printk(x)
#endif
-static char * __init dmi_string(struct dmi_header *dm, u8 s)
+static char *__init dmi_string(struct dmi_header *dm, u8 s)
{
- char *bp=(char *)dm;
- bp+=dm->length;
- if(!s)
- return "";
- s--;
- while(s>0 && *bp)
- {
- bp+=strlen(bp);
- bp++;
- s--;
- }
- return bp;
+ char *bp=(char *)dm;
+ bp+=dm->length;
+ if (!s)
+ return "";
+ s--;
+ while (s>0 && *bp)
+ {
+ bp+=strlen(bp);
+ bp++;
+ s--;
+ }
+ return bp;
}
/*
* We have to be cautious here. We have seen BIOSes with DMI pointers
* pointing to completely the wrong place for example
*/
-
+
static int __init dmi_table(paddr_t base, u32 len, int num,
- void (*decode)(struct dmi_header *))
+ void (*decode)(struct dmi_header *))
{
- u8 *buf;
- struct dmi_header *dm;
- u8 *data;
- int i=0;
-
- buf = bt_ioremap(base, len);
- if(buf==NULL)
- return -1;
-
- data = buf;
-
- /*
- * Stop when we have seen all the items the table claimed to have
- * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
- * >= 3.0 only) OR we run off the end of the table (should never
- * happen but sometimes does on bogus implementations.)
- */
- while((num < 0 || i < num) && data-buf+sizeof(struct dmi_header)<=len)
- {
- dm=(struct dmi_header *)data;
- /*
- * We want to know the total length (formated area and strings)
- * before decoding to make sure we won't run off the table in
- * dmi_decode or dmi_string
- */
- data+=dm->length;
- while(data-buf<len-1 && (data[0] || data[1]))
- data++;
- if(data-buf<len-1)
- decode(dm);
- /*
- * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
- * For tables behind a 64-bit entry point, we have no item
- * count and no exact table length, so stop on end-of-table
- * marker. For tables behind a 32-bit entry point, we have
- * seen OEM structures behind the end-of-table marker on
- * some systems, so don't trust it.
- */
- if (num < 0 && dm->type == DMI_ENTRY_END_OF_TABLE)
- break;
- data+=2;
- i++;
- }
- bt_iounmap(buf, len);
- return 0;
+ u8 *buf;
+ struct dmi_header *dm;
+ u8 *data;
+ int i=0;
+
+ buf = bt_ioremap(base, len);
+ if (buf==NULL)
+ return -1;
+
+ data = buf;
+
+ /*
+ * Stop when we have seen all the items the table claimed to have
+ * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
+ * >= 3.0 only) OR we run off the end of the table (should never
+ * happen but sometimes does on bogus implementations.)
+ */
+ while ((num < 0 || i < num) && data-buf+sizeof(struct dmi_header)<=len)
+ {
+ dm=(struct dmi_header *)data;
+ /*
+ * We want to know the total length (formated area and strings)
+ * before decoding to make sure we won't run off the table in
+ * dmi_decode or dmi_string
+ */
+ data+=dm->length;
+ while (data-buf<len-1 && (data[0] || data[1]))
+ data++;
+ if (data-buf<len-1)
+ decode(dm);
+ /*
+ * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+ * For tables behind a 64-bit entry point, we have no item
+ * count and no exact table length, so stop on end-of-table
+ * marker. For tables behind a 32-bit entry point, we have
+ * seen OEM structures behind the end-of-table marker on
+ * some systems, so don't trust it.
+ */
+ if (num < 0 && dm->type == DMI_ENTRY_END_OF_TABLE)
+ break;
+ data+=2;
+ i++;
+ }
+ bt_iounmap(buf, len);
+ return 0;
}
static inline bool __init dmi_checksum(const void __iomem *buf,
unsigned int len)
{
- u8 sum = 0;
- const u8 *p = buf;
- unsigned int a;
-
- for (a = 0; a < len; a++)
- sum += p[a];
- return sum == 0;
+ u8 sum = 0;
+ const u8 *p = buf;
+ unsigned int a;
+
+ for (a = 0; a < len; a++)
+ sum += p[a];
+ return sum == 0;
}
static u32 __initdata efi_dmi_address;
@@ -208,223 +212,246 @@ static u32 __initdata efi_smbios3_size;
*/
void __init dmi_efi_get_table(const void *smbios, const void *smbios3)
{
- const struct smbios_eps *eps = smbios;
- const struct smbios3_eps *eps3 = smbios3;
-
- if (eps3 && memcmp(eps3->anchor, "_SM3_", 5) == 0 &&
- eps3->length >= sizeof(*eps3) &&
- dmi_checksum(eps3, eps3->length)) {
- efi_smbios3_address = eps3->address;
- efi_smbios3_size = eps3->max_size;
- return;
- }
-
- if (eps && memcmp(eps->anchor, "_SM_", 4) == 0 &&
- eps->length >= sizeof(*eps) &&
- dmi_checksum(eps, eps->length)) {
- efi_smbios_address = (u32)(long)eps;
- efi_smbios_size = eps->length;
-
- if (memcmp(eps->dmi.anchor, "_DMI_", 5) == 0 &&
- dmi_checksum(&eps->dmi, sizeof(eps->dmi))) {
- efi_dmi_address = eps->dmi.address;
- efi_dmi_size = eps->dmi.size;
- }
- }
+ const struct smbios_eps *eps = smbios;
+ const struct smbios3_eps *eps3 = smbios3;
+
+ if (eps3 && memcmp(eps3->anchor, "_SM3_", 5) == 0 &&
+ eps3->length >= sizeof(*eps3) &&
+ dmi_checksum(eps3, eps3->length))
+ {
+ efi_smbios3_address = eps3->address;
+ efi_smbios3_size = eps3->max_size;
+ return;
+ }
+
+ if (eps && memcmp(eps->anchor, "_SM_", 4) == 0 &&
+ eps->length >= sizeof(*eps) &&
+ dmi_checksum(eps, eps->length))
+ {
+ efi_smbios_address = (u32)(long)eps;
+ efi_smbios_size = eps->length;
+
+ if (memcmp(eps->dmi.anchor, "_DMI_", 5) == 0 &&
+ dmi_checksum(&eps->dmi, sizeof(eps->dmi)))
+ {
+ efi_dmi_address = eps->dmi.address;
+ efi_dmi_size = eps->dmi.size;
+ }
+ }
}
const char *__init dmi_get_table(paddr_t *base, u32 *len)
{
- static unsigned int __initdata instance;
-
- if (efi_enabled(EFI_BOOT)) {
- if (efi_smbios3_size && !(instance & 1)) {
- *base = efi_smbios3_address;
- *len = efi_smbios3_size;
- instance |= 1;
- return "SMBIOSv3";
- }
- if (efi_dmi_size && !(instance & 2)) {
- *base = efi_dmi_address;
- *len = efi_dmi_size;
- instance |= 2;
- return "DMI";
- }
- if (efi_smbios_size && !(instance & 4)) {
- *base = efi_smbios_address;
- *len = efi_smbios_size;
- instance |= 4;
- return "SMBIOS";
- }
- } else {
- char __iomem *p = maddr_to_virt(0xF0000), *q;
- union {
- struct dmi_eps dmi;
- struct smbios3_eps smbios3;
- } eps;
-
- for (q = p; q <= p + 0x10000 - sizeof(eps.dmi); q += 16) {
- memcpy_fromio(&eps, q, sizeof(eps.dmi));
- if (!(instance & 1) &&
- memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 &&
- dmi_checksum(&eps.dmi, sizeof(eps.dmi))) {
- *base = eps.dmi.address;
- *len = eps.dmi.size;
- instance |= 1;
- return "DMI";
- }
-
- BUILD_BUG_ON(sizeof(eps.smbios3) <= sizeof(eps.dmi));
- if ((instance & 2) ||
- q > p + 0x10000 - sizeof(eps.smbios3))
- continue;
- memcpy_fromio(&eps.dmi + 1, q + sizeof(eps.dmi),
- sizeof(eps.smbios3) - sizeof(eps.dmi));
- if (!memcmp(eps.smbios3.anchor, "_SM3_", 5) &&
- eps.smbios3.length >= sizeof(eps.smbios3) &&
- q <= p + 0x10000 - eps.smbios3.length &&
- dmi_checksum(q, eps.smbios3.length)) {
- *base = eps.smbios3.address;
- *len = eps.smbios3.max_size;
- instance |= 2;
- return "SMBIOSv3";
- }
- }
- }
- return NULL;
+ static unsigned int __initdata instance;
+
+ if (efi_enabled(EFI_BOOT))
+ {
+ if (efi_smbios3_size && !(instance & 1))
+ {
+ *base = efi_smbios3_address;
+ *len = efi_smbios3_size;
+ instance |= 1;
+ return "SMBIOSv3";
+ }
+ if (efi_dmi_size && !(instance & 2))
+ {
+ *base = efi_dmi_address;
+ *len = efi_dmi_size;
+ instance |= 2;
+ return "DMI";
+ }
+ if (efi_smbios_size && !(instance & 4))
+ {
+ *base = efi_smbios_address;
+ *len = efi_smbios_size;
+ instance |= 4;
+ return "SMBIOS";
+ }
+ }
+ else
+ {
+ char __iomem *p = maddr_to_virt(0xF0000), *q;
+ union
+ {
+ struct dmi_eps dmi;
+ struct smbios3_eps smbios3;
+ } eps;
+
+ for (q = p; q <= p + 0x10000 - sizeof(eps.dmi); q += 16)
+ {
+ memcpy_fromio(&eps, q, sizeof(eps.dmi));
+ if (!(instance & 1) &&
+ memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 &&
+ dmi_checksum(&eps.dmi, sizeof(eps.dmi)))
+ {
+ *base = eps.dmi.address;
+ *len = eps.dmi.size;
+ instance |= 1;
+ return "DMI";
+ }
+
+ BUILD_BUG_ON(sizeof(eps.smbios3) <= sizeof(eps.dmi));
+ if ((instance & 2) ||
+ q > p + 0x10000 - sizeof(eps.smbios3))
+ continue;
+ memcpy_fromio(&eps.dmi + 1, q + sizeof(eps.dmi),
+ sizeof(eps.smbios3) - sizeof(eps.dmi));
+ if (!memcmp(eps.smbios3.anchor, "_SM3_", 5) &&
+ eps.smbios3.length >= sizeof(eps.smbios3) &&
+ q <= p + 0x10000 - eps.smbios3.length &&
+ dmi_checksum(q, eps.smbios3.length))
+ {
+ *base = eps.smbios3.address;
+ *len = eps.smbios3.max_size;
+ instance |= 2;
+ return "SMBIOSv3";
+ }
+ }
+ }
+ return NULL;
}
-typedef union {
- const struct smbios_eps __iomem *legacy;
- const struct smbios3_eps __iomem *v3;
+typedef union
+{
+ const struct smbios_eps __iomem *legacy;
+ const struct smbios3_eps __iomem *v3;
} smbios_eps_u __attribute__((transparent_union));
static int __init _dmi_iterate(const struct dmi_eps *dmi,
- const smbios_eps_u smbios,
- void (*decode)(struct dmi_header *))
+ const smbios_eps_u smbios,
+ void (*decode)(struct dmi_header *))
{
- int num;
- u32 len;
- paddr_t base;
-
- if (!dmi) {
- num = -1;
- len = smbios.v3->max_size;
- base = smbios.v3->address;
- printk(KERN_INFO "SMBIOS %d.%d present.\n",
- smbios.v3->major, smbios.v3->minor);
- dmi_printk((KERN_INFO "SMBIOS v3 table at 0x%"PRIpaddr".\n", base));
- } else {
- num = dmi->num_structures;
- len = dmi->size;
- base = dmi->address;
-
- /*
- * DMI version 0.0 means that the real version is taken from
- * the SMBIOS version, which we may not know at this point.
- */
- if (dmi->revision)
- printk(KERN_INFO "DMI %d.%d present.\n",
- dmi->revision >> 4, dmi->revision & 0x0f);
- else if (!smbios.legacy)
- printk(KERN_INFO "DMI present.\n");
- dmi_printk((KERN_INFO "%d structures occupying %u bytes.\n",
- num, len));
- dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", (u32)base));
- }
- return dmi_table(base, len, num, decode);
+ int num;
+ u32 len;
+ paddr_t base;
+
+ if (!dmi)
+ {
+ num = -1;
+ len = smbios.v3->max_size;
+ base = smbios.v3->address;
+ printk(KERN_INFO "SMBIOS %d.%d present.\n",
+ smbios.v3->major, smbios.v3->minor);
+ dmi_printk((KERN_INFO "SMBIOS v3 table at 0x%"PRIpaddr".\n", base));
+ }
+ else
+ {
+ num = dmi->num_structures;
+ len = dmi->size;
+ base = dmi->address;
+
+ /*
+ * DMI version 0.0 means that the real version is taken from
+ * the SMBIOS version, which we may not know at this point.
+ */
+ if (dmi->revision)
+ printk(KERN_INFO "DMI %d.%d present.\n",
+ dmi->revision >> 4, dmi->revision & 0x0f);
+ else if (!smbios.legacy)
+ printk(KERN_INFO "DMI present.\n");
+ dmi_printk((KERN_INFO "%d structures occupying %u bytes.\n",
+ num, len));
+ dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", (u32)base));
+ }
+ return dmi_table(base, len, num, decode);
}
static int __init dmi_iterate(void (*decode)(struct dmi_header *))
{
- struct dmi_eps dmi;
- struct smbios3_eps smbios3;
- char __iomem *p, *q;
-
- dmi.size = 0;
- smbios3.length = 0;
-
- p = maddr_to_virt(0xF0000);
- for (q = p; q < p + 0x10000; q += 16) {
- if (!dmi.size) {
- memcpy_fromio(&dmi, q, sizeof(dmi));
- if (memcmp(dmi.anchor, "_DMI_", 5) ||
- !dmi_checksum(&dmi, sizeof(dmi)))
- dmi.size = 0;
- }
- if (!smbios3.length &&
- q <= p + 0x10000 - sizeof(smbios3)) {
- memcpy_fromio(&smbios3, q, sizeof(smbios3));
- if (memcmp(smbios3.anchor, "_SM3_", 5) ||
- smbios3.length < sizeof(smbios3) ||
- q < p + 0x10000 - smbios3.length ||
- !dmi_checksum(q, smbios3.length))
- smbios3.length = 0;
- }
- }
-
- if (smbios3.length)
- return _dmi_iterate(NULL, &smbios3, decode);
- if (dmi.size)
- return _dmi_iterate(&dmi, NULL, decode);
- return -1;
+ struct dmi_eps dmi;
+ struct smbios3_eps smbios3;
+ char __iomem *p, *q;
+
+ dmi.size = 0;
+ smbios3.length = 0;
+
+ p = maddr_to_virt(0xF0000);
+ for (q = p; q < p + 0x10000; q += 16)
+ {
+ if (!dmi.size)
+ {
+ memcpy_fromio(&dmi, q, sizeof(dmi));
+ if (memcmp(dmi.anchor, "_DMI_", 5) ||
+ !dmi_checksum(&dmi, sizeof(dmi)))
+ dmi.size = 0;
+ }
+ if (!smbios3.length &&
+ q <= p + 0x10000 - sizeof(smbios3))
+ {
+ memcpy_fromio(&smbios3, q, sizeof(smbios3));
+ if (memcmp(smbios3.anchor, "_SM3_", 5) ||
+ smbios3.length < sizeof(smbios3) ||
+ q < p + 0x10000 - smbios3.length ||
+ !dmi_checksum(q, smbios3.length))
+ smbios3.length = 0;
+ }
+ }
+
+ if (smbios3.length)
+ return _dmi_iterate(NULL, &smbios3, decode);
+ if (dmi.size)
+ return _dmi_iterate(&dmi, NULL, decode);
+ return -1;
}
static int __init dmi_efi_iterate(void (*decode)(struct dmi_header *))
{
- int ret = -1;
-
- while (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
- struct smbios3_eps eps;
- const struct smbios3_eps __iomem *p;
-
- p = bt_ioremap(efi.smbios3, sizeof(eps));
- if (!p)
- break;
- memcpy_fromio(&eps, p, sizeof(eps));
- bt_iounmap(p, sizeof(eps));
-
- if (memcmp(eps.anchor, "_SM3_", 5) ||
- eps.length < sizeof(eps))
- break;
-
- p = bt_ioremap(efi.smbios3, eps.length);
- if (!p)
- break;
- if (dmi_checksum(p, eps.length))
- ret = _dmi_iterate(NULL, p, decode);
- bt_iounmap(p, eps.length);
- break;
- }
-
- if (ret != 0 && efi.smbios != EFI_INVALID_TABLE_ADDR) {
- struct smbios_eps eps;
- const struct smbios_eps __iomem *p;
-
- p = bt_ioremap(efi.smbios, sizeof(eps));
- if (!p)
- return -1;
- memcpy_fromio(&eps, p, sizeof(eps));
- bt_iounmap(p, sizeof(eps));
-
- if (memcmp(eps.anchor, "_SM_", 4) ||
- eps.length < sizeof(eps))
- return -1;
-
- p = bt_ioremap(efi.smbios, eps.length);
- if (!p)
- return -1;
- if (dmi_checksum(p, eps.length) &&
- memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 &&
- dmi_checksum(&eps.dmi, sizeof(eps.dmi))) {
- printk(KERN_INFO "SMBIOS %d.%d present.\n",
- eps.major, eps.minor);
- ret = _dmi_iterate(&eps.dmi, p, decode);
- }
- bt_iounmap(p, eps.length);
- }
-
- return ret;
+ int ret = -1;
+
+ while (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
+ {
+ struct smbios3_eps eps;
+ const struct smbios3_eps __iomem *p;
+
+ p = bt_ioremap(efi.smbios3, sizeof(eps));
+ if (!p)
+ break;
+ memcpy_fromio(&eps, p, sizeof(eps));
+ bt_iounmap(p, sizeof(eps));
+
+ if (memcmp(eps.anchor, "_SM3_", 5) ||
+ eps.length < sizeof(eps))
+ break;
+
+ p = bt_ioremap(efi.smbios3, eps.length);
+ if (!p)
+ break;
+ if (dmi_checksum(p, eps.length))
+ ret = _dmi_iterate(NULL, p, decode);
+ bt_iounmap(p, eps.length);
+ break;
+ }
+
+ if (ret != 0 && efi.smbios != EFI_INVALID_TABLE_ADDR)
+ {
+ struct smbios_eps eps;
+ const struct smbios_eps __iomem *p;
+
+ p = bt_ioremap(efi.smbios, sizeof(eps));
+ if (!p)
+ return -1;
+ memcpy_fromio(&eps, p, sizeof(eps));
+ bt_iounmap(p, sizeof(eps));
+
+ if (memcmp(eps.anchor, "_SM_", 4) ||
+ eps.length < sizeof(eps))
+ return -1;
+
+ p = bt_ioremap(efi.smbios, eps.length);
+ if (!p)
+ return -1;
+ if (dmi_checksum(p, eps.length) &&
+ memcmp(eps.dmi.anchor, "_DMI_", 5) == 0 &&
+ dmi_checksum(&eps.dmi, sizeof(eps.dmi)))
+ {
+ printk(KERN_INFO "SMBIOS %d.%d present.\n",
+ eps.major, eps.minor);
+ ret = _dmi_iterate(&eps.dmi, p, decode);
+ }
+ bt_iounmap(p, eps.length);
+ }
+
+ return ret;
}
static char *__initdata dmi_ident[DMI_STRING_MAX];
@@ -432,20 +459,20 @@ static char *__initdata dmi_ident[DMI_STRING_MAX];
/*
* Save a DMI string
*/
-
+
static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
{
- char *d = (char*)dm;
- char *p = dmi_string(dm, d[string]);
- if(p==NULL || *p == 0)
- return;
- if (dmi_ident[slot])
- return;
- dmi_ident[slot] = alloc_bootmem(strlen(p)+1);
- if(dmi_ident[slot])
- strlcpy(dmi_ident[slot], p, strlen(p)+1);
- else
- printk(KERN_ERR "dmi_save_ident: out of memory.\n");
+ char *d = (char *)dm;
+ char *p = dmi_string(dm, d[string]);
+    if (p == NULL || *p == 0)
+ return;
+ if (dmi_ident[slot])
+ return;
+ dmi_ident[slot] = alloc_bootmem(strlen(p)+1);
+ if (dmi_ident[slot])
+ strlcpy(dmi_ident[slot], p, strlen(p)+1);
+ else
+ printk(KERN_ERR "dmi_save_ident: out of memory.\n");
}
/*
@@ -461,8 +488,9 @@ static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
static __init int broken_toshiba_keyboard(struct dmi_blacklist *d)
{
- printk(KERN_WARNING "Toshiba with broken keyboard detected. If your keyboard sometimes generates 3 keypresses instead of one, see http://davyd.ucc.asn.au/projects/toshiba/README\n");
- return 0;
+ printk(KERN_WARNING
+ "Toshiba with broken keyboard detected. If your keyboard sometimes generates 3 keypresses instead of one, see http://davyd.ucc.asn.au/projects/toshiba/README\n");
+ return 0;
}
static int __init ich10_bios_quirk(struct dmi_system_id *d)
@@ -472,7 +500,8 @@ static int __init ich10_bios_quirk(struct dmi_system_id *d)
if ( pci_conf_read16(0, 0, 0x1f, 0, PCI_VENDOR_ID) != 0x8086 )
return 0;
- switch ( pci_conf_read16(0, 0, 0x1f, 0, PCI_DEVICE_ID) ) {
+ switch ( pci_conf_read16(0, 0, 0x1f, 0, PCI_DEVICE_ID) )
+ {
case 0x3a14:
case 0x3a16:
case 0x3a18:
@@ -491,154 +520,205 @@ static int __init ich10_bios_quirk(struct dmi_system_id *d)
#ifdef CONFIG_ACPI_SLEEP
static __init int reset_videomode_after_s3(struct dmi_blacklist *d)
{
- /* See acpi_wakeup.S */
- acpi_video_flags |= 2;
- return 0;
+ /* See acpi_wakeup.S */
+ acpi_video_flags |= 2;
+ return 0;
}
#endif
-static __init int dmi_disable_acpi(struct dmi_blacklist *d)
-{
- if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: acpi off\n",d->ident);
- disable_acpi();
- } else {
- printk(KERN_NOTICE
- "Warning: DMI blacklist says broken, but acpi forced\n");
- }
- return 0;
-}
+static __init int dmi_disable_acpi(struct dmi_blacklist *d)
+{
+ if (!acpi_force)
+ {
+ printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
+ disable_acpi();
+ }
+ else
+ {
+ printk(KERN_NOTICE
+ "Warning: DMI blacklist says broken, but acpi forced\n");
+ }
+ return 0;
+}
/*
* Limit ACPI to CPU enumeration for HT
*/
-static __init int force_acpi_ht(struct dmi_blacklist *d)
-{
- if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident);
- disable_acpi();
- acpi_ht = 1;
- } else {
- printk(KERN_NOTICE
- "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
- }
- return 0;
-}
+static __init int force_acpi_ht(struct dmi_blacklist *d)
+{
+ if (!acpi_force)
+ {
+ printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident);
+ disable_acpi();
+ acpi_ht = 1;
+ }
+ else
+ {
+ printk(KERN_NOTICE
+ "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
+ }
+ return 0;
+}
/*
* Process the DMI blacklists
*/
-
+
/*
- * This will be expanded over time to force things like the APM
+ * This will be expanded over time to force things like the APM
* interrupt mask settings according to the laptop
*/
-
-static __initdata struct dmi_blacklist dmi_blacklist[]={
- { broken_toshiba_keyboard, "Toshiba Satellite 4030cdt", { /* Keyboard generates spurious repeats */
- MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
- NO_MATCH, NO_MATCH, NO_MATCH
- } },
+static __initdata struct dmi_blacklist dmi_blacklist[] =
+{
+
+ {
+ broken_toshiba_keyboard, "Toshiba Satellite 4030cdt", { /* Keyboard generates spurious repeats */
+ MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
+ NO_MATCH, NO_MATCH, NO_MATCH
+ }
+ },
#ifdef CONFIG_ACPI_SLEEP
- { reset_videomode_after_s3, "Toshiba Satellite 4030cdt", { /* Reset video mode after returning from ACPI S3 sleep */
- MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
- NO_MATCH, NO_MATCH, NO_MATCH
- } },
+ {
+ reset_videomode_after_s3, "Toshiba Satellite 4030cdt", { /* Reset video mode after returning from ACPI S3 sleep */
+ MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
+ NO_MATCH, NO_MATCH, NO_MATCH
+ }
+ },
#endif
- { ich10_bios_quirk, "Intel board & BIOS",
- /*
- * BIOS leaves legacy USB emulation enabled while
- * SMM can't properly handle it.
- */
- {
- MATCH(DMI_BOARD_VENDOR, "Intel Corp"),
- MATCH(DMI_BIOS_VENDOR, "Intel Corp"),
- NO_MATCH, NO_MATCH
- }
- },
-
- /*
- * If your system is blacklisted here, but you find that acpi=force
- * works for you, please contact acpi-devel@sourceforge.net
- */
-
- /*
- * Boxes that need ACPI disabled
- */
-
- { dmi_disable_acpi, "IBM Thinkpad", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "2629H1G"),
- NO_MATCH, NO_MATCH }},
-
- /*
- * Boxes that need acpi=ht
- */
-
- { force_acpi_ht, "FSC Primergy T850", {
- MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "DELL GX240", {
- MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
- MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "HP VISUALIZE NT Workstation", {
- MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "Compaq Workstation W8000", {
- MATCH(DMI_SYS_VENDOR, "Compaq"),
- MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ASUS P4B266", {
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- MATCH(DMI_BOARD_NAME, "P4B266"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ASUS P2B-DS", {
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- MATCH(DMI_BOARD_NAME, "P2B-DS"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ASUS CUR-DLS", {
- MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- MATCH(DMI_BOARD_NAME, "CUR-DLS"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "ABIT i440BX-W83977", {
- MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
- MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM Bladecenter", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM eServer xSeries 360", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM eserver xSeries 330", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
- NO_MATCH, NO_MATCH }},
-
- { force_acpi_ht, "IBM eserver xSeries 440", {
- MATCH(DMI_BOARD_VENDOR, "IBM"),
- MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
- NO_MATCH, NO_MATCH }},
-
- { NULL, }
+ {
+ ich10_bios_quirk, "Intel board & BIOS",
+ /*
+ * BIOS leaves legacy USB emulation enabled while
+ * SMM can't properly handle it.
+ */
+ {
+ MATCH(DMI_BOARD_VENDOR, "Intel Corp"),
+ MATCH(DMI_BIOS_VENDOR, "Intel Corp"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ /*
+ * If your system is blacklisted here, but you find that acpi=force
+ * works for you, please contact acpi-devel@sourceforge.net
+ */
+
+ /*
+ * Boxes that need ACPI disabled
+ */
+
+ {
+ dmi_disable_acpi, "IBM Thinkpad", {
+ MATCH(DMI_BOARD_VENDOR, "IBM"),
+ MATCH(DMI_BOARD_NAME, "2629H1G"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ /*
+ * Boxes that need acpi=ht
+ */
+
+ {
+ force_acpi_ht, "FSC Primergy T850", {
+ MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "DELL GX240", {
+ MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
+ MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "HP VISUALIZE NT Workstation", {
+ MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "Compaq Workstation W8000", {
+ MATCH(DMI_SYS_VENDOR, "Compaq"),
+ MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "ASUS P4B266", {
+ MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ MATCH(DMI_BOARD_NAME, "P4B266"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "ASUS P2B-DS", {
+ MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ MATCH(DMI_BOARD_NAME, "P2B-DS"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "ASUS CUR-DLS", {
+ MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ MATCH(DMI_BOARD_NAME, "CUR-DLS"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "ABIT i440BX-W83977", {
+ MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
+ MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "IBM Bladecenter", {
+ MATCH(DMI_BOARD_VENDOR, "IBM"),
+ MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "IBM eServer xSeries 360", {
+ MATCH(DMI_BOARD_VENDOR, "IBM"),
+ MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "IBM eserver xSeries 330", {
+ MATCH(DMI_BOARD_VENDOR, "IBM"),
+ MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ {
+ force_acpi_ht, "IBM eserver xSeries 440", {
+ MATCH(DMI_BOARD_VENDOR, "IBM"),
+ MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
+ NO_MATCH, NO_MATCH
+ }
+ },
+
+ { NULL, }
};
/*
@@ -650,56 +730,56 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={
static void __init dmi_decode(struct dmi_header *dm)
{
#ifdef DMI_DEBUG
- u8 *data = (u8 *)dm;
+ u8 *data = (u8 *)dm;
#endif
-
- switch(dm->type)
- {
- case DMI_ENTRY_BIOS:
- dmi_printk(("BIOS Vendor: %s\n",
- dmi_string(dm, data[4])));
- dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
- dmi_printk(("BIOS Version: %s\n",
- dmi_string(dm, data[5])));
- dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
- dmi_printk(("BIOS Release: %s\n",
- dmi_string(dm, data[8])));
- dmi_save_ident(dm, DMI_BIOS_DATE, 8);
- break;
- case DMI_ENTRY_SYSTEM:
- dmi_printk(("System Vendor: %s\n",
- dmi_string(dm, data[4])));
- dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
- dmi_printk(("Product Name: %s\n",
- dmi_string(dm, data[5])));
- dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
- dmi_printk(("Version: %s\n",
- dmi_string(dm, data[6])));
- dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
- dmi_printk(("Serial Number: %s\n",
- dmi_string(dm, data[7])));
- break;
- case DMI_ENTRY_BASEBOARD:
- dmi_printk(("Board Vendor: %s\n",
- dmi_string(dm, data[4])));
- dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
- dmi_printk(("Board Name: %s\n",
- dmi_string(dm, data[5])));
- dmi_save_ident(dm, DMI_BOARD_NAME, 5);
- dmi_printk(("Board Version: %s\n",
- dmi_string(dm, data[6])));
- dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
- break;
- }
+
+ switch (dm->type)
+ {
+ case DMI_ENTRY_BIOS:
+ dmi_printk(("BIOS Vendor: %s\n",
+ dmi_string(dm, data[4])));
+ dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
+ dmi_printk(("BIOS Version: %s\n",
+ dmi_string(dm, data[5])));
+ dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
+ dmi_printk(("BIOS Release: %s\n",
+ dmi_string(dm, data[8])));
+ dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+ break;
+ case DMI_ENTRY_SYSTEM:
+ dmi_printk(("System Vendor: %s\n",
+ dmi_string(dm, data[4])));
+ dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
+ dmi_printk(("Product Name: %s\n",
+ dmi_string(dm, data[5])));
+ dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
+ dmi_printk(("Version: %s\n",
+ dmi_string(dm, data[6])));
+ dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
+ dmi_printk(("Serial Number: %s\n",
+ dmi_string(dm, data[7])));
+ break;
+ case DMI_ENTRY_BASEBOARD:
+ dmi_printk(("Board Vendor: %s\n",
+ dmi_string(dm, data[4])));
+ dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
+ dmi_printk(("Board Name: %s\n",
+ dmi_string(dm, data[5])));
+ dmi_save_ident(dm, DMI_BOARD_NAME, 5);
+ dmi_printk(("Board Version: %s\n",
+ dmi_string(dm, data[6])));
+ dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
+ break;
+ }
}
void __init dmi_scan_machine(void)
{
- if ((!efi_enabled(EFI_BOOT) ? dmi_iterate(dmi_decode) :
- dmi_efi_iterate(dmi_decode)) == 0)
- dmi_check_system(dmi_blacklist);
- else
- printk(KERN_INFO "DMI not present.\n");
+ if ((!efi_enabled(EFI_BOOT) ? dmi_iterate(dmi_decode) :
+ dmi_efi_iterate(dmi_decode)) == 0)
+ dmi_check_system(dmi_blacklist);
+ else
+ printk(KERN_INFO "DMI not present.\n");
}
@@ -713,26 +793,29 @@ void __init dmi_scan_machine(void)
*/
int __init dmi_check_system(struct dmi_system_id *list)
{
- int i, count = 0;
- struct dmi_system_id *d = list;
-
- while (d->ident) {
- for (i = 0; i < ARRAY_SIZE(d->matches); i++) {
- int s = d->matches[i].slot;
- if (s == DMI_NONE)
- continue;
- if (dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr))
- continue;
- /* No match */
- goto fail;
- }
- if (d->callback && d->callback(d))
- break;
- count++;
-fail: d++;
- }
-
- return count;
+ int i, count = 0;
+ struct dmi_system_id *d = list;
+
+ while (d->ident)
+ {
+ for (i = 0; i < ARRAY_SIZE(d->matches); i++)
+ {
+ int s = d->matches[i].slot;
+ if (s == DMI_NONE)
+ continue;
+ if (dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr))
+ continue;
+ /* No match */
+ goto fail;
+ }
+ if (d->callback && d->callback(d))
+ break;
+ count++;
+fail:
+ d++;
+ }
+
+ return count;
}
/**
@@ -755,54 +838,56 @@ fail: d++;
*/
bool __init dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
- int year = 0, month = 0, day = 0;
- bool exists;
- const char *s, *e, *y;
-
- s = field < DMI_STRING_MAX ? dmi_ident[field] : NULL;
- exists = !!s;
- if (!exists)
- goto out;
-
- /*
- * Determine year first. We assume the date string resembles
- * mm/dd/yy[yy] but the original code extracted only the year
- * from the end. Keep the behavior in the spirit of no
- * surprises.
- */
- y = strrchr(s, '/');
- if (!y)
- goto out;
-
- y++;
- year = simple_strtoul(y, &e, 10);
- if (y != e && year < 100) { /* 2-digit year */
- year += 1900;
- if (year < 1996) /* no dates < spec 1.0 */
- year += 100;
- }
- if (year > 9999) /* year should fit in %04d */
- year = 0;
-
- /* parse the mm and dd */
- month = simple_strtoul(s, &e, 10);
- if (s == e || *e != '/' || !month || month > 12) {
- month = 0;
- goto out;
- }
-
- s = e + 1;
- day = simple_strtoul(s, &e, 10);
- if (s == y || s == e || *e != '/' || day > 31)
- day = 0;
+ int year = 0, month = 0, day = 0;
+ bool exists;
+ const char *s, *e, *y;
+
+ s = field < DMI_STRING_MAX ? dmi_ident[field] : NULL;
+ exists = !!s;
+ if (!exists)
+ goto out;
+
+ /*
+ * Determine year first. We assume the date string resembles
+ * mm/dd/yy[yy] but the original code extracted only the year
+ * from the end. Keep the behavior in the spirit of no
+ * surprises.
+ */
+ y = strrchr(s, '/');
+ if (!y)
+ goto out;
+
+ y++;
+ year = simple_strtoul(y, &e, 10);
+ if (y != e && year < 100) /* 2-digit year */
+ {
+ year += 1900;
+ if (year < 1996) /* no dates < spec 1.0 */
+ year += 100;
+ }
+ if (year > 9999) /* year should fit in %04d */
+ year = 0;
+
+ /* parse the mm and dd */
+ month = simple_strtoul(s, &e, 10);
+ if (s == e || *e != '/' || !month || month > 12)
+ {
+ month = 0;
+ goto out;
+ }
+
+ s = e + 1;
+ day = simple_strtoul(s, &e, 10);
+ if (s == y || s == e || *e != '/' || day > 31)
+ day = 0;
out:
- if (yearp)
- *yearp = year;
- if (monthp)
- *monthp = month;
- if (dayp)
- *dayp = day;
- return exists;
+ if (yearp)
+ *yearp = year;
+ if (monthp)
+ *monthp = month;
+ if (dayp)
+ *dayp = day;
+ return exists;
}
void __init dmi_end_boot(void)
diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index c69570920c..4deec5e699 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -1,6 +1,6 @@
/******************************************************************************
* dom0_build.c
- *
+ *
* Copyright (c) 2002-2005, K A Fraser
*/
@@ -20,7 +20,8 @@
#include <asm/p2m.h>
#include <asm/setup.h>
-struct memsize {
+struct memsize
+{
long nr_pages;
unsigned int percent;
bool minus;
@@ -127,7 +128,8 @@ static int __init parse_dom0_mem(const char *s)
return 0;
}
- do {
+ do
+ {
if ( !strncmp(s, "min:", 4) )
ret = parse_amt(s + 4, &s, &dom0_min_size);
else if ( !strncmp(s, "max:", 4) )
@@ -164,12 +166,13 @@ custom_param("dom0_max_vcpus", parse_dom0_max_vcpus);
static __initdata unsigned int dom0_nr_pxms;
static __initdata unsigned int dom0_pxms[MAX_NUMNODES] =
- { [0 ... MAX_NUMNODES - 1] = ~0 };
+{ [0 ... MAX_NUMNODES - 1] = ~0 };
static __initdata bool dom0_affinity_relaxed;
static int __init parse_dom0_nodes(const char *s)
{
- do {
+ do
+ {
if ( isdigit(*s) )
{
if ( dom0_nr_pxms >= ARRAY_SIZE(dom0_pxms) )
@@ -208,9 +211,7 @@ struct vcpu *__init dom0_setup_vcpu(struct domain *d,
if ( v )
{
if ( pv_shim )
- {
sched_set_affinity(v, cpumask_of(vcpu_id), cpumask_of(vcpu_id));
- }
else
{
if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
@@ -251,7 +252,7 @@ unsigned int __init dom0_max_vcpus(void)
if ( nodes_empty(dom0_nodes) )
dom0_nodes = node_online_map;
for_each_node_mask ( node, dom0_nodes )
- cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
+ cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid);
if ( cpumask_empty(&dom0_cpus) )
cpumask_copy(&dom0_cpus, cpupool0->cpu_valid);
@@ -288,7 +289,8 @@ static int __init parse_dom0_param(const char *s)
const char *ss;
int rc = 0;
- do {
+ do
+ {
int val;
ss = strchr(s, ',');
@@ -345,8 +347,8 @@ unsigned long __init dom0_compute_nr_pages(
parse_dom0_mem(CONFIG_DOM0_MEM);
for_each_node_mask ( node, dom0_nodes )
- avail += avail_domheap_pages_region(node, 0, 0) +
- initial_images_nrpages(node);
+ avail += avail_domheap_pages_region(node, 0, 0) +
+ initial_images_nrpages(node);
/* Reserve memory for further dom0 vcpu-struct allocations... */
avail -= (d->max_vcpus - 1UL)
@@ -365,7 +367,7 @@ unsigned long __init dom0_compute_nr_pages(
}
need_paging = is_hvm_domain(d) &&
- (!iommu_hap_pt_share || !paging_mode_hap(d));
+ (!iommu_hap_pt_share || !paging_mode_hap(d));
for ( ; ; need_paging = false )
{
nr_pages = get_memsize(&dom0_size, avail);
@@ -380,7 +382,7 @@ unsigned long __init dom0_compute_nr_pages(
if ( !nr_pages )
{
nr_pages = avail - (pv_shim ? pv_shim_mem(avail)
- : min(avail / 16, 128UL << (20 - PAGE_SHIFT)));
+ : min(avail / 16, 128UL << (20 - PAGE_SHIFT)));
if ( is_hvm_domain(d) && !need_paging )
/*
* Temporary workaround message until internal (paging) memory
@@ -456,7 +458,7 @@ static void __init process_dom0_ioports_disable(struct domain *dom0)
io_from = simple_strtoul(t, &u, 16);
if ( u == t )
{
- parse_error:
+parse_error:
printk("Invalid ioport range <%s> "
"in dom0_ioports_disable, skipping\n", t);
continue;
@@ -473,7 +475,7 @@ static void __init process_dom0_ioports_disable(struct domain *dom0)
goto parse_error;
printk("Disabling dom0 access to ioport range %04lx-%04lx\n",
- io_from, io_to);
+ io_from, io_to);
if ( ioports_deny_access(dom0, io_from, io_to) != 0 )
BUG();
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 147f96a09e..bf507b9e0f 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -120,7 +120,7 @@ void play_dead(void)
* this case, heap corruption or #PF can occur (when heap debugging is
* enabled). For example, even printk() can involve tasklet scheduling,
* which touches per-cpu vars.
- *
+ *
* Consider very carefully when adding code to *dead_idle. Most hypervisor
* subsystems are unsafe to call.
*/
@@ -148,7 +148,7 @@ static void idle_loop(void)
* while we were scrubbing.
*/
else if ( !softirq_pending(cpu) && !scrub_free_pages() &&
- !softirq_pending(cpu) )
+ !softirq_pending(cpu) )
pm_idle();
do_softirq();
/*
@@ -196,9 +196,7 @@ void dump_pageframe_info(struct domain *d)
printk("Memory pages belonging to domain %u:\n", d->domain_id);
if ( d->tot_pages >= 10 && d->is_dying < DOMDYING_dead )
- {
printk(" DomPage list too long to display\n");
- }
else
{
unsigned long total[MASK_EXTR(PGT_type_mask, PGT_type_mask) + 1] = {};
@@ -301,7 +299,7 @@ struct domain *alloc_domain_struct(void)
static unsigned int __read_mostly bits;
if ( unlikely(!bits) )
- bits = _domain_struct_bits();
+ bits = _domain_struct_bits();
#endif
@@ -413,7 +411,7 @@ int arch_vcpu_create(struct vcpu *v)
return rc;
- fail:
+fail:
vcpu_destroy_fpu(v);
xfree(v->arch.msrs);
v->arch.msrs = NULL;
@@ -504,7 +502,8 @@ int arch_domain_create(struct domain *d,
/* Minimal initialisation for the idle domain. */
if ( unlikely(is_idle_domain(d)) )
{
- static const struct arch_csw idle_csw = {
+ static const struct arch_csw idle_csw =
+ {
.from = paravirt_ctxt_switch_from,
.to = paravirt_ctxt_switch_to,
.tail = continue_idle_domain,
@@ -635,7 +634,7 @@ int arch_domain_create(struct domain *d,
return 0;
- fail:
+fail:
d->is_dying = DOMDYING_dead;
psr_domain_free(d);
iommu_domain_destroy(d);
@@ -776,9 +775,9 @@ int arch_domain_soft_reset(struct domain *d)
" Dom%d's shared_info frame %lx\n", d->domain_id, gfn);
free_domheap_page(new_page);
}
- exit_put_gfn:
+exit_put_gfn:
put_gfn(d, gfn);
- exit_put_page:
+exit_put_page:
put_page(page);
return ret;
@@ -939,7 +938,9 @@ int arch_set_info_guest(
pfn = pagetable_get_pfn(v->arch.guest_table_user);
fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
}
- } else {
+ }
+ else
+ {
l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
pfn = l4e_get_pfn(*l4tab);
@@ -955,7 +956,7 @@ int arch_set_info_guest(
fail |= v->arch.pv.ldt_ents != c(ldt_ents);
if ( fail )
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
v->arch.pv.kernel_ss = c(kernel_ss);
@@ -1091,7 +1092,7 @@ int arch_set_info_guest(
if ( !rc )
rc = get_page_type_preemptible(cr3_page,
!compat ? PGT_root_page_table
- : PGT_l3_page_table);
+ : PGT_l3_page_table);
switch ( rc )
{
case -EINTR:
@@ -1128,7 +1129,7 @@ int arch_set_info_guest(
{
case -EINTR:
rc = -ERESTART;
- /* Fallthrough */
+ /* Fallthrough */
case -ERESTART:
v->arch.old_guest_ptpg = NULL;
v->arch.old_guest_table =
@@ -1146,7 +1147,7 @@ int arch_set_info_guest(
}
}
if ( !rc )
- v->arch.guest_table_user = pagetable_from_page(cr3_page);
+ v->arch.guest_table_user = pagetable_from_page(cr3_page);
}
}
else
@@ -1155,7 +1156,7 @@ int arch_set_info_guest(
l4tab = map_domain_page(pagetable_get_mfn(v->arch.guest_table));
*l4tab = l4e_from_mfn(page_to_mfn(cr3_page),
- _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
+ _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
unmap_domain_page(l4tab);
}
if ( rc )
@@ -1180,7 +1181,7 @@ int arch_set_info_guest(
update_cr3(v);
#endif /* CONFIG_PV */
- out:
+out:
if ( flags & VGCF_online )
clear_bit(_VPF_down, &v->pause_flags);
else
@@ -1333,9 +1334,9 @@ static void load_segments(struct vcpu *n)
!(read_cr4() & X86_CR4_FSGSBASE) && !((uregs->fs | uregs->gs) & ~3) )
{
unsigned long gsb = n->arch.flags & TF_kernel_mode
- ? n->arch.pv.gs_base_kernel : n->arch.pv.gs_base_user;
+ ? n->arch.pv.gs_base_kernel : n->arch.pv.gs_base_user;
unsigned long gss = n->arch.flags & TF_kernel_mode
- ? n->arch.pv.gs_base_user : n->arch.pv.gs_base_kernel;
+ ? n->arch.pv.gs_base_user : n->arch.pv.gs_base_kernel;
fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n),
uregs->fs, n->arch.pv.fs_base,
@@ -1424,7 +1425,7 @@ static void load_segments(struct vcpu *n)
/* CS longword also contains full evtchn_upcall_mask. */
cs_and_mask = (unsigned short)regs->cs |
- ((unsigned int)vcpu_info(n, evtchn_upcall_mask) << 16);
+ ((unsigned int)vcpu_info(n, evtchn_upcall_mask) << 16);
if ( !ring_1(regs) )
{
@@ -1467,7 +1468,7 @@ static void load_segments(struct vcpu *n)
/* CS longword also contains full evtchn_upcall_mask. */
cs_and_mask = (unsigned long)regs->cs |
- ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32);
+ ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32);
if ( put_user(regs->ss, rsp- 1) |
put_user(regs->rsp, rsp- 2) |
@@ -1543,7 +1544,7 @@ static void save_segments(struct vcpu *v)
v->arch.pv.gs_base_user = 0;
}
if ( v->arch.flags & TF_kernel_mode ? v->arch.pv.gs_base_kernel
- : v->arch.pv.gs_base_user )
+ : v->arch.pv.gs_base_user )
dirty_segment_mask |= DIRTY_GS_BASE;
this_cpu(dirty_segment_mask) = dirty_segment_mask;
@@ -1594,8 +1595,8 @@ bool update_runstate_area(struct vcpu *v)
if ( VM_ASSIST(v->domain, runstate_update_flag) )
{
guest_handle = has_32bit_shinfo(v->domain)
- ? &v->runstate_guest.compat.p->state_entry_time + 1
- : &v->runstate_guest.native.p->state_entry_time + 1;
+ ? &v->runstate_guest.compat.p->state_entry_time + 1
+ : &v->runstate_guest.native.p->state_entry_time + 1;
guest_handle--;
v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
__raw_copy_to_guest(guest_handle,
@@ -1667,12 +1668,13 @@ static void update_xen_slot_in_full_gdt(const struct vcpu *v, unsigned int cpu)
{
l1e_write(pv_gdt_ptes(v) + FIRST_RESERVED_GDT_PAGE,
!is_pv_32bit_vcpu(v) ? per_cpu(gdt_table_l1e, cpu)
- : per_cpu(compat_gdt_table_l1e, cpu));
+ : per_cpu(compat_gdt_table_l1e, cpu));
}
static void load_full_gdt(const struct vcpu *v)
{
- struct desc_ptr gdt_desc = {
+ struct desc_ptr gdt_desc =
+ {
.limit = LAST_RESERVED_GDT_BYTE,
.base = GDT_VIRT_START(v),
};
@@ -1682,7 +1684,8 @@ static void load_full_gdt(const struct vcpu *v)
static void load_default_gdt(unsigned int cpu)
{
- struct desc_ptr gdt_desc = {
+ struct desc_ptr gdt_desc =
+ {
.limit = LAST_RESERVED_GDT_BYTE,
.base = (unsigned long)(per_cpu(gdt_table, cpu) -
FIRST_RESERVED_GDT_ENTRY),
@@ -1796,9 +1799,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
if ( (per_cpu(curr_vcpu, cpu) == next) ||
(is_idle_domain(nextd) && cpu_online(cpu)) )
- {
local_irq_enable();
- }
else
{
__context_switch();
@@ -2007,7 +2008,7 @@ static int relinquish_memory(
/* list is empty at this point. */
page_list_move(list, &d->arch.relmem_list);
- out:
+out:
spin_unlock_recursive(&d->page_alloc_lock);
return ret;
}
@@ -2039,7 +2040,8 @@ int domain_relinquish_resources(struct domain *d)
#define PROGRESS(x) \
d->arch.rel_priv = PROG_ ## x; /* Fallthrough */ case PROG_ ## x
- enum {
+ enum
+ {
PROG_paging = 1,
PROG_vcpu_pagetables,
PROG_shared,
@@ -2055,17 +2057,17 @@ int domain_relinquish_resources(struct domain *d)
if ( ret )
return ret;
- PROGRESS(paging):
+ PROGRESS(paging):
- /* Tear down paging-assistance stuff. */
- ret = paging_teardown(d);
+ /* Tear down paging-assistance stuff. */
+ ret = paging_teardown(d);
if ( ret )
return ret;
- PROGRESS(vcpu_pagetables):
+ PROGRESS(vcpu_pagetables):
- /* Drop the in-use references to page-table bases. */
- for_each_vcpu ( d, v )
+ /* Drop the in-use references to page-table bases. */
+ for_each_vcpu ( d, v )
{
ret = vcpu_destroy_pagetables(v);
if ( ret )
@@ -2075,7 +2077,7 @@ int domain_relinquish_resources(struct domain *d)
if ( altp2m_active(d) )
{
for_each_vcpu ( d, v )
- altp2m_vcpu_disable_ve(v);
+ altp2m_vcpu_disable_ve(v);
}
if ( is_pv_domain(d) )
@@ -2096,9 +2098,9 @@ int domain_relinquish_resources(struct domain *d)
d->arch.auto_unmask = 0;
}
- PROGRESS(shared):
+ PROGRESS(shared):
- if ( is_hvm_domain(d) )
+ if ( is_hvm_domain(d) )
{
/* If the domain has shared pages, relinquish them allowing
* for preemption. */
@@ -2112,32 +2114,32 @@ int domain_relinquish_resources(struct domain *d)
INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
spin_unlock(&d->page_alloc_lock);
- PROGRESS(xen):
+ PROGRESS(xen):
- ret = relinquish_memory(d, &d->xenpage_list, ~0UL);
+ ret = relinquish_memory(d, &d->xenpage_list, ~0UL);
if ( ret )
return ret;
- PROGRESS(l4):
+ PROGRESS(l4):
- ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table);
+ ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table);
if ( ret )
return ret;
- PROGRESS(l3):
+ PROGRESS(l3):
- ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table);
+ ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table);
if ( ret )
return ret;
- PROGRESS(l2):
+ PROGRESS(l2):
- ret = relinquish_memory(d, &d->page_list, PGT_l2_page_table);
+ ret = relinquish_memory(d, &d->page_list, PGT_l2_page_table);
if ( ret )
return ret;
- PROGRESS(done):
- break;
+ PROGRESS(done):
+ break;
#undef PROGRESS
@@ -2182,7 +2184,7 @@ void vcpu_kick(struct vcpu *v)
* pending flag. These values may fluctuate (after all, we hold no
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
- *
+ *
* NB2. We save the running flag across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
@@ -2196,7 +2198,7 @@ void vcpu_kick(struct vcpu *v)
void vcpu_mark_events_pending(struct vcpu *v)
{
int already_pending = test_and_set_bit(
- 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+ 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
if ( already_pending )
return;
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 4a07cfb18e..273471be84 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -137,7 +137,8 @@ void *map_domain_page(mfn_t mfn)
{
/* Replace a hash entry instead. */
i = MAPHASH_HASHFN(mfn_x(mfn));
- do {
+ do
+ {
hashent = &vcache->hash[i];
if ( hashent->idx != MAPHASHENT_NOTINUSE && !hashent->refcnt )
{
@@ -168,7 +169,7 @@ void *map_domain_page(mfn_t mfn)
l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_mfn(mfn, __PAGE_HYPERVISOR_RW));
- out:
+out:
local_irq_restore(flags);
return (void *)MAPCACHE_VIRT_START + pfn_to_paddr(idx);
}
@@ -244,7 +245,7 @@ int mapcache_domain_init(struct domain *d)
#endif
BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
- 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) >
+ 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) >
MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20));
bitmap_pages = PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long));
dcache->inuse = (void *)MAPCACHE_VIRT_END + PAGE_SIZE;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index c827790202..a8b842d892 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -38,7 +38,7 @@
static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
{
- void * __user gva = (void *)iop->gva, * __user uva = (void *)iop->uva;
+    void *__user gva = (void *)iop->gva, *__user uva = (void *)iop->uva;
iop->remain = dbg_rw_mem(gva, uva, iop->len, domid,
!!iop->gwr, iop->pgd3val);
@@ -80,8 +80,9 @@ static int update_domain_cpuid_info(struct domain *d,
return 0;
break;
- case 0x40000000: case 0x40000100:
- /* Only care about the max_leaf limit. */
+ case 0x40000000:
+ case 0x40000100:
+ /* Only care about the max_leaf limit. */
case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
break;
@@ -247,7 +248,7 @@ static int update_domain_cpuid_info(struct domain *d,
struct vcpu *v;
for_each_vcpu ( d, v )
- vpmu_destroy(v);
+ vpmu_destroy(v);
}
break;
@@ -330,7 +331,7 @@ static int update_domain_cpuid_info(struct domain *d,
struct vcpu *v;
for_each_vcpu( d, v )
- cpuid_policy_updated(v);
+ cpuid_policy_updated(v);
}
return 0;
@@ -347,7 +348,8 @@ static int vcpu_set_vmce(struct vcpu *v,
#define VMCE_SIZE(field) \
(offsetof(typeof(evc->vmce), field) + sizeof(evc->vmce.field))
- static const unsigned int valid_sizes[] = {
+ static const unsigned int valid_sizes[] =
+ {
sizeof(evc->vmce),
VMCE_SIZE(mci_ctl2_bank1),
VMCE_SIZE(caps),
@@ -465,7 +467,7 @@ long arch_do_domctl(
}
else
{
- switch( page->u.inuse.type_info & PGT_type_mask )
+ switch ( page->u.inuse.type_info & PGT_type_mask )
{
case PGT_l1_page_table:
type = XEN_DOMCTL_PFINFO_L1TAB;
@@ -550,7 +552,7 @@ long arch_do_domctl(
ret = hvm_load(d, &c);
domain_unpause(d);
- sethvmcontext_out:
+sethvmcontext_out:
xfree(c.data);
break;
}
@@ -592,7 +594,7 @@ long arch_do_domctl(
if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
ret = -EFAULT;
- gethvmcontext_out:
+gethvmcontext_out:
copyback = true;
xfree(c.data);
break;
@@ -775,7 +777,7 @@ long arch_do_domctl(
ret = -EINVAL;
if ( ((fgp | fmp | (np - 1)) >= MAX_IOPORTS) ||
- ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
+ ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
{
printk(XENLOG_G_ERR
"ioport_map:invalid:dom%d gport=%x mport=%x nr=%x\n",
@@ -799,13 +801,13 @@ long arch_do_domctl(
d->domain_id, fgp, fmp, np);
list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list)
- if (g2m_ioport->mport == fmp )
- {
- g2m_ioport->gport = fgp;
- g2m_ioport->np = np;
- found = 1;
- break;
- }
+            if ( g2m_ioport->mport == fmp )
+ {
+ g2m_ioport->gport = fgp;
+ g2m_ioport->np = np;
+ found = 1;
+ break;
+ }
if ( !found )
{
g2m_ioport = xmalloc(struct g2m_ioport);
@@ -833,12 +835,12 @@ long arch_do_domctl(
"ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list)
- if ( g2m_ioport->mport == fmp )
- {
- list_del(&g2m_ioport->list);
- xfree(g2m_ioport);
- break;
- }
+ if ( g2m_ioport->mport == fmp )
+ {
+ list_del(&g2m_ioport->list);
+ xfree(g2m_ioport);
+ break;
+ }
ret = ioports_deny_access(d, fmp, fmp + np - 1);
if ( ret && is_hardware_domain(currd) )
printk(XENLOG_ERR
@@ -1016,7 +1018,7 @@ long arch_do_domctl(
domctl->u.gdbsx_guest_memio.remain = domctl->u.gdbsx_guest_memio.len;
ret = gdbsx_guest_mem_io(domctl->domain, &domctl->u.gdbsx_guest_memio);
if ( !ret )
- copyback = true;
+ copyback = true;
break;
case XEN_DOMCTL_gdbsx_pausevcpu:
@@ -1152,9 +1154,9 @@ long arch_do_domctl(
if ( copy_to_guest_offset(evc->buffer, offset, xsave_area,
xsave_size) )
- ret = -EFAULT;
+ ret = -EFAULT;
xfree(xsave_area);
- }
+ }
vcpu_unpause(v);
@@ -1230,7 +1232,7 @@ long arch_do_domctl(
#undef PV_XSAVE_HDR_SIZE
#undef PV_XSAVE_SIZE
- vcpuextstate_out:
+vcpuextstate_out:
if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate )
copyback = true;
break;
@@ -1276,7 +1278,8 @@ long arch_do_domctl(
struct xen_domctl_vcpu_msrs *vmsrs = &domctl->u.vcpu_msrs;
struct xen_domctl_vcpu_msr msr = {};
struct vcpu *v;
- static const uint32_t msrs_to_send[] = {
+ static const uint32_t msrs_to_send[] =
+ {
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
MSR_TSC_AUX,
@@ -1303,7 +1306,8 @@ long arch_do_domctl(
if ( domctl->cmd == XEN_DOMCTL_get_vcpu_msrs )
{
- ret = 0; copyback = true;
+ ret = 0;
+ copyback = true;
/* NULL guest handle is a request for max size. */
if ( guest_handle_is_null(vmsrs->msrs) )
@@ -1515,18 +1519,18 @@ long arch_do_domctl(
if ( guest_handle_is_null(domctl->u.cpu_policy.cpuid_policy) )
domctl->u.cpu_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES;
else if ( (ret = x86_cpuid_copy_to_buffer(
- d->arch.cpuid,
- domctl->u.cpu_policy.cpuid_policy,
- &domctl->u.cpu_policy.nr_leaves)) )
+ d->arch.cpuid,
+ domctl->u.cpu_policy.cpuid_policy,
+ &domctl->u.cpu_policy.nr_leaves)) )
break;
/* Process the MSR entries. */
if ( guest_handle_is_null(domctl->u.cpu_policy.msr_policy) )
domctl->u.cpu_policy.nr_msrs = MSR_MAX_SERIALISED_ENTRIES;
else if ( (ret = x86_msr_copy_to_buffer(
- d->arch.msr,
- domctl->u.cpu_policy.msr_policy,
- &domctl->u.cpu_policy.nr_msrs)) )
+ d->arch.msr,
+ domctl->u.cpu_policy.msr_policy,
+ &domctl->u.cpu_policy.nr_msrs)) )
break;
copyback = true;
@@ -1584,7 +1588,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
c(debugreg[i] = v->arch.dr[i]);
c(debugreg[6] = v->arch.dr6);
c(debugreg[7] = v->arch.dr7 |
- (is_pv_domain(d) ? v->arch.pv.dr7_emul : 0));
+ (is_pv_domain(d) ? v->arch.pv.dr7_emul : 0));
if ( is_hvm_domain(d) )
{
@@ -1655,7 +1659,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
if ( !compat )
{
c.nat->ctrlreg[3] = xen_pfn_to_cr3(
- pagetable_get_pfn(v->arch.guest_table));
+ pagetable_get_pfn(v->arch.guest_table));
c.nat->ctrlreg[1] =
pagetable_is_null(v->arch.guest_table_user) ? 0
: xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index 8e8a2c4e1b..20f70da386 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -44,30 +44,31 @@ struct e820map __initdata e820_raw;
*/
int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
- unsigned int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
-
- if (type && ei->type != type)
- continue;
- /* is the region (part) in overlap with the current region ?*/
- if (ei->addr >= end || ei->addr + ei->size <= start)
- continue;
-
- /* if the region is at the beginning of <start,end> we move
- * start to the end of the region since it's ok until there
- */
- if (ei->addr <= start)
- start = ei->addr + ei->size;
- /*
- * if start is now at or beyond end, we're done, full
- * coverage
- */
- if (start >= end)
- return 1;
- }
- return 0;
+ unsigned int i;
+
+ for (i = 0; i < e820.nr_map; i++)
+ {
+ struct e820entry *ei = &e820.map[i];
+
+ if (type && ei->type != type)
+ continue;
+ /* is the region (part) in overlap with the current region ?*/
+ if (ei->addr >= end || ei->addr + ei->size <= start)
+ continue;
+
+ /* if the region is at the beginning of <start,end> we move
+ * start to the end of the region since it's ok until there
+ */
+ if (ei->addr <= start)
+ start = ei->addr + ei->size;
+ /*
+ * if start is now at or beyond end, we're done, full
+ * coverage
+ */
+ if (start >= end)
+ return 1;
+ }
+ return 0;
}
static void __init add_memory_region(unsigned long long start,
@@ -75,7 +76,8 @@ static void __init add_memory_region(unsigned long long start,
{
unsigned int x = e820.nr_map;
- if (x == ARRAY_SIZE(e820.map)) {
+ if (x == ARRAY_SIZE(e820.map))
+ {
printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
return;
}
@@ -86,15 +88,18 @@ static void __init add_memory_region(unsigned long long start,
e820.nr_map++;
}
-static void __init print_e820_memory_map(struct e820entry *map, unsigned int entries)
+static void __init print_e820_memory_map(struct e820entry *map,
+ unsigned int entries)
{
unsigned int i;
- for (i = 0; i < entries; i++) {
+ for (i = 0; i < entries; i++)
+ {
printk(" %016Lx - %016Lx ",
(unsigned long long)(map[i].addr),
(unsigned long long)(map[i].addr + map[i].size));
- switch (map[i].type) {
+ switch (map[i].type)
+ {
case E820_RAM:
printk("(usable)\n");
break;
@@ -120,11 +125,12 @@ static void __init print_e820_memory_map(struct e820entry *map, unsigned int ent
/*
* Sanitize the BIOS e820 map.
*
- * Some e820 responses include overlapping entries. The following
+ * Some e820 responses include overlapping entries. The following
* replaces the original e820 map with a new one, removing overlaps.
*
*/
-struct change_member {
+struct change_member
+{
struct e820entry *pbios; /* pointer to original bios entry */
unsigned long long addr; /* address for this change point */
};
@@ -196,8 +202,10 @@ int __init sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map)
/* record all known change-points (starting and ending addresses),
omitting those that are for empty memory regions */
chgidx = 0;
- for (i=0; i < old_nr; i++) {
- if (biosmap[i].size != 0) {
+    for (i = 0; i < old_nr; i++)
+ {
+ if (biosmap[i].size != 0)
+ {
change_point[chgidx]->addr = biosmap[i].addr;
change_point[chgidx++]->pbios = &biosmap[i];
change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
@@ -208,16 +216,18 @@ int __init sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map)
/* sort change-point list by memory addresses (low -> high) */
still_changing = true;
- while (still_changing) {
+ while (still_changing)
+ {
still_changing = false;
- for (i=1; i < chg_nr; i++) {
+        for (i = 1; i < chg_nr; i++)
+ {
/* if <current_addr> > <last_addr>, swap */
/* or, if current=<start_addr> & last=<end_addr>, swap */
if ((change_point[i]->addr < change_point[i-1]->addr) ||
((change_point[i]->addr == change_point[i-1]->addr) &&
(change_point[i]->addr == change_point[i]->pbios->addr) &&
(change_point[i-1]->addr != change_point[i-1]->pbios->addr))
- )
+ )
{
change_tmp = change_point[i];
change_point[i] = change_point[i-1];
@@ -258,16 +268,19 @@ int __init sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map)
if (overlap_list[i]->type > current_type)
current_type = overlap_list[i]->type;
/* continue building up new bios map based on this information */
- if (current_type != last_type) {
- if (last_type != 0) {
+ if (current_type != last_type)
+ {
+ if (last_type != 0)
+ {
new_bios[new_bios_entry].size =
change_point[chgidx]->addr - last_addr;
- /* move forward only if the new size was non-zero */
+ /* move forward only if the new size was non-zero */
if (new_bios[new_bios_entry].size != 0)
if (++new_bios_entry >= ARRAY_SIZE(new_bios))
break; /* no more space left for new bios entries */
}
- if (current_type != 0) {
+ if (current_type != 0)
+ {
new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
new_bios[new_bios_entry].type = current_type;
last_addr=change_point[chgidx]->addr;
@@ -300,13 +313,14 @@ int __init sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map)
* thinkpad 560x, for example, does not cooperate with the memory
* detection code.)
*/
-static int __init copy_e820_map(struct e820entry * biosmap, unsigned int nr_map)
+static int __init copy_e820_map(struct e820entry *biosmap, unsigned int nr_map)
{
/* Only one memory region? Ignore it */
if (nr_map < 2)
return -1;
- do {
+ do
+ {
unsigned long long start = biosmap->addr;
unsigned long long size = biosmap->size;
unsigned long long end = start + size;
@@ -320,8 +334,10 @@ static int __init copy_e820_map(struct e820entry * biosmap, unsigned int nr_map)
* Some BIOSes claim RAM in the 640k - 1M region.
* Not right. Fix it up.
*/
- if (type == E820_RAM) {
- if (start < 0x100000ULL && end > 0xA0000ULL) {
+ if (type == E820_RAM)
+ {
+ if (start < 0x100000ULL && end > 0xA0000ULL)
+ {
if (start < 0xA0000ULL)
add_memory_region(start, 0xA0000ULL-start, type);
if (end <= 0x100000ULL)
@@ -331,7 +347,7 @@ static int __init copy_e820_map(struct e820entry * biosmap, unsigned int nr_map)
}
}
add_memory_region(start, size, type);
- } while (biosmap++,--nr_map);
+ } while (biosmap++, --nr_map);
return 0;
}
@@ -344,7 +360,8 @@ static unsigned long __init find_max_pfn(void)
unsigned int i;
unsigned long max_pfn = 0;
- for (i = 0; i < e820.nr_map; i++) {
+ for (i = 0; i < e820.nr_map; i++)
+ {
unsigned long start, end;
/* RAM? */
if (e820.map[i].type != E820_RAM)
@@ -376,10 +393,10 @@ static void __init clip_to_limit(uint64_t limit, char *warnmsg)
/* If none found, we are done. */
if ( i == e820.nr_map )
- break;
+ break;
old_limit = max_t(
- uint64_t, old_limit, e820.map[i].addr + e820.map[i].size);
+ uint64_t, old_limit, e820.map[i].addr + e820.map[i].size);
/* We try to convert clipped RAM areas to E820_UNUSABLE. */
if ( e820_change_range_type(&e820, max(e820.map[i].addr, limit),
@@ -388,13 +405,11 @@ static void __init clip_to_limit(uint64_t limit, char *warnmsg)
continue;
/*
- * If the type change fails (e.g., not space in table) then we clip or
+ * If the type change fails (e.g., not space in table) then we clip or
* delete the region as appropriate.
*/
if ( e820.map[i].addr < limit )
- {
e820.map[i].size = limit - e820.map[i].addr;
- }
else
{
memmove(&e820.map[i], &e820.map[i+1],
@@ -443,7 +458,7 @@ static uint64_t __init mtrr_top_of_ram(void)
/* Does the CPU support architectural MTRRs? */
cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
if ( !test_bit(X86_FEATURE_MTRR & 31, &edx) )
- return 0;
+ return 0;
/* Find the physical address size for this CPU. */
eax = cpuid_eax(0x80000000);
@@ -615,9 +630,7 @@ int __init e820_change_range_type(
return 0;
if ( (s == rs) && (e == re) )
- {
e820->map[i].type = new_type;
- }
else if ( (s == rs) || (e == re) )
{
if ( (e820->nr_map + 1) > ARRAY_SIZE(e820->map) )
@@ -674,7 +687,7 @@ int __init e820_change_range_type(
return 1;
- overflow:
+overflow:
printk("Overflow in e820 while reserving region %"PRIx64"-%"PRIx64"\n",
s, e);
return 0;
diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h
index 7a13a30bc0..5c3f874af6 100644
--- a/xen/arch/x86/efi/efi-boot.h
+++ b/xen/arch/x86/efi/efi-boot.h
@@ -10,7 +10,8 @@
#include <asm/processor.h>
static struct file __initdata ucode;
-static multiboot_info_t __initdata mbi = {
+static multiboot_info_t __initdata mbi =
+{
.flags = MBI_MODULES | MBI_LOADERNAME
};
/*
@@ -22,12 +23,12 @@ static module_t __initdata mb_modules[5];
static void __init edd_put_string(u8 *dst, size_t n, const char *src)
{
while ( n-- && *src )
- *dst++ = *src++;
+ *dst++ = *src++;
if ( *src )
- PrintErrMesg(L"Internal error populating EDD info",
- EFI_BUFFER_TOO_SMALL);
+ PrintErrMesg(L"Internal error populating EDD info",
+ EFI_BUFFER_TOO_SMALL);
while ( n-- )
- *dst++ = ' ';
+ *dst++ = ' ';
}
#define edd_put_string(d, s) edd_put_string(d, ARRAY_SIZE(d), s)
@@ -39,7 +40,8 @@ extern const intpte_t __page_tables_start[], __page_tables_end[];
#define PE_BASE_RELOC_HIGHLOW 3
#define PE_BASE_RELOC_DIR64 10
-extern const struct pe_base_relocs {
+extern const struct pe_base_relocs
+{
u32 rva;
u32 size;
u16 entries[];
@@ -170,23 +172,23 @@ static void __init efi_arch_process_memory_map(EFI_SYSTEM_TABLE *SystemTable,
case EfiBootServicesData:
if ( map_bs )
{
- default:
+ default:
type = E820_RESERVED;
break;
}
- /* fall through */
+ /* fall through */
case EfiConventionalMemory:
if ( !trampoline_phys && desc->PhysicalStart + len <= 0x100000 &&
len >= cfg.size && desc->PhysicalStart + len > cfg.addr )
cfg.addr = (desc->PhysicalStart + len - cfg.size) & PAGE_MASK;
- /* fall through */
+ /* fall through */
case EfiLoaderCode:
case EfiLoaderData:
if ( desc->Attribute & EFI_MEMORY_WB )
type = E820_RAM;
else
- case EfiUnusableMemory:
- type = E820_UNUSABLE;
+ case EfiUnusableMemory:
+ type = E820_UNUSABLE;
break;
case EfiACPIReclaimMemory:
type = E820_ACPI;
@@ -261,21 +263,23 @@ static void __init noreturn efi_arch_post_exit_boot(void)
"mov %[rip], (%%rsp)\n\t"
"lretq %[stkoff]-16"
: [rip] "=&r" (efer/* any dead 64-bit variable */),
- [cr4] "+&r" (cr4)
+ [cr4] "+&r" (cr4)
: [cr3] "r" (idle_pg_table),
- [cs] "ir" (__HYPERVISOR_CS),
- [ds] "r" (__HYPERVISOR_DS),
- [stkoff] "i" (STACK_SIZE - sizeof(struct cpu_info)),
- "D" (&mbi)
+ [cs] "ir" (__HYPERVISOR_CS),
+ [ds] "r" (__HYPERVISOR_DS),
+ [stkoff] "i" (STACK_SIZE - sizeof(struct cpu_info)),
+ "D" (&mbi)
: "memory" );
- for( ; ; ); /* not reached */
+ for ( ; ; ); /* not reached */
}
-static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle, char *section)
+static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle,
+ char *section)
{
}
-static void __init efi_arch_cfg_file_late(EFI_FILE_HANDLE dir_handle, char *section)
+static void __init efi_arch_cfg_file_late(EFI_FILE_HANDLE dir_handle,
+ char *section)
{
union string name;
@@ -460,7 +464,7 @@ static void __init efi_arch_edd(void)
boot_mbr_signature_nr < EDD_MBR_SIG_MAX )
{
struct mbr_signature *sig = boot_mbr_signature +
- boot_mbr_signature_nr;
+ boot_mbr_signature_nr;
sig->device = 0x80 + boot_edd_info_nr; /* fake */
memcpy(&sig->signature, devp.HardDrive->Signature,
@@ -533,7 +537,7 @@ static void __init efi_arch_video_init(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop,
&vga_console_info.u.vesa_lfb.rsvd_size);
if ( bpp > 0 )
break;
- /* fall through */
+ /* fall through */
default:
PrintErr(L"Current graphics mode is unsupported!\r\n");
bpp = 0;
@@ -675,7 +679,8 @@ static bool __init efi_arch_use_config_file(EFI_SYSTEM_TABLE *SystemTable)
static void __init efi_arch_flush_dcache_area(const void *vaddr, UINTN size) { }
-void __init efi_multiboot2(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable)
+void __init efi_multiboot2(EFI_HANDLE ImageHandle,
+ EFI_SYSTEM_TABLE *SystemTable)
{
EFI_GRAPHICS_OUTPUT_PROTOCOL *gop;
UINTN cols, gop_mode = ~0, rows;
diff --git a/xen/arch/x86/efi/mkreloc.c b/xen/arch/x86/efi/mkreloc.c
index 1aca79672a..e7116892ed 100644
--- a/xen/arch/x86/efi/mkreloc.c
+++ b/xen/arch/x86/efi/mkreloc.c
@@ -9,7 +9,8 @@
#include <sys/mman.h>
#include <unistd.h>
-struct mz_hdr {
+struct mz_hdr
+{
uint16_t signature;
#define MZ_SIGNATURE 0x5a4d
uint16_t last_page_size;
@@ -29,7 +30,8 @@ struct mz_hdr {
uint32_t extended_header_base;
};
-struct pe_hdr {
+struct pe_hdr
+{
uint32_t signature;
#define PE_SIGNATURE 0x00004550
uint16_t cpu;
@@ -39,7 +41,8 @@ struct pe_hdr {
uint32_t symbol_count;
uint16_t opt_hdr_size;
uint16_t flags;
- struct {
+ struct
+ {
uint16_t magic;
#define PE_MAGIC_EXE32 0x010b
#define PE_MAGIC_EXE32PLUS 0x020b
@@ -55,7 +58,8 @@ struct pe_hdr {
#define PE_BASE_RELOC_HIGHLOW 3
#define PE_BASE_RELOC_DIR64 10
-struct coff_section {
+struct coff_section
+{
char name[8];
uint32_t size;
uint32_t rva;
@@ -142,10 +146,10 @@ static unsigned int load(const char *name, int *handle,
}
if ( lseek(in,
mz_hdr.extended_header_base + offsetof(struct pe_hdr, opt_hdr) +
- pe_hdr.opt_hdr_size,
+ pe_hdr.opt_hdr_size,
SEEK_SET) < 0 ||
read(in, *sections, pe_hdr.section_count * sizeof(**sections)) !=
- pe_hdr.section_count * sizeof(**sections) )
+ pe_hdr.section_count * sizeof(**sections) )
{
perror(name);
exit(4);
@@ -211,13 +215,14 @@ static void diff_sections(const unsigned char *ptr1, const unsigned char *ptr2,
for ( i = 0; i < sec->file_size; ++i )
{
uint_fast32_t rva;
- union {
+ union
+ {
uint32_t u32;
uint64_t u64;
} val1, val2;
int_fast64_t delta;
unsigned int reloc = (width == 4 ? PE_BASE_RELOC_HIGHLOW :
- PE_BASE_RELOC_DIR64);
+ PE_BASE_RELOC_DIR64);
if ( ptr1[i] == ptr2[i] )
continue;
@@ -250,11 +255,11 @@ static void diff_sections(const unsigned char *ptr1, const unsigned char *ptr2,
reloc_size += reloc_size & 2;
if ( reloc_size )
printf("\t.equ rva_%08" PRIxFAST32 "_relocs,"
- " %#08" PRIxFAST32 "\n",
+ " %#08" PRIxFAST32 "\n",
cur_rva, reloc_size);
printf("\t.balign 4\n"
"\t.long %#08" PRIxFAST32 ","
- " rva_%08" PRIxFAST32 "_relocs\n",
+ " rva_%08" PRIxFAST32 "_relocs\n",
rva, rva);
cur_rva = rva;
reloc_size = 8;
diff --git a/xen/arch/x86/efi/stub.c b/xen/arch/x86/efi/stub.c
index c578bffc71..f14f5624ee 100644
--- a/xen/arch/x86/efi/stub.c
+++ b/xen/arch/x86/efi/stub.c
@@ -35,12 +35,12 @@ void __init noreturn efi_multiboot2(EFI_HANDLE ImageHandle,
* not be directly supported by C compiler.
*/
asm volatile(
- " call *%[outstr] \n"
- "0: hlt \n"
- " jmp 0b \n"
- : "+c" (StdErr), "=d" (StdErr) ASM_CALL_CONSTRAINT
- : "1" (err), [outstr] "rm" (StdErr->OutputString)
- : "rax", "r8", "r9", "r10", "r11", "memory");
+ " call *%[outstr] \n"
+ "0: hlt \n"
+ " jmp 0b \n"
+ : "+c" (StdErr), "=d" (StdErr) ASM_CALL_CONSTRAINT
+ : "1" (err), [outstr] "rm" (StdErr->OutputString)
+ : "rax", "r8", "r9", "r10", "r11", "memory");
unreachable();
}
@@ -74,7 +74,7 @@ int efi_get_info(uint32_t idx, union xenpf_efi_info *info)
}
int efi_compat_get_info(uint32_t idx, union compat_pf_efi_info *)
- __attribute__((__alias__("efi_get_info")));
+__attribute__((__alias__("efi_get_info")));
int efi_runtime_call(struct xenpf_efi_runtime_call *op)
{
@@ -82,4 +82,4 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op)
}
int efi_compat_runtime_call(struct compat_pf_efi_runtime_call *)
- __attribute__((__alias__("efi_runtime_call")));
+__attribute__((__alias__("efi_runtime_call")));
diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c
index 73be4188ad..767133eb3f 100644
--- a/xen/arch/x86/emul-i8254.c
+++ b/xen/arch/x86/emul-i8254.c
@@ -1,10 +1,10 @@
/*
* QEMU 8253/8254 interval timer emulation
- *
+ *
* Copyright (c) 2003-2004 Fabrice Bellard
* Copyright (c) 2006 Intel Corperation
* Copyright (c) 2007 Keir Fraser, XenSource Inc.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@@ -96,7 +96,7 @@ static int pit_get_out(PITState *pit, int channel)
ASSERT(spin_is_locked(&pit->lock));
- d = muldiv64(get_guest_time(v) - pit->count_load_time[channel],
+ d = muldiv64(get_guest_time(v) - pit->count_load_time[channel],
PIT_FREQ, SYSTEM_TIME_HZ);
switch ( s->mode )
@@ -190,7 +190,7 @@ static void pit_load_count(PITState *pit, int channel, int val)
case 3:
/* Periodic timer. */
TRACE_2D(TRC_HVM_EMUL_PIT_START_TIMER, period, period);
- create_periodic_time(v, &pit->pt0, period, period, 0, pit_time_fired,
+ create_periodic_time(v, &pit->pt0, period, period, 0, pit_time_fired,
&pit->count_load_time[channel], false);
break;
case 1:
@@ -271,9 +271,7 @@ static void pit_ioport_write(struct PITState *pit, uint32_t addr, uint32_t val)
s = &pit->hw.channels[channel];
access = (val >> 4) & 3;
if ( access == 0 )
- {
pit_latch_count(pit, channel);
- }
else
{
s->rw_mode = access;
@@ -318,7 +316,7 @@ static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr)
{
int ret, count;
struct hvm_hw_pit_channel *s;
-
+
addr &= 3;
s = &pit->hw.channels[addr];
@@ -401,7 +399,7 @@ static int pit_save(struct vcpu *v, hvm_domain_context_t *h)
return 0;
spin_lock(&pit->lock);
-
+
rc = hvm_save_entry(PIT, 0, h, &pit->hw);
spin_unlock(&pit->lock);
@@ -424,10 +422,10 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h)
spin_unlock(&pit->lock);
return 1;
}
-
+
/*
- * Recreate platform timers from hardware state. There will be some
- * time jitter here, but the wall-clock will have jumped massively, so
+ * Recreate platform timers from hardware state. There will be some
+ * time jitter here, but the wall-clock will have jumped massively, so
* we hope the guest can handle it.
*/
pit->pt0.last_plt_gtime = get_guest_time(d->vcpu[0]);
@@ -503,7 +501,7 @@ void pit_deinit(struct domain *d)
}
}
-/* the intercept action for PIT DM retval:0--not handled; 1--handled */
+/* the intercept action for PIT DM retval:0--not handled; 1--handled */
static int handle_pit_io(
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
@@ -517,9 +515,7 @@ static int handle_pit_io(
}
if ( dir == IOREQ_WRITE )
- {
pit_ioport_write(vpit, port, *val);
- }
else
{
if ( (port & 3) != 3 )
@@ -568,7 +564,8 @@ static int handle_speaker_io(
int pv_pit_handler(int port, int data, int write)
{
- ioreq_t ioreq = {
+ ioreq_t ioreq =
+ {
.size = 1,
.type = IOREQ_TYPE_PIO,
.addr = port,
diff --git a/xen/arch/x86/extable.c b/xen/arch/x86/extable.c
index 70972f1085..3b0ace3d6e 100644
--- a/xen/arch/x86/extable.c
+++ b/xen/arch/x86/extable.c
@@ -15,44 +15,44 @@
static inline unsigned long ex_addr(const struct exception_table_entry *x)
{
- return EX_FIELD(x, addr);
+ return EX_FIELD(x, addr);
}
static inline unsigned long ex_cont(const struct exception_table_entry *x)
{
- return EX_FIELD(x, cont);
+ return EX_FIELD(x, cont);
}
static int init_or_livepatch cmp_ex(const void *a, const void *b)
{
- const struct exception_table_entry *l = a, *r = b;
- unsigned long lip = ex_addr(l);
- unsigned long rip = ex_addr(r);
-
- /* avoid overflow */
- if (lip > rip)
- return 1;
- if (lip < rip)
- return -1;
- return 0;
+ const struct exception_table_entry *l = a, *r = b;
+ unsigned long lip = ex_addr(l);
+ unsigned long rip = ex_addr(r);
+
+ /* avoid overflow */
+ if (lip > rip)
+ return 1;
+ if (lip < rip)
+ return -1;
+ return 0;
}
#ifndef swap_ex
static void init_or_livepatch swap_ex(void *a, void *b, int size)
{
- struct exception_table_entry *l = a, *r = b, tmp;
- long delta = b - a;
-
- tmp = *l;
- l->addr = r->addr + delta;
- l->cont = r->cont + delta;
- r->addr = tmp.addr - delta;
- r->cont = tmp.cont - delta;
+ struct exception_table_entry *l = a, *r = b, tmp;
+ long delta = b - a;
+
+ tmp = *l;
+ l->addr = r->addr + delta;
+ l->cont = r->cont + delta;
+ r->addr = tmp.addr - delta;
+ r->cont = tmp.cont - delta;
}
#endif
void init_or_livepatch sort_exception_table(struct exception_table_entry *start,
- const struct exception_table_entry *stop)
+ const struct exception_table_entry *stop)
{
sort(start, stop - start,
sizeof(struct exception_table_entry), cmp_ex, swap_ex);
@@ -112,7 +112,8 @@ search_exception_table(const struct cpu_user_regs *regs)
* Put trap number and error code on the stack (in place of the
* original return address) for recovery code to pick up.
*/
- union stub_exception_token token = {
+ union stub_exception_token token =
+ {
.fields.ec = regs->error_code,
.fields.trapnr = regs->entry_vector,
};
@@ -128,21 +129,31 @@ search_exception_table(const struct cpu_user_regs *regs)
#ifndef NDEBUG
static int __init stub_selftest(void)
{
- static const struct {
+ static const struct
+ {
uint8_t opc[4];
uint64_t rax;
union stub_exception_token res;
- } tests[] __initconst = {
- { .opc = { 0x0f, 0xb9, 0xc3, 0xc3 }, /* ud1 */
- .res.fields.trapnr = TRAP_invalid_op },
- { .opc = { 0x90, 0x02, 0x00, 0xc3 }, /* nop; add (%rax),%al */
- .rax = 0x0123456789abcdef,
- .res.fields.trapnr = TRAP_gp_fault },
- { .opc = { 0x02, 0x04, 0x04, 0xc3 }, /* add (%rsp,%rax),%al */
- .rax = 0xfedcba9876543210,
- .res.fields.trapnr = TRAP_stack_error },
- { .opc = { 0xcc, 0xc3, 0xc3, 0xc3 }, /* int3 */
- .res.fields.trapnr = TRAP_int3 },
+ } tests[] __initconst =
+ {
+ {
+ .opc = { 0x0f, 0xb9, 0xc3, 0xc3 }, /* ud1 */
+ .res.fields.trapnr = TRAP_invalid_op
+ },
+ {
+ .opc = { 0x90, 0x02, 0x00, 0xc3 }, /* nop; add (%rax),%al */
+ .rax = 0x0123456789abcdef,
+ .res.fields.trapnr = TRAP_gp_fault
+ },
+ {
+ .opc = { 0x02, 0x04, 0x04, 0xc3 }, /* add (%rsp,%rax),%al */
+ .rax = 0xfedcba9876543210,
+ .res.fields.trapnr = TRAP_stack_error
+ },
+ {
+ .opc = { 0xcc, 0xc3, 0xc3, 0xc3 }, /* int3 */
+ .res.fields.trapnr = TRAP_int3
+ },
};
unsigned long addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2;
unsigned int i;
@@ -196,7 +207,7 @@ search_pre_exception_table(struct cpu_user_regs *regs)
{
unsigned long addr = regs->rip;
unsigned long fixup = search_one_extable(
- __start___pre_ex_table, __stop___pre_ex_table-1, addr);
+ __start___pre_ex_table, __stop___pre_ex_table-1, addr);
if ( fixup )
{
dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup));
diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index 4004129c49..a23f70c632 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -1,9 +1,9 @@
/******************************************************************************
* flushtlb.c
- *
+ *
* TLB flushes are timestamped using a global virtual 'clock' which ticks
* on any TLB flush on any processor.
- *
+ *
* Copyright (c) 2003-2006, K A Fraser
*/
@@ -29,7 +29,7 @@ DEFINE_PER_CPU(u32, tlbflush_time);
/*
* pre_flush(): Increment the virtual TLB-flush clock. Returns new clock value.
- *
+ *
* This must happen *before* we flush the TLB. If we do it after, we race other
* CPUs invalidating PTEs. For example, a page invalidated after the flush
* might get the old timestamp, but this CPU can speculatively fetch the
@@ -40,20 +40,20 @@ static u32 pre_flush(void)
u32 t, t1, t2;
t = tlbflush_clock;
- do {
+ do
+ {
t1 = t2 = t;
/* Clock wrapped: someone else is leading a global TLB shootdown. */
if ( unlikely(t1 == 0) )
goto skip_clocktick;
t2 = (t + 1) & WRAP_MASK;
- }
- while ( unlikely((t = cmpxchg(&tlbflush_clock, t1, t2)) != t1) );
+ } while ( unlikely((t = cmpxchg(&tlbflush_clock, t1, t2)) != t1) );
/* Clock wrapped: we will lead a global TLB shootdown. */
if ( unlikely(t2 == 0) )
raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
- skip_clocktick:
+skip_clocktick:
hvm_flush_guest_tlbs();
return t2;
@@ -61,14 +61,14 @@ static u32 pre_flush(void)
/*
* post_flush(): Update this CPU's timestamp with specified clock value.
- *
- * Note that this happens *after* flushing the TLB, as otherwise we can race a
- * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU
+ *
+ * Note that this happens *after* flushing the TLB, as otherwise we can race a
+ * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU
* stamp and so does not force a synchronous TLB flush, but the flush in this
- * function hasn't yet occurred and so the TLB might be stale). The ordering
- * would only actually matter if this function were interruptible, and
- * something that abuses the stale mapping could exist in an interrupt
- * handler. In fact neither of these is the case, so really we are being ultra
+ * function hasn't yet occurred and so the TLB might be stale). The ordering
+ * would only actually matter if this function were interruptible, and
+ * something that abuses the stale mapping could exist in an interrupt
+ * handler. In fact neither of these is the case, so really we are being ultra
* paranoid.
*/
static void post_flush(u32 t)
@@ -255,9 +255,7 @@ unsigned int flush_area_local(const void *va, unsigned int flags)
flags &= ~FLUSH_CACHE;
}
else
- {
wbinvd();
- }
}
if ( flags & FLUSH_ROOT_PGTBL )
diff --git a/xen/arch/x86/gdbstub.c b/xen/arch/x86/gdbstub.c
index ff9f7f9a69..4dc9be631f 100644
--- a/xen/arch/x86/gdbstub.c
+++ b/xen/arch/x86/gdbstub.c
@@ -1,7 +1,7 @@
/*
* x86-specific gdb stub routines
* based on x86 cdb(xen/arch/x86/cdb.c), but Extensively modified.
- *
+ *
* Copyright (C) 2006 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan. K.K.
*
@@ -9,12 +9,12 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>.
*/
@@ -36,7 +36,7 @@ gdb_arch_copy_from_user(void *dest, const void *src, unsigned len)
return __copy_from_user(dest, src, len);
}
-unsigned int
+unsigned int
gdb_arch_copy_to_user(void *dest, const void *src, unsigned len)
{
return __copy_to_user(dest, src, len);
@@ -60,7 +60,7 @@ gdb_arch_exit(struct cpu_user_regs *regs)
/* nothing */
}
-void
+void
gdb_arch_resume(struct cpu_user_regs *regs,
unsigned long addr, unsigned long type,
struct gdb_context *ctx)
diff --git a/xen/arch/x86/genapic/bigsmp.c b/xen/arch/x86/genapic/bigsmp.c
index 91a973ac16..9f3a5b3553 100644
--- a/xen/arch/x86/genapic/bigsmp.c
+++ b/xen/arch/x86/genapic/bigsmp.c
@@ -13,36 +13,40 @@
static __init int force_bigsmp(struct dmi_system_id *d)
{
- printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
- def_to_bigsmp = true;
- return 0;
+ printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
+ def_to_bigsmp = true;
+ return 0;
}
-static struct dmi_system_id __initdata bigsmp_dmi_table[] = {
- { force_bigsmp, "UNISYS ES7000-ONE", {
- DMI_MATCH(DMI_PRODUCT_NAME, "ES7000-ONE")
- }},
-
- { }
+static struct dmi_system_id __initdata bigsmp_dmi_table[] =
+{
+ {
+ force_bigsmp, "UNISYS ES7000-ONE", {
+ DMI_MATCH(DMI_PRODUCT_NAME, "ES7000-ONE")
+ }
+ },
+
+ { }
};
static __init int probe_bigsmp(void)
-{
- /*
- * We don't implement cluster mode, so force use of
- * physical mode in both cases.
- */
- if (acpi_gbl_FADT.flags &
- (ACPI_FADT_APIC_CLUSTER | ACPI_FADT_APIC_PHYSICAL))
- def_to_bigsmp = true;
- else if (!def_to_bigsmp)
- dmi_check_system(bigsmp_dmi_table);
- return def_to_bigsmp;
-}
+{
+ /*
+ * We don't implement cluster mode, so force use of
+ * physical mode in both cases.
+ */
+ if (acpi_gbl_FADT.flags &
+ (ACPI_FADT_APIC_CLUSTER | ACPI_FADT_APIC_PHYSICAL))
+ def_to_bigsmp = true;
+ else if (!def_to_bigsmp)
+ dmi_check_system(bigsmp_dmi_table);
+ return def_to_bigsmp;
+}
-const struct genapic __initconstrel apic_bigsmp = {
- APIC_INIT("bigsmp", probe_bigsmp),
- GENAPIC_PHYS
+const struct genapic __initconstrel apic_bigsmp =
+{
+ APIC_INIT("bigsmp", probe_bigsmp),
+ GENAPIC_PHYS
};
diff --git a/xen/arch/x86/genapic/default.c b/xen/arch/x86/genapic/default.c
index 53ebf20a3f..c1faae7b36 100644
--- a/xen/arch/x86/genapic/default.c
+++ b/xen/arch/x86/genapic/default.c
@@ -1,4 +1,4 @@
-/*
+/*
* Default generic APIC driver. This handles upto 8 CPUs.
*/
#include <xen/cpumask.h>
@@ -16,11 +16,12 @@
/* should be called last. */
static __init int probe_default(void)
-{
- return 1;
-}
+{
+ return 1;
+}
-const struct genapic __initconstrel apic_default = {
- APIC_INIT("default", probe_default),
- GENAPIC_FLAT
+const struct genapic __initconstrel apic_default =
+{
+ APIC_INIT("default", probe_default),
+ GENAPIC_FLAT
};
diff --git a/xen/arch/x86/genapic/delivery.c b/xen/arch/x86/genapic/delivery.c
index a86b8c9422..c99dc4c901 100644
--- a/xen/arch/x86/genapic/delivery.c
+++ b/xen/arch/x86/genapic/delivery.c
@@ -11,27 +11,27 @@
void init_apic_ldr_flat(void)
{
- unsigned long val;
+ unsigned long val;
- apic_write(APIC_DFR, APIC_DFR_FLAT);
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val |= SET_xAPIC_LOGICAL_ID(1UL << smp_processor_id());
- apic_write(APIC_LDR, val);
+ apic_write(APIC_DFR, APIC_DFR_FLAT);
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ val |= SET_xAPIC_LOGICAL_ID(1UL << smp_processor_id());
+ apic_write(APIC_LDR, val);
}
void __init clustered_apic_check_flat(void)
{
- printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics);
+ printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics);
}
const cpumask_t *vector_allocation_cpumask_flat(int cpu)
{
- return &cpu_online_map;
-}
+ return &cpu_online_map;
+}
unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask)
{
- return cpumask_bits(cpumask)[0]&0xFF;
+ return cpumask_bits(cpumask)[0]&0xFF;
}
/*
@@ -40,25 +40,25 @@ unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask)
void init_apic_ldr_phys(void)
{
- unsigned long val;
- apic_write(APIC_DFR, APIC_DFR_FLAT);
- /* A dummy logical ID should be fine. We only deliver in phys mode. */
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- apic_write(APIC_LDR, val);
+ unsigned long val;
+ apic_write(APIC_DFR, APIC_DFR_FLAT);
+ /* A dummy logical ID should be fine. We only deliver in phys mode. */
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ apic_write(APIC_LDR, val);
}
void __init clustered_apic_check_phys(void)
{
- printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics);
+ printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics);
}
const cpumask_t *vector_allocation_cpumask_phys(int cpu)
{
- return cpumask_of(cpu);
+ return cpumask_of(cpu);
}
unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask)
{
- /* As we are using single CPU as destination, pick only one CPU here */
- return cpu_physical_id(cpumask_any(cpumask));
+ /* As we are using single CPU as destination, pick only one CPU here */
+ return cpu_physical_id(cpumask_any(cpumask));
}
diff --git a/xen/arch/x86/genapic/probe.c b/xen/arch/x86/genapic/probe.c
index 1fcc1734f5..c293e40cfd 100644
--- a/xen/arch/x86/genapic/probe.c
+++ b/xen/arch/x86/genapic/probe.c
@@ -1,8 +1,8 @@
-/* Copyright 2003 Andi Kleen, SuSE Labs.
- * Subject to the GNU Public License, v.2
- *
+/* Copyright 2003 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
* Generic x86 APIC driver probe layer.
- */
+ */
#include <xen/cpumask.h>
#include <xen/string.h>
#include <xen/kernel.h>
@@ -17,99 +17,111 @@
struct genapic __read_mostly genapic;
-const struct genapic *const __initconstrel apic_probe[] = {
- &apic_bigsmp,
- &apic_default, /* must be last */
- NULL,
+const struct genapic *const __initconstrel apic_probe[] =
+{
+ &apic_bigsmp,
+ &apic_default, /* must be last */
+ NULL,
};
static bool_t __initdata cmdline_apic;
void __init generic_bigsmp_probe(void)
{
- /*
- * This routine is used to switch to bigsmp mode when
- * - There is no apic= option specified by the user
- * - generic_apic_probe() has choosen apic_default as the sub_arch
- * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
- */
-
- if (!cmdline_apic && genapic.name == apic_default.name)
- if (apic_bigsmp.probe()) {
- genapic = apic_bigsmp;
- printk(KERN_INFO "Overriding APIC driver with %s\n",
- genapic.name);
- }
+ /*
+ * This routine is used to switch to bigsmp mode when
+ * - There is no apic= option specified by the user
+ * - generic_apic_probe() has choosen apic_default as the sub_arch
+ * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
+ */
+
+ if (!cmdline_apic && genapic.name == apic_default.name)
+ if (apic_bigsmp.probe())
+ {
+ genapic = apic_bigsmp;
+ printk(KERN_INFO "Overriding APIC driver with %s\n",
+ genapic.name);
+ }
}
static int __init genapic_apic_force(const char *str)
{
- int i, rc = -EINVAL;
+ int i, rc = -EINVAL;
- for (i = 0; apic_probe[i]; i++)
- if (!strcmp(apic_probe[i]->name, str)) {
- genapic = *apic_probe[i];
- rc = 0;
- }
+ for (i = 0; apic_probe[i]; i++)
+ if (!strcmp(apic_probe[i]->name, str))
+ {
+ genapic = *apic_probe[i];
+ rc = 0;
+ }
- return rc;
+ return rc;
}
custom_param("apic", genapic_apic_force);
-void __init generic_apic_probe(void)
-{
- bool changed;
- int i;
-
- record_boot_APIC_mode();
-
- check_x2apic_preenabled();
- cmdline_apic = changed = !!genapic.name;
-
- for (i = 0; !changed && apic_probe[i]; i++) {
- if (apic_probe[i]->probe()) {
- changed = 1;
- genapic = *apic_probe[i];
- }
- }
- if (!changed)
- genapic = apic_default;
-
- printk(KERN_INFO "Using APIC driver %s\n", genapic.name);
-}
+void __init generic_apic_probe(void)
+{
+ bool changed;
+ int i;
+
+ record_boot_APIC_mode();
+
+ check_x2apic_preenabled();
+ cmdline_apic = changed = !!genapic.name;
+
+ for (i = 0; !changed && apic_probe[i]; i++)
+ {
+ if (apic_probe[i]->probe())
+ {
+ changed = 1;
+ genapic = *apic_probe[i];
+ }
+ }
+ if (!changed)
+ genapic = apic_default;
+
+ printk(KERN_INFO "Using APIC driver %s\n", genapic.name);
+}
/* These functions can switch the APIC even after the initial ->probe() */
-int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid)
-{
- int i;
- for (i = 0; apic_probe[i]; ++i) {
- if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) {
- if (!cmdline_apic &&
- genapic.name != apic_probe[i]->name) {
- genapic = *apic_probe[i];
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic.name);
- }
- return 1;
- }
- }
- return 0;
-}
+int __init mps_oem_check(struct mp_config_table *mpc, char *oem,
+ char *productid)
+{
+ int i;
+ for (i = 0; apic_probe[i]; ++i)
+ {
+ if (apic_probe[i]->mps_oem_check(mpc, oem, productid))
+ {
+ if (!cmdline_apic &&
+ genapic.name != apic_probe[i]->name)
+ {
+ genapic = *apic_probe[i];
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ genapic.name);
+ }
+ return 1;
+ }
+ }
+ return 0;
+}
int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
- int i;
- for (i = 0; apic_probe[i]; ++i) {
- if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
- if (!cmdline_apic &&
- genapic.name != apic_probe[i]->name) {
- genapic = *apic_probe[i];
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic.name);
- }
- return 1;
- }
- }
- return 0;
+ int i;
+ for (i = 0; apic_probe[i]; ++i)
+ {
+ if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
+ {
+ if (!cmdline_apic &&
+ genapic.name != apic_probe[i]->name)
+ {
+ genapic = *apic_probe[i];
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ genapic.name);
+ }
+ return 1;
+ }
+ }
+ return 0;
}
diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c
index 74907e6002..391058fc1d 100644
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -84,8 +84,8 @@ static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
const cpumask_t *cluster_cpus = per_cpu(cluster_cpus, cpu);
for_each_cpu ( cpu, cluster_cpus )
- if ( cpumask_test_cpu(cpu, cpumask) )
- dest |= per_cpu(cpu_2_logical_apicid, cpu);
+ if ( cpumask_test_cpu(cpu, cpumask) )
+ dest |= per_cpu(cpu_2_logical_apicid, cpu);
return dest;
}
@@ -105,7 +105,7 @@ static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector)
* Ensure that any synchronisation data written in program order by this
* CPU is seen by notified remote CPUs. The WRMSR contained within
* apic_icr_write() can otherwise be executed early.
- *
+ *
* The reason smp_mb() is sufficient here is subtle: the register arguments
* to WRMSR must depend on a memory read executed after the barrier. This
* is guaranteed by cpu_physical_id(), which reads from a global array (and
@@ -163,7 +163,8 @@ static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector)
local_irq_restore(flags);
}
-static const struct genapic __initconstrel apic_x2apic_phys = {
+static const struct genapic __initconstrel apic_x2apic_phys =
+{
APIC_INIT("x2apic_phys", NULL),
.int_delivery_mode = dest_Fixed,
.int_dest_mode = 0 /* physical delivery */,
@@ -175,7 +176,8 @@ static const struct genapic __initconstrel apic_x2apic_phys = {
.send_IPI_self = send_IPI_self_x2apic
};
-static const struct genapic __initconstrel apic_x2apic_cluster = {
+static const struct genapic __initconstrel apic_x2apic_cluster =
+{
APIC_INIT("x2apic_cluster", NULL),
.int_delivery_mode = dest_LowestPrio,
.int_dest_mode = 1 /* logical delivery */,
@@ -193,7 +195,8 @@ static int update_clusterinfo(
unsigned int cpu = (unsigned long)hcpu;
int err = 0;
- switch (action) {
+ switch (action)
+ {
case CPU_UP_PREPARE:
per_cpu(cpu_2_logical_apicid, cpu) = BAD_APICID;
if ( !cluster_cpus_spare )
@@ -220,11 +223,13 @@ static int update_clusterinfo(
return !err ? NOTIFY_DONE : notifier_from_errno(err);
}
-static struct notifier_block x2apic_cpu_nfb = {
- .notifier_call = update_clusterinfo
+static struct notifier_block x2apic_cpu_nfb =
+{
+ .notifier_call = update_clusterinfo
};
-static s8 __initdata x2apic_phys = -1; /* By default we use logical cluster mode. */
+static s8 __initdata x2apic_phys =
+ -1; /* By default we use logical cluster mode. */
boolean_param("x2apic_phys", x2apic_phys);
const struct genapic *__init apic_x2apic_probe(void)
diff --git a/xen/arch/x86/guest/pvh-boot.c b/xen/arch/x86/guest/pvh-boot.c
index ca8e156f7d..21055dc159 100644
--- a/xen/arch/x86/guest/pvh-boot.c
+++ b/xen/arch/x86/guest/pvh-boot.c
@@ -87,7 +87,8 @@ static void __init convert_pvh_info(multiboot_info_t **mbi,
static void __init get_memory_map(void)
{
- struct xen_memory_map memmap = {
+ struct xen_memory_map memmap =
+ {
.nr_entries = E820MAX,
};
diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index 7b7a5badab..317c289f48 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -90,7 +90,8 @@ void __init probe_hypervisor(void)
static void map_shared_info(void)
{
mfn_t mfn;
- struct xen_add_to_physmap xatp = {
+ struct xen_add_to_physmap xatp =
+ {
.domid = DOMID_SELF,
.space = XENMAPSPACE_shared_info,
};
@@ -239,7 +240,8 @@ static void init_evtchn(void)
/* Trick toolstack to think we are enlightened */
{
- struct xen_hvm_param a = {
+ struct xen_hvm_param a =
+ {
.domid = DOMID_SELF,
.index = HVM_PARAM_CALLBACK_IRQ,
.value = 1,
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 4b08488ef1..96160e5d26 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -163,7 +163,7 @@ static void evt_do_broadcast(cpumask_t *mask)
cpuidle_wakeup_mwait(mask);
if ( !cpumask_empty(mask) )
- cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
+ cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
}
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
@@ -211,7 +211,7 @@ again:
}
static void hpet_interrupt_handler(int irq, void *data,
- struct cpu_user_regs *regs)
+ struct cpu_user_regs *regs)
{
struct hpet_event_channel *ch = (struct hpet_event_channel *)data;
@@ -219,7 +219,8 @@ static void hpet_interrupt_handler(int irq, void *data,
if ( !ch->event_handler )
{
- printk(XENLOG_WARNING "Spurious HPET timer interrupt on HPET timer %d\n", ch->idx);
+ printk(XENLOG_WARNING "Spurious HPET timer interrupt on HPET timer %d\n",
+ ch->idx);
return;
}
@@ -311,7 +312,8 @@ static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
/*
* IRQ Chip for MSI HPET Devices,
*/
-static hw_irq_controller hpet_msi_type = {
+static hw_irq_controller hpet_msi_type =
+{
.typename = "HPET-MSI",
.startup = hpet_msi_startup,
.shutdown = hpet_msi_shutdown,
@@ -440,7 +442,8 @@ static struct hpet_event_channel *hpet_get_channel(unsigned int cpu)
if ( num_hpets_used >= nr_cpu_ids )
return &hpet_events[cpu];
- do {
+ do
+ {
next = next_channel;
if ( (i = next + 1) == num_hpets_used )
i = 0;
diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c
index 9d3c671a5f..4ded30eed1 100644
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -49,11 +49,12 @@ boolean_param("asid", opt_asid_enabled);
*/
/* Per-CPU ASID management. */
-struct hvm_asid_data {
- uint64_t core_asid_generation;
- uint32_t next_asid;
- uint32_t max_asid;
- bool_t disabled;
+struct hvm_asid_data
+{
+ uint64_t core_asid_generation;
+ uint32_t next_asid;
+ uint32_t max_asid;
+ bool_t disabled;
};
static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);
@@ -114,7 +115,7 @@ bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
{
struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
- /* On erratum #170 systems we must flush the TLB.
+ /* On erratum #170 systems we must flush the TLB.
* Generation overruns are taken here, too. */
if ( data->disabled )
goto disabled;
@@ -138,11 +139,11 @@ bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
/*
* When we assign ASID 1, flush all TLB entries as we are starting a new
- * generation, and all old ASID allocations are now stale.
+ * generation, and all old ASID allocations are now stale.
*/
return (asid->asid == 1);
- disabled:
+disabled:
asid->asid = 0;
return 0;
}
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d6d0e8be89..6f44633815 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -27,7 +27,8 @@
#include <xsm/xsm.h>
-struct dmop_args {
+struct dmop_args
+{
domid_t domid;
unsigned int nr_bufs;
/* Reserve enough buf elements for all current hypercalls. */
@@ -75,8 +76,8 @@ static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
return -EINVAL;
return shadow_mode_enabled(d) ?
- shadow_track_dirty_vram(d, first_pfn, nr, buf->h) :
- hap_track_dirty_vram(d, first_pfn, nr, buf->h);
+ shadow_track_dirty_vram(d, first_pfn, nr, buf->h) :
+ hap_track_dirty_vram(d, first_pfn, nr, buf->h);
}
static int set_pci_intx_level(struct domain *d, uint16_t domain,
@@ -237,7 +238,8 @@ static int set_mem_type(struct domain *d,
int rc = 0;
/* Interface types to internal p2m types */
- static const p2m_type_t memtype[] = {
+ static const p2m_type_t memtype[] =
+ {
[HVMMEM_ram_rw] = p2m_ram_rw,
[HVMMEM_ram_ro] = p2m_ram_ro,
[HVMMEM_mmio_dm] = p2m_mmio_dm,
@@ -343,7 +345,8 @@ static int dm_op(const struct dmop_args *op_args)
long rc;
size_t offset;
- static const uint8_t op_size[] = {
+ static const uint8_t op_size[] =
+ {
[XEN_DMOP_create_ioreq_server] = sizeof(struct xen_dm_op_create_ioreq_server),
[XEN_DMOP_get_ioreq_server_info] = sizeof(struct xen_dm_op_get_ioreq_server_info),
[XEN_DMOP_map_io_range_to_ioreq_server] = sizeof(struct xen_dm_op_ioreq_server_range),
@@ -408,7 +411,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_create_ioreq_server:
{
struct xen_dm_op_create_ioreq_server *data =
- &op.u.create_ioreq_server;
+ &op.u.create_ioreq_server;
const_op = false;
@@ -424,7 +427,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_get_ioreq_server_info:
{
struct xen_dm_op_get_ioreq_server_info *data =
- &op.u.get_ioreq_server_info;
+ &op.u.get_ioreq_server_info;
const uint16_t valid_flags = XEN_DMOP_no_gfns;
const_op = false;
@@ -445,7 +448,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_map_io_range_to_ioreq_server:
{
const struct xen_dm_op_ioreq_server_range *data =
- &op.u.map_io_range_to_ioreq_server;
+ &op.u.map_io_range_to_ioreq_server;
rc = -EINVAL;
if ( data->pad )
@@ -459,7 +462,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_unmap_io_range_from_ioreq_server:
{
const struct xen_dm_op_ioreq_server_range *data =
- &op.u.unmap_io_range_from_ioreq_server;
+ &op.u.unmap_io_range_from_ioreq_server;
rc = -EINVAL;
if ( data->pad )
@@ -473,7 +476,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_map_mem_type_to_ioreq_server:
{
struct xen_dm_op_map_mem_type_to_ioreq_server *data =
- &op.u.map_mem_type_to_ioreq_server;
+ &op.u.map_mem_type_to_ioreq_server;
unsigned long first_gfn = data->opaque;
const_op = false;
@@ -523,7 +526,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_set_ioreq_server_state:
{
const struct xen_dm_op_set_ioreq_server_state *data =
- &op.u.set_ioreq_server_state;
+ &op.u.set_ioreq_server_state;
rc = -EINVAL;
if ( data->pad )
@@ -536,7 +539,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_destroy_ioreq_server:
{
const struct xen_dm_op_destroy_ioreq_server *data =
- &op.u.destroy_ioreq_server;
+ &op.u.destroy_ioreq_server;
rc = -EINVAL;
if ( data->pad )
@@ -549,7 +552,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_track_dirty_vram:
{
const struct xen_dm_op_track_dirty_vram *data =
- &op.u.track_dirty_vram;
+ &op.u.track_dirty_vram;
rc = -EINVAL;
if ( data->pad )
@@ -565,7 +568,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_set_pci_intx_level:
{
const struct xen_dm_op_set_pci_intx_level *data =
- &op.u.set_pci_intx_level;
+ &op.u.set_pci_intx_level;
rc = set_pci_intx_level(d, data->domain, data->bus,
data->device, data->intx,
@@ -576,7 +579,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_set_isa_irq_level:
{
const struct xen_dm_op_set_isa_irq_level *data =
- &op.u.set_isa_irq_level;
+ &op.u.set_isa_irq_level;
rc = set_isa_irq_level(d, data->isa_irq, data->level);
break;
@@ -585,7 +588,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_set_pci_link_route:
{
const struct xen_dm_op_set_pci_link_route *data =
- &op.u.set_pci_link_route;
+ &op.u.set_pci_link_route;
rc = hvm_set_pci_link_route(d, data->link, data->isa_irq);
break;
@@ -594,7 +597,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_modified_memory:
{
struct xen_dm_op_modified_memory *data =
- &op.u.modified_memory;
+ &op.u.modified_memory;
rc = modified_memory(d, op_args, data);
const_op = !rc;
@@ -604,7 +607,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_set_mem_type:
{
struct xen_dm_op_set_mem_type *data =
- &op.u.set_mem_type;
+ &op.u.set_mem_type;
const_op = false;
@@ -619,7 +622,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_inject_event:
{
const struct xen_dm_op_inject_event *data =
- &op.u.inject_event;
+ &op.u.inject_event;
rc = -EINVAL;
if ( data->pad0 || data->pad1 )
@@ -632,7 +635,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_inject_msi:
{
const struct xen_dm_op_inject_msi *data =
- &op.u.inject_msi;
+ &op.u.inject_msi;
rc = -EINVAL;
if ( data->pad )
@@ -645,7 +648,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_remote_shutdown:
{
const struct xen_dm_op_remote_shutdown *data =
- &op.u.remote_shutdown;
+ &op.u.remote_shutdown;
domain_shutdown(d, data->reason);
rc = 0;
@@ -655,7 +658,8 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_relocate_memory:
{
struct xen_dm_op_relocate_memory *data = &op.u.relocate_memory;
- struct xen_add_to_physmap xatp = {
+ struct xen_add_to_physmap xatp =
+ {
.domid = op_args->domid,
.size = data->size,
.space = XENMAPSPACE_gmfn_range,
@@ -686,7 +690,7 @@ static int dm_op(const struct dmop_args *op_args)
case XEN_DMOP_pin_memory_cacheattr:
{
const struct xen_dm_op_pin_memory_cacheattr *data =
- &op.u.pin_memory_cacheattr;
+ &op.u.pin_memory_cacheattr;
if ( data->pad )
{
@@ -709,7 +713,7 @@ static int dm_op(const struct dmop_args *op_args)
(void *)&op.u, op_size[op.op]) )
rc = -EFAULT;
- out:
+out:
rcu_unlock_domain(d);
return rc;
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 8845399ae9..956b7b0a42 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -80,7 +80,7 @@ static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
for ( ; ; )
{
rc = map ? map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn))
- : unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
+ : unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
if ( rc == 0 )
break;
if ( rc < 0 )
@@ -103,10 +103,12 @@ static int __init pvh_populate_memory_range(struct domain *d,
unsigned long start,
unsigned long nr_pages)
{
- struct {
+ struct
+ {
unsigned long align;
unsigned int order;
- } static const __initconst orders[] = {
+ } static const __initconst orders[] =
+ {
/* NB: must be sorted by decreasing size. */
{ .align = PFN_DOWN(GB(1)), .order = PAGE_ORDER_1G },
{ .align = PFN_DOWN(MB(2)), .order = PAGE_ORDER_2M },
@@ -328,7 +330,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
put_page(mfn_to_page(mfn));
d->arch.hvm.params[HVM_PARAM_IDENT_PT] = gaddr;
if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) )
- printk("Unable to set identity page tables as reserved in the memory map\n");
+ printk("Unable to set identity page tables as reserved in the memory map\n");
return 0;
}
@@ -398,10 +400,8 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
cur_pages = nr_pages;
}
else
- {
cur_pages += pages;
- }
- next:
+next:
d->arch.nr_e820++;
entry_guest++;
ASSERT(d->arch.nr_e820 <= e820.nr_map + 1);
@@ -415,7 +415,8 @@ static void __init pvh_init_p2m(struct domain *d)
bool preempted;
pvh_setup_e820(d, nr_pages);
- do {
+ do
+ {
preempted = false;
paging_set_allocation(d, dom0_paging_pages(d, nr_pages),
&preempted);
@@ -461,10 +462,10 @@ static int __init pvh_populate_p2m(struct domain *d)
uint64_t end = min_t(uint64_t, MB(1),
d->arch.e820[i].addr + d->arch.e820[i].size);
enum hvm_translation_result res =
- hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)),
- mfn_to_virt(addr),
- d->arch.e820[i].addr - end,
- v);
+ hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)),
+ mfn_to_virt(addr),
+ d->arch.e820[i].addr - end,
+ v);
if ( res != HVMTRANS_okay )
printk("Failed to copy [%#lx, %#lx): %d\n",
@@ -620,7 +621,8 @@ static int __init pvh_setup_cpus(struct domain *d, paddr_t entry,
* This sets the vCPU state according to the state described in
* docs/misc/hvmlite.markdown.
*/
- vcpu_hvm_context_t cpu_ctx = {
+ vcpu_hvm_context_t cpu_ctx =
+ {
.mode = VCPU_HVM_MODE_32B,
.cpu_regs.x86_32.ebx = start_info,
.cpu_regs.x86_32.eip = entry,
@@ -668,7 +670,7 @@ static int __init pvh_setup_cpus(struct domain *d, paddr_t entry,
}
static int __init acpi_count_intr_ovr(struct acpi_subtable_header *header,
- const unsigned long end)
+ const unsigned long end)
{
acpi_intr_overrides++;
return 0;
@@ -819,7 +821,7 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
rc = 0;
- out:
+out:
xfree(madt);
return rc;
@@ -843,7 +845,8 @@ static bool __init pvh_acpi_table_allowed(const char *sig,
unsigned long address,
unsigned long size)
{
- static const char __initconst allowed_tables[][ACPI_NAME_SIZE] = {
+ static const char __initconst allowed_tables[][ACPI_NAME_SIZE] =
+ {
ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_FACS, ACPI_SIG_PSDT,
ACPI_SIG_SSDT, ACPI_SIG_SBST, ACPI_SIG_MCFG, ACPI_SIG_SLIC,
ACPI_SIG_MSDM, ACPI_SIG_WDAT, ACPI_SIG_FPDT, ACPI_SIG_S3PT,
@@ -901,7 +904,7 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr,
acpi_dmar_reinstate();
/* Count the number of tables that will be added to the XSDT. */
- for( i = 0; i < acpi_gbl_root_table_list.count; i++ )
+ for ( i = 0; i < acpi_gbl_root_table_list.count; i++ )
{
if ( pvh_acpi_xsdt_table_allowed(tables[i].signature.ascii,
tables[i].address, tables[i].length) )
@@ -947,7 +950,7 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr,
xsdt->table_offset_entry[0] = madt_addr;
/* Copy the addresses of the rest of the allowed tables. */
- for( i = 0, j = 1; i < acpi_gbl_root_table_list.count; i++ )
+ for ( i = 0, j = 1; i < acpi_gbl_root_table_list.count; i++ )
{
if ( pvh_acpi_xsdt_table_allowed(tables[i].signature.ascii,
tables[i].address, tables[i].length) )
@@ -983,7 +986,7 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr,
rc = 0;
- out:
+out:
xfree(xsdt);
return rc;
@@ -995,7 +998,8 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info)
paddr_t madt_paddr, xsdt_paddr, rsdp_paddr;
unsigned int i;
int rc;
- struct acpi_table_rsdp *native_rsdp, rsdp = {
+ struct acpi_table_rsdp *native_rsdp, rsdp =
+ {
.signature = ACPI_SIG_RSDP,
.revision = 2,
.length = sizeof(rsdp),
@@ -1003,7 +1007,7 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info)
/* Scan top-level tables and add their regions to the guest memory map. */
- for( i = 0; i < acpi_gbl_root_table_list.count; i++ )
+ for ( i = 0; i < acpi_gbl_root_table_list.count; i++ )
{
const char *sig = acpi_gbl_root_table_list.tables[i].signature.ascii;
unsigned long addr = acpi_gbl_root_table_list.tables[i].address;
@@ -1017,7 +1021,7 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info)
if ( strncmp(sig, ACPI_SIG_MADT, ACPI_NAME_SIZE)
? pvh_acpi_table_allowed(sig, addr, size)
: !acpi_memory_banned(addr, size) )
- pvh_add_mem_range(d, addr, addr + size, E820_ACPI);
+ pvh_add_mem_range(d, addr, addr + size, E820_ACPI);
}
/* Identity map ACPI e820 regions. */
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 5d5a746a25..970555e43e 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -137,12 +137,12 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
return -EINVAL;
#define SEG(s, r) ({ \
- s = (struct segment_register) \
- { 0, { (r)->s ## _ar }, (r)->s ## _limit, (r)->s ## _base }; \
- /* Set accessed / busy bit for present segments. */ \
- if ( s.p ) \
- s.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2); \
- check_segment(&s, x86_seg_ ## s); })
+s = (struct segment_register) \
+ { 0, { (r)->s ## _ar }, (r)->s ## _limit, (r)->s ## _base }; \
+/* Set accessed / busy bit for present segments. */ \
+if ( s.p ) \
+ s.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2); \
+check_segment(&s, x86_seg_ ## s); })
rc = SEG(cs, regs);
rc |= SEG(ds, regs);
@@ -284,7 +284,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
if ( errstr )
{
gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
- v->arch.hvm.guest_efer, errstr);
+ v->arch.hvm.guest_efer, errstr);
return -EINVAL;
}
@@ -297,8 +297,8 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
struct page_info *page = get_page_from_gfn(v->domain,
- v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
- NULL, P2M_ALLOC);
+ v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
if ( !page )
{
gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 8659c89862..e2aa676353 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1,10 +1,10 @@
/******************************************************************************
* hvm/emulate.c
- *
+ *
* HVM instruction emulation. Used for MMIO and VMX real mode.
- *
+ *
* Copyright (c) 2008, Citrix Systems, Inc.
- *
+ *
* Authors:
* Keir Fraser <keir@xen.org>
*/
@@ -91,19 +91,21 @@ static int set_context_data(void *buffer, unsigned int size)
return X86EMUL_UNHANDLEABLE;
}
-static const struct hvm_io_ops null_ops = {
+static const struct hvm_io_ops null_ops =
+{
.read = null_read,
.write = null_write
};
-static const struct hvm_io_handler null_handler = {
+static const struct hvm_io_handler null_handler =
+{
.ops = &null_ops
};
static int ioreq_server_read(const struct hvm_io_handler *io_handler,
- uint64_t addr,
- uint32_t size,
- uint64_t *data)
+ uint64_t addr,
+ uint32_t size,
+ uint64_t *data)
{
if ( hvm_copy_from_guest_phys(data, addr, size) != HVMTRANS_okay )
return X86EMUL_UNHANDLEABLE;
@@ -111,12 +113,14 @@ static int ioreq_server_read(const struct hvm_io_handler *io_handler,
return X86EMUL_OKAY;
}
-static const struct hvm_io_ops ioreq_server_ops = {
+static const struct hvm_io_ops ioreq_server_ops =
+{
.read = ioreq_server_read,
.write = null_write
};
-static const struct hvm_io_handler ioreq_server_handler = {
+static const struct hvm_io_handler ioreq_server_handler =
+{
.ops = &ioreq_server_ops
};
@@ -127,7 +131,8 @@ static int hvmemul_do_io(
struct vcpu *curr = current;
struct domain *currd = curr->domain;
struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
- ioreq_t p = {
+ ioreq_t p =
+ {
.type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
.addr = addr,
.size = size,
@@ -310,7 +315,7 @@ static int hvmemul_do_io(
}
case X86EMUL_UNIMPLEMENTED:
ASSERT_UNREACHABLE();
- /* Fall-through */
+ /* Fall-through */
default:
BUG();
}
@@ -320,7 +325,7 @@ static int hvmemul_do_io(
if ( rc != X86EMUL_OKAY )
return rc;
- finish_access:
+finish_access:
if ( dir == IOREQ_READ )
{
hvmtrace_io_assist(&p);
@@ -367,7 +372,7 @@ static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page)
default:
ASSERT_UNREACHABLE();
- /* Fallthrough */
+ /* Fallthrough */
case -EINVAL:
return X86EMUL_UNHANDLEABLE;
@@ -443,7 +448,7 @@ static int hvmemul_do_io_addr(
*reps = count;
- out:
+out:
while ( nr_pages )
hvmemul_release_page(ram_page[--nr_pages]);
@@ -545,7 +550,7 @@ static void *hvmemul_map_linear_addr(
struct vcpu *curr = current;
void *err, *mapping;
unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
- (linear >> PAGE_SHIFT) + 1;
+ (linear >> PAGE_SHIFT) + 1;
unsigned int i;
/*
@@ -648,10 +653,10 @@ static void *hvmemul_map_linear_addr(
#endif
return mapping + (linear & ~PAGE_MASK);
- unhandleable:
+unhandleable:
err = ERR_PTR(~X86EMUL_UNHANDLEABLE);
- out:
+out:
/* Drop all held references. */
while ( mfn-- > hvmemul_ctxt->mfn )
put_page(mfn_to_page(*mfn));
@@ -665,7 +670,7 @@ static void hvmemul_unmap_linear_addr(
{
struct domain *currd = current->domain;
unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
- (linear >> PAGE_SHIFT) + 1;
+ (linear >> PAGE_SHIFT) + 1;
unsigned int i;
mfn_t *mfn = &hvmemul_ctxt->mfn[0];
@@ -734,7 +739,7 @@ static int hvmemul_linear_to_phys(
paddr_t _paddr;
unsigned long one_rep = 1;
int rc = hvmemul_linear_to_phys(
- addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
+ addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
pfn = _paddr >> PAGE_SHIFT;
@@ -782,7 +787,7 @@ static int hvmemul_linear_to_phys(
*paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
return X86EMUL_OKAY;
}
-
+
static int hvmemul_virtual_to_linear(
enum x86_segment seg,
@@ -811,7 +816,7 @@ static int hvmemul_virtual_to_linear(
*/
if ( unlikely(current->domain->arch.mem_access_emulate_each_rep) &&
current->arch.vm_event->emulate_flags != 0 )
- max_reps = 1;
+ max_reps = 1;
/*
* Clip repetitions to avoid overflow when multiplying by @bytes_per_rep.
@@ -832,9 +837,9 @@ static int hvmemul_virtual_to_linear(
*/
ASSERT(offset >= ((*reps - 1) * bytes_per_rep));
okay = hvm_virtual_to_linear_addr(
- seg, reg, offset - (*reps - 1) * bytes_per_rep,
- *reps * bytes_per_rep, access_type,
- hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
+ seg, reg, offset - (*reps - 1) * bytes_per_rep,
+ *reps * bytes_per_rep, access_type,
+ hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
*linear += (*reps - 1) * bytes_per_rep;
if ( hvmemul_ctxt->ctxt.addr_size != 64 )
*linear = (uint32_t)*linear;
@@ -842,8 +847,8 @@ static int hvmemul_virtual_to_linear(
else
{
okay = hvm_virtual_to_linear_addr(
- seg, reg, offset, *reps * bytes_per_rep, access_type,
- hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
+ seg, reg, offset, *reps * bytes_per_rep, access_type,
+ hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
}
if ( okay )
@@ -970,7 +975,7 @@ static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
return NULL;
i = vio->mmio_cache_count;
- if( i == ARRAY_SIZE(vio->mmio_cache) )
+ if ( i == ARRAY_SIZE(vio->mmio_cache) )
return NULL;
++vio->mmio_cache_count;
@@ -992,9 +997,12 @@ static void latch_linear_to_phys(struct hvm_vcpu_io *vio, unsigned long gla,
vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = PFN_DOWN(gpa);
- vio->mmio_access = (struct npfec){ .gla_valid = 1,
- .read_access = 1,
- .write_access = write };
+ vio->mmio_access = (struct npfec)
+ {
+ .gla_valid = 1,
+ .read_access = 1,
+ .write_access = write
+ };
}
static int hvmemul_linear_mmio_access(
@@ -1082,7 +1090,7 @@ static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec)
return false;
}
else if ( !vio->mmio_access.read_access )
- return false;
+ return false;
return (vio->mmio_gla == (addr & PAGE_MASK) &&
(addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
@@ -1213,7 +1221,7 @@ static int __hvmemul_read(
pfec |= PFEC_insn_fetch;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
+ seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
@@ -1234,8 +1242,8 @@ static int hvmemul_read(
return set_context_data(p_data, bytes);
return __hvmemul_read(
- seg, offset, p_data, bytes, hvm_access_read,
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
+ seg, offset, p_data, bytes, hvm_access_read,
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
}
int hvmemul_insn_fetch(
@@ -1305,7 +1313,7 @@ static int hvmemul_write(
pfec |= PFEC_user_mode;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+ seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
@@ -1313,7 +1321,7 @@ static int hvmemul_write(
{
mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
if ( IS_ERR(mapping) )
- return ~PTR_ERR(mapping);
+ return ~PTR_ERR(mapping);
}
if ( !mapping )
@@ -1342,7 +1350,7 @@ static int hvmemul_rmw(
void *mapping = NULL;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+ seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
@@ -1402,13 +1410,13 @@ static int hvmemul_rep_ins_discard(
}
static int hvmemul_rep_movs_discard(
- enum x86_segment src_seg,
- unsigned long src_offset,
- enum x86_segment dst_seg,
- unsigned long dst_offset,
- unsigned int bytes_per_rep,
- unsigned long *reps,
- struct x86_emulate_ctxt *ctxt)
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ enum x86_segment dst_seg,
+ unsigned long dst_offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
{
return X86EMUL_OKAY;
}
@@ -1498,7 +1506,7 @@ static int hvmemul_cmpxchg(
void *mapping = NULL;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+ seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -1525,7 +1533,10 @@ static int hvmemul_cmpxchg(
switch ( bytes )
{
- case 1: case 2: case 4: case 8:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
{
unsigned long old = 0, new = 0, cur;
@@ -1601,8 +1612,8 @@ static int hvmemul_rep_ins(
int rc;
rc = hvmemul_virtual_to_linear(
- dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
- hvmemul_ctxt, &addr);
+ dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
+ hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -1610,7 +1621,7 @@ static int hvmemul_rep_ins(
pfec |= PFEC_user_mode;
rc = hvmemul_linear_to_phys(
- addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
+ addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -1682,8 +1693,8 @@ static int hvmemul_rep_outs(
return hvmemul_rep_outs_set_context(dst_port, bytes_per_rep, reps);
rc = hvmemul_virtual_to_linear(
- src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
- hvmemul_ctxt, &addr);
+ src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
+ hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -1691,7 +1702,7 @@ static int hvmemul_rep_outs(
pfec |= PFEC_user_mode;
rc = hvmemul_linear_to_phys(
- addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
+ addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -1704,13 +1715,13 @@ static int hvmemul_rep_outs(
}
static int hvmemul_rep_movs(
- enum x86_segment src_seg,
- unsigned long src_offset,
- enum x86_segment dst_seg,
- unsigned long dst_offset,
- unsigned int bytes_per_rep,
- unsigned long *reps,
- struct x86_emulate_ctxt *ctxt)
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ enum x86_segment dst_seg,
+ unsigned long dst_offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
@@ -1723,14 +1734,14 @@ static int hvmemul_rep_movs(
char *buf;
rc = hvmemul_virtual_to_linear(
- src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
- hvmemul_ctxt, &saddr);
+ src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
+ hvmemul_ctxt, &saddr);
if ( rc != X86EMUL_OKAY )
return rc;
rc = hvmemul_virtual_to_linear(
- dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
- hvmemul_ctxt, &daddr);
+ dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
+ hvmemul_ctxt, &daddr);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -1785,14 +1796,14 @@ static int hvmemul_rep_movs(
{
latch_linear_to_phys(vio, saddr, sgpa, 0);
return hvmemul_do_mmio_addr(
- sgpa, reps, bytes_per_rep, IOREQ_READ, df, dgpa);
+ sgpa, reps, bytes_per_rep, IOREQ_READ, df, dgpa);
}
if ( dp2mt == p2m_mmio_dm )
{
latch_linear_to_phys(vio, daddr, dgpa, 1);
return hvmemul_do_mmio_addr(
- dgpa, reps, bytes_per_rep, IOREQ_WRITE, df, sgpa);
+ dgpa, reps, bytes_per_rep, IOREQ_WRITE, df, sgpa);
}
/* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). */
@@ -1935,10 +1946,10 @@ static int hvmemul_rep_stos(
: "a" (*(const uint##bits##_t *)p_data), \
"1" (buf), "2" (*reps) : "memory" ); \
break
- CASE(8, b);
- CASE(16, w);
- CASE(32, l);
- CASE(64, q);
+ CASE(8, b);
+ CASE(16, w);
+ CASE(32, l);
+ CASE(64, q);
#undef CASE
default:
@@ -1968,7 +1979,7 @@ static int hvmemul_rep_stos(
gdprintk(XENLOG_WARNING,
"Failed REP STOS: gpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n",
gpa, *reps, bytes_per_rep);
- /* fall through */
+ /* fall through */
case p2m_mmio_direct:
return X86EMUL_UNHANDLEABLE;
@@ -1989,7 +2000,7 @@ static int hvmemul_read_segment(
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
if ( IS_ERR(sreg) )
- return -PTR_ERR(sreg);
+ return -PTR_ERR(sreg);
*reg = *sreg;
@@ -2236,7 +2247,8 @@ static void hvmemul_put_fpu(
fpu_ctxt->x[FPU_WORD_SIZE_OFFSET] = 8;
break;
- case 4: case 2:
+ case 4:
+ case 2:
fpu_ctxt->fip.offs = aux->ip;
fpu_ctxt->fip.sel = aux->cs;
if ( dval )
@@ -2247,7 +2259,8 @@ static void hvmemul_put_fpu(
fpu_ctxt->x[FPU_WORD_SIZE_OFFSET] = mode;
break;
- case 0: case 1:
+ case 0:
+ case 1:
fpu_ctxt->fip.addr = aux->ip | (aux->cs << 4);
if ( dval )
fpu_ctxt->fdp.addr = aux->dp | (aux->ds << 4);
@@ -2296,7 +2309,7 @@ static int hvmemul_invlpg(
int rc;
rc = hvmemul_virtual_to_linear(
- seg, offset, 1, &reps, hvm_access_none, hvmemul_ctxt, &addr);
+ seg, offset, 1, &reps, hvm_access_none, hvmemul_ctxt, &addr);
if ( rc == X86EMUL_EXCEPTION )
{
@@ -2332,7 +2345,8 @@ static int hvmemul_vmfunc(
return rc;
}
-static const struct x86_emulate_ops hvm_emulate_ops = {
+static const struct x86_emulate_ops hvm_emulate_ops =
+{
.read = hvmemul_read,
.insn_fetch = hvmemul_insn_fetch,
.write = hvmemul_write,
@@ -2361,7 +2375,8 @@ static const struct x86_emulate_ops hvm_emulate_ops = {
.vmfunc = hvmemul_vmfunc,
};
-static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
+static const struct x86_emulate_ops hvm_emulate_ops_no_write =
+{
.read = hvmemul_read,
.insn_fetch = hvmemul_insn_fetch,
.write = hvmemul_write_discard,
@@ -2389,7 +2404,7 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
};
static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
- const struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
const struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
struct vcpu *curr = current;
@@ -2448,9 +2463,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
if ( hvmemul_ctxt->ctxt.retire.hlt &&
!hvm_local_events_need_delivery(curr) )
- {
hvm_hlt(regs->eflags);
- }
return rc;
}
@@ -2463,12 +2476,14 @@ int hvm_emulate_one(
int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
{
- static const struct x86_emulate_ops hvm_intercept_ops_mmcfg = {
+ static const struct x86_emulate_ops hvm_intercept_ops_mmcfg =
+ {
.read = x86emul_unhandleable_rw,
.insn_fetch = hvmemul_insn_fetch,
.write = mmcfg_intercept_write,
};
- static const struct x86_emulate_ops hvm_ro_emulate_ops_mmio = {
+ static const struct x86_emulate_ops hvm_ro_emulate_ops_mmio =
+ {
.read = x86emul_unhandleable_rw,
.insn_fetch = hvmemul_insn_fetch,
.write = mmio_ro_emulated_write,
@@ -2500,7 +2515,7 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
break;
case X86EMUL_EXCEPTION:
hvm_inject_event(&ctxt.ctxt.event);
- /* fallthrough */
+ /* fallthrough */
default:
hvm_emulate_writeback(&ctxt);
}
@@ -2509,7 +2524,7 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
}
void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
- unsigned int errcode)
+ unsigned int errcode)
{
struct hvm_emulate_ctxt ctx = {{ 0 }};
int rc;
@@ -2521,7 +2536,8 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
case EMUL_KIND_NOWRITE:
rc = _hvm_emulate_one(&ctx, &hvm_emulate_ops_no_write);
break;
- case EMUL_KIND_SET_CONTEXT_INSN: {
+ case EMUL_KIND_SET_CONTEXT_INSN:
+ {
struct vcpu *curr = current;
struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
@@ -2556,7 +2572,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
case X86EMUL_UNIMPLEMENTED:
if ( hvm_monitor_emul_unimplemented() )
return;
- /* fall-through */
+ /* fall-through */
case X86EMUL_UNHANDLEABLE:
hvm_dump_emulation_state(XENLOG_G_DEBUG, "Mem event", &ctx, rc);
hvm_inject_hw_exception(trapnr, errcode);
@@ -2678,12 +2694,18 @@ static const char *guest_x86_mode_to_str(int mode)
{
switch ( mode )
{
- case 0: return "Real";
- case 1: return "v86";
- case 2: return "16bit";
- case 4: return "32bit";
- case 8: return "64bit";
- default: return "Unknown";
+ case 0:
+ return "Real";
+ case 1:
+ return "v86";
+ case 2:
+ return "16bit";
+ case 4:
+ return "32bit";
+ case 8:
+ return "64bit";
+ default:
+ return "Unknown";
}
}
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 12f13f8c3c..a1afb04a53 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -200,7 +200,7 @@ static int hpet_read(
if ( length != 8 )
result = (val >> ((addr & 7) * 8)) & ((1ULL << (length * 8)) - 1);
- out:
+out:
*pval = result;
return X86EMUL_OKAY;
}
@@ -281,7 +281,7 @@ static void hpet_set_timer(HPETState *h, unsigned int tn,
diff = (timer_is_32bit(h, tn) &&
vhpet_domain(h)->creation_finished &&
(-diff > HPET_TINY_TIME_SPAN))
- ? (uint32_t)diff : 0;
+ ? (uint32_t)diff : 0;
destroy_periodic_time(&h->pt[tn]);
if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) )
@@ -377,8 +377,8 @@ static int hpet_write(
new_val = val;
if ( length != 8 )
new_val = hpet_fixup_reg(
- new_val << (addr & 7) * 8, old_val,
- ((1ULL << (length*8)) - 1) << ((addr & 7) * 8));
+ new_val << (addr & 7) * 8, old_val,
+ ((1ULL << (length*8)) - 1) << ((addr & 7) * 8));
switch ( addr & ~7 )
{
@@ -393,9 +393,9 @@ static int hpet_write(
for ( i = 0; i < HPET_TIMER_NUM; i++ )
{
h->hpet.comparator64[i] =
- h->hpet.timers[i].config & HPET_TN_32BIT ?
- (uint32_t)h->hpet.timers[i].cmp :
- h->hpet.timers[i].cmp;
+ h->hpet.timers[i].config & HPET_TN_32BIT ?
+ (uint32_t)h->hpet.timers[i].cmp :
+ h->hpet.timers[i].cmp;
if ( timer_enabled(h, i) )
set_start_timer(i);
}
@@ -471,7 +471,7 @@ static int hpet_write(
* the right mode. */
set_restart_timer(tn);
else if ( (new_val & HPET_TN_32BIT) &&
- !(old_val & HPET_TN_32BIT) )
+ !(old_val & HPET_TN_32BIT) )
/* switching from 64 bit to 32 bit mode could cause timer
* next fire time, or period, to change. */
set_restart_timer(tn);
@@ -560,7 +560,7 @@ static int hpet_write(
write_unlock(&h->lock);
- out:
+out:
return X86EMUL_OKAY;
}
@@ -570,7 +570,8 @@ static int hpet_range(struct vcpu *v, unsigned long addr)
(addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)) );
}
-static const struct hvm_mmio_ops hpet_mmio_ops = {
+static const struct hvm_mmio_ops hpet_mmio_ops =
+{
.check = hpet_range,
.read = hpet_read,
.write = hpet_write
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 029eea3b85..8145b91e28 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4,7 +4,7 @@
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, International Business Machines Corporation.
* Copyright (c) 2008, Citrix Systems, Inc.
- *
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -92,7 +92,7 @@ struct hvm_function_table hvm_funcs __read_mostly;
*/
#define HVM_IOBITMAP_SIZE (3 * PAGE_SIZE)
unsigned long __section(".bss.page_aligned") __aligned(PAGE_SIZE)
- hvm_io_bitmap[HVM_IOBITMAP_SIZE / BYTES_PER_LONG];
+hvm_io_bitmap[HVM_IOBITMAP_SIZE / BYTES_PER_LONG];
/* Xen command-line option to enable HAP */
static bool_t __initdata opt_hap_enabled = 1;
@@ -138,7 +138,8 @@ static int cpu_callback(
return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block cpu_nfb =
+{
.notifier_call = cpu_callback
};
@@ -273,7 +274,7 @@ void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable)
struct vcpu *v;
for_each_vcpu ( d, v )
- alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable);
+ alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable);
}
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
@@ -299,7 +300,7 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
break;
default:
HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid guest PAT: %"PRIx64"\n",
- guest_pat);
+ guest_pat);
return 0;
}
@@ -374,9 +375,9 @@ u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz)
"divq %[hkhz] "
: "=d" (dummy), "=a" (ratio)
: [frac] "c" (ratio_frac_bits),
- [gkhz] "a" ((u64) gtsc_khz),
- [zero] "d" (0ULL),
- [hkhz] "rm" ((u64) cpu_khz) );
+ [gkhz] "a" ((u64) gtsc_khz),
+ [zero] "d" (0ULL),
+ [hkhz] "rm" ((u64) cpu_khz) );
return ratio > max_ratio ? 0 : ratio;
}
@@ -393,7 +394,7 @@ u64 hvm_scale_tsc(const struct domain *d, u64 tsc)
asm ( "mulq %[ratio]; shrdq %[frac],%%rdx,%[tsc]"
: [tsc] "+a" (tsc), "=&d" (dummy)
: [frac] "c" (hvm_funcs.tsc_scaling.ratio_frac_bits),
- [ratio] "rm" (ratio) );
+ [ratio] "rm" (ratio) );
return tsc;
}
@@ -497,7 +498,7 @@ void hvm_migrate_pirqs(struct vcpu *v)
struct domain *d = v->domain;
if ( !iommu_enabled || !hvm_domain_irq(d)->dpci )
- return;
+ return;
spin_lock(&d->event_lock);
pt_pirq_iterate(d, migrate_pirq, v);
@@ -677,21 +678,21 @@ int hvm_domain_initialise(struct domain *d)
return 0;
- fail2:
+fail2:
rtc_deinit(d);
stdvga_deinit(d);
vioapic_deinit(d);
- fail1:
+fail1:
if ( is_hardware_domain(d) )
xfree(d->arch.hvm.io_bitmap);
xfree(d->arch.hvm.io_handler);
xfree(d->arch.hvm.params);
xfree(d->arch.hvm.pl_time);
xfree(d->arch.hvm.irq);
- fail0:
+fail0:
hvm_destroy_cacheattr_region_list(d);
destroy_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0);
- fail:
+fail:
viridian_domain_deinit(d);
return rc;
}
@@ -746,7 +747,8 @@ void hvm_domain_destroy(struct domain *d)
static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
{
- struct hvm_tsc_adjust ctxt = {
+ struct hvm_tsc_adjust ctxt =
+ {
.tsc_adjust = v->arch.hvm.msr_tsc_adjust,
};
@@ -779,7 +781,8 @@ HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
{
struct segment_register seg;
- struct hvm_hw_cpu ctxt = {
+ struct hvm_hw_cpu ctxt =
+ {
.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm.sync_tsc),
.msr_tsc_aux = v->arch.msrs->tsc_aux,
.rax = v->arch.user_regs.rax,
@@ -944,7 +947,7 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore)
(p->basic.pae ? X86_CR4_PAE : 0) |
(mce ? X86_CR4_MCE : 0) |
(p->basic.pge ? X86_CR4_PGE : 0) |
- X86_CR4_PCE |
+ X86_CR4_PCE |
(p->basic.fxsr ? X86_CR4_OSFXSR : 0) |
(p->basic.sse ? X86_CR4_OSXMMEXCPT : 0) |
(p->feat.umip ? X86_CR4_UMIP : 0) |
@@ -1018,7 +1021,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
return -EINVAL;
}
- /* Older Xen versions used to save the segment arbytes directly
+ /* Older Xen versions used to save the segment arbytes directly
* from the VMCS on Intel hosts. Detect this and rearrange them
* into the struct segment_register format. */
#define UNFOLD_ARBYTES(_r) \
@@ -1228,7 +1231,7 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
return -ENODATA;
}
if ( desc->length < offsetof(struct hvm_hw_cpu_xsave, save_area) +
- XSTATE_AREA_MIN_SIZE )
+ XSTATE_AREA_MIN_SIZE )
{
printk(XENLOG_G_WARNING
"HVM%d.%d restore mismatch: xsave length %u < %zu\n",
@@ -1306,7 +1309,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
}
#define HVM_CPU_MSR_SIZE(cnt) offsetof(struct hvm_msr, msr[cnt])
-static const uint32_t msrs_to_send[] = {
+static const uint32_t msrs_to_send[] =
+{
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
MSR_IA32_BNDCFGS,
@@ -1325,7 +1329,7 @@ static int hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
int err;
err = _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
- HVM_CPU_MSR_SIZE(ARRAY_SIZE(msrs_to_send)));
+ HVM_CPU_MSR_SIZE(ARRAY_SIZE(msrs_to_send)));
if ( err )
return err;
ctxt = (struct hvm_msr *)&h->data[h->cur];
@@ -1471,7 +1475,7 @@ static int __init hvm_register_CPU_save_and_restore(void)
hvm_save_cpu_xsave_states,
hvm_load_cpu_xsave_states,
HVM_CPU_XSAVE_SIZE(xfeature_mask) +
- sizeof(struct hvm_save_descriptor),
+ sizeof(struct hvm_save_descriptor),
HVMSR_PER_VCPU);
hvm_register_savevm(CPU_MSR_CODE,
@@ -1479,7 +1483,7 @@ static int __init hvm_register_CPU_save_and_restore(void)
hvm_save_cpu_msrs,
hvm_load_cpu_msrs,
HVM_CPU_MSR_SIZE(ARRAY_SIZE(msrs_to_send)) +
- sizeof(struct hvm_save_descriptor),
+ sizeof(struct hvm_save_descriptor),
HVMSR_PER_VCPU);
return 0;
@@ -1505,7 +1509,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
if ( rc != 0 ) /* teardown: vlapic_destroy */
goto fail2;
- if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
+ if ( (rc = hvm_funcs.vcpu_initialise(v)) !=
+ 0 ) /* teardown: hvm_funcs.vcpu_destroy */
goto fail3;
softirq_tasklet_init(
@@ -1522,7 +1527,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
vcpu_nestedhvm(v).nv_vvmcxaddr = INVALID_PADDR;
if ( nestedhvm_enabled(d)
- && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
+ && (rc = nestedhvm_vcpu_initialise(v)) <
+ 0 ) /* teardown: nestedhvm_vcpu_destroy */
goto fail5;
rc = viridian_vcpu_init(v);
@@ -1538,24 +1544,24 @@ int hvm_vcpu_initialise(struct vcpu *v)
/* NB. All these really belong in hvm_domain_initialise(). */
pmtimer_init(v);
hpet_init(d);
-
+
/* Init guest TSC to start from zero. */
hvm_set_guest_tsc(v, 0);
}
return 0;
- fail6:
+fail6:
nestedhvm_vcpu_destroy(v);
- fail5:
+fail5:
free_compat_arg_xlat(v);
- fail4:
+fail4:
hvm_funcs.vcpu_destroy(v);
- fail3:
+fail3:
vlapic_destroy(v);
- fail2:
+fail2:
hvm_vcpu_cacheattr_destroy(v);
- fail1:
+fail1:
viridian_vcpu_deinit(v);
return rc;
}
@@ -1593,8 +1599,8 @@ void hvm_vcpu_down(struct vcpu *v)
/* Any other VCPUs online? ... */
domain_lock(d);
for_each_vcpu ( d, v )
- if ( !(v->pause_flags & VPF_down) )
- online_count++;
+ if ( !(v->pause_flags & VPF_down) )
+ online_count++;
domain_unlock(d);
/* ... Shut down the domain if not. */
@@ -1699,8 +1705,8 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
* If this fails, inject a nested page fault into the guest.
*/
if ( nestedhvm_enabled(currd)
- && nestedhvm_vcpu_in_guestmode(curr)
- && nestedhvm_paging_mode_hap(curr) )
+ && nestedhvm_vcpu_in_guestmode(curr)
+ && nestedhvm_paging_mode_hap(curr) )
{
int rv;
@@ -1712,11 +1718,12 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
* the same as for shadow paging.
*/
- rv = nestedhvm_hap_nested_page_fault(curr, &gpa,
- npfec.read_access,
- npfec.write_access,
- npfec.insn_fetch);
- switch (rv) {
+ rv = nestedhvm_hap_nested_page_fault(curr, &gpa,
+ npfec.read_access,
+ npfec.write_access,
+ npfec.insn_fetch);
+ switch (rv)
+ {
case NESTEDHVM_PAGEFAULT_DONE:
case NESTEDHVM_PAGEFAULT_RETRY:
return 1;
@@ -1849,7 +1856,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
* If this GFN is emulated MMIO or marked as read-only, pass the fault
* to the mmio handler.
*/
- if ( (p2mt == p2m_mmio_dm) ||
+ if ( (p2mt == p2m_mmio_dm) ||
(npfec.write_access &&
(p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
{
@@ -1867,12 +1874,12 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
if ( npfec.write_access && (p2mt == p2m_ram_shared) )
{
ASSERT(p2m_is_hostp2m(p2m));
- sharing_enomem =
+ sharing_enomem =
(mem_sharing_unshare_page(currd, gfn, 0) < 0);
rc = 1;
goto out_put_gfn;
}
-
+
/* Spurious fault? PoD and log-dirty also take this path. */
if ( p2m_is_ram(p2mt) )
{
@@ -1913,12 +1920,12 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
* Otherwise, this is an error condition. */
rc = fall_through;
- out_put_gfn:
+out_put_gfn:
if ( p2m != hostp2m )
__put_gfn(p2m, gfn);
__put_gfn(hostp2m, gfn);
- out:
- /* All of these are delayed until we exit, since we might
+out:
+ /* All of these are delayed until we exit, since we might
* sleep on event ring wait queues, and we must not hold
* locks in such circumstance */
if ( paged )
@@ -2009,8 +2016,8 @@ int hvm_set_efer(uint64_t value)
}
if ( nestedhvm_enabled(v->domain) && cpu_has_svm &&
- ((value & EFER_SVME) == 0 ) &&
- ((value ^ v->arch.hvm.guest_efer) & EFER_SVME) )
+ ((value & EFER_SVME) == 0 ) &&
+ ((value ^ v->arch.hvm.guest_efer) & EFER_SVME) )
{
/* Cleared EFER.SVME: Flush all nestedp2m tables */
p2m_flush_nestedp2m(v->domain);
@@ -2086,7 +2093,7 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
return rc;
- exit_and_crash:
+exit_and_crash:
domain_crash(curr->domain);
return X86EMUL_UNHANDLEABLE;
}
@@ -2118,7 +2125,7 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
return X86EMUL_OKAY;
- exit_and_crash:
+exit_and_crash:
domain_crash(curr->domain);
return X86EMUL_UNHANDLEABLE;
}
@@ -2272,7 +2279,8 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
hvm_update_cr(v, 0, value);
- if ( (value ^ old_value) & X86_CR0_PG ) {
+ if ( (value ^ old_value) & X86_CR0_PG )
+ {
if ( !nestedhvm_vmswitch_in_progress(v) && nestedhvm_vcpu_in_guestmode(v) )
paging_update_nestedmode(v);
else
@@ -2330,7 +2338,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
paging_update_cr3(v, noflush);
return X86EMUL_OKAY;
- bad_cr3:
+bad_cr3:
gdprintk(XENLOG_ERR, "Invalid CR3\n");
domain_crash(v->domain);
return X86EMUL_UNHANDLEABLE;
@@ -2542,7 +2550,7 @@ bool_t hvm_virtual_to_linear_addr(
/* All checks ok. */
okay = 1;
- out:
+out:
/*
* Always return the correct linear address, even if a permission check
* failed. The permissions failure is not relevant to some callers.
@@ -2551,12 +2559,13 @@ bool_t hvm_virtual_to_linear_addr(
return okay;
}
-struct hvm_write_map {
+struct hvm_write_map
+{
struct list_head list;
struct page_info *page;
};
-/* On non-NULL return, we leave this function holding an additional
+/* On non-NULL return, we leave this function holding an additional
* ref on the underlying mfn, if any */
static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
bool_t *writable)
@@ -2636,13 +2645,13 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
unmap_domain_page_global(p);
spin_lock(&d->arch.hvm.write_map.lock);
list_for_each_entry(track, &d->arch.hvm.write_map.list, list)
- if ( track->page == page )
- {
- paging_mark_dirty(d, mfn);
- list_del(&track->list);
- xfree(track);
- break;
- }
+ if ( track->page == page )
+ {
+ paging_mark_dirty(d, mfn);
+ list_del(&track->list);
+ xfree(track);
+ break;
+ }
spin_unlock(&d->arch.hvm.write_map.lock);
}
@@ -2655,7 +2664,7 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
spin_lock(&d->arch.hvm.write_map.lock);
list_for_each_entry(track, &d->arch.hvm.write_map.list, list)
- paging_mark_dirty(d, page_to_mfn(track->page));
+ paging_mark_dirty(d, page_to_mfn(track->page));
spin_unlock(&d->arch.hvm.write_map.lock);
}
@@ -2688,7 +2697,7 @@ static void *hvm_map_entry(unsigned long va, bool_t *writable)
return v + (va & ~PAGE_MASK);
- fail:
+fail:
domain_crash(current->domain);
return NULL;
}
@@ -2748,7 +2757,8 @@ static int task_switch_load_seg(
if ( pdesc == NULL )
goto fault;
- do {
+ do
+ {
desc = *pdesc;
/* LDT descriptor is a system segment. All others are code/data. */
@@ -2799,7 +2809,7 @@ static int task_switch_load_seg(
if ( !(desc.b & _SEGMENT_P) )
{
fault_type = (seg != x86_seg_ss) ? TRAP_no_segment
- : TRAP_stack_error;
+ : TRAP_stack_error;
goto fault;
}
} while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
@@ -2809,7 +2819,7 @@ static int task_switch_load_seg(
/* Force the Accessed flag in our local copy. */
desc.b |= 0x100;
- skip_accessed_flag:
+skip_accessed_flag:
hvm_unmap_entry(pdesc);
segr.base = (((desc.b << 0) & 0xff000000u) |
@@ -2825,14 +2835,15 @@ static int task_switch_load_seg(
return 0;
- fault:
+fault:
hvm_unmap_entry(pdesc);
hvm_inject_hw_exception(fault_type, sel & 0xfffc);
return 1;
}
-struct tss32 {
+struct tss32
+{
uint16_t back_link, :16;
uint32_t esp0;
uint16_t ss0, :16;
@@ -2896,8 +2907,8 @@ void hvm_task_switch(
if ( ((tss_sel & 0xfff8) + 7) > gdt.limit )
{
hvm_inject_hw_exception((taskswitch_reason == TSW_iret) ?
- TRAP_invalid_tss : TRAP_gp_fault,
- tss_sel & 0xfff8);
+ TRAP_invalid_tss : TRAP_gp_fault,
+ tss_sel & 0xfff8);
goto out;
}
@@ -2942,7 +2953,7 @@ void hvm_task_switch(
}
rc = hvm_copy_from_guest_linear(
- &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMTRANS_okay )
@@ -2989,7 +3000,7 @@ void hvm_task_switch(
goto out;
rc = hvm_copy_from_guest_linear(
- &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
/*
@@ -3092,7 +3103,7 @@ void hvm_task_switch(
if ( (tss.trace & 1) && !exn_raised )
hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
- out:
+out:
hvm_unmap_entry(optss_desc);
hvm_unmap_entry(nptss_desc);
}
@@ -3247,9 +3258,7 @@ static enum hvm_translation_result __hvm_copy(
}
}
else
- {
memcpy(buf, p, count);
- }
unmap_domain_page(p);
@@ -3361,11 +3370,11 @@ static uint64_t _hvm_rdtsc_intercept(void)
case 2:
if ( unlikely(hvm_get_cpl(curr)) )
{
- case 1:
+ case 1:
currd->arch.vtsc_usercount++;
break;
}
- /* fall through */
+ /* fall through */
case 0:
currd->arch.vtsc_kerncount++;
break;
@@ -3449,14 +3458,14 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
goto gp_fault;
index = msr - MSR_MTRRfix16K_80000;
*msr_content = fixed_range_base[array_index_nospec(index + 1,
- ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))];
+ ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))];
break;
case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix4K_C0000;
*msr_content = fixed_range_base[array_index_nospec(index + 3,
- ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))];
+ ARRAY_SIZE(v->arch.hvm.mtrr.fixed_ranges))];
break;
case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT_MAX - 1):
if ( !d->arch.cpuid->basic.mtrr )
@@ -3466,19 +3475,19 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT) )
goto gp_fault;
*msr_content = var_range_base[array_index_nospec(index,
- 2 * MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap,
- MTRRcap_VCNT))];
+ 2 * MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap,
+ MTRRcap_VCNT))];
break;
case MSR_K8_ENABLE_C1E:
case MSR_AMD64_NB_CFG:
- /*
- * These AMD-only registers may be accessed if this HVM guest
- * has been migrated to an Intel host. This fixes a guest crash
- * in this case.
- */
- *msr_content = 0;
- break;
+ /*
+ * These AMD-only registers may be accessed if this HVM guest
+ * has been migrated to an Intel host. This fixes a guest crash
+ * in this case.
+ */
+ *msr_content = 0;
+ break;
default:
if ( (ret = vmce_rdmsr(msr, msr_content)) < 0 )
@@ -3491,12 +3500,12 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
break;
}
- out:
+out:
HVMTRACE_3D(MSR_READ, msr,
(uint32_t)*msr_content, (uint32_t)(*msr_content >> 32));
return ret;
- gp_fault:
+gp_fault:
ret = X86EMUL_EXCEPTION;
*msr_content = -1ull;
goto out;
@@ -3510,7 +3519,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
int ret;
HVMTRACE_3D(MSR_WRITE, msr,
- (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
+ (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
if ( may_defer && unlikely(monitored_msr(v->domain, msr)) )
{
@@ -3542,7 +3551,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
case MSR_EFER:
if ( hvm_set_efer(msr_content) )
- return X86EMUL_EXCEPTION;
+ return X86EMUL_EXCEPTION;
break;
case MSR_IA32_TSC:
@@ -3562,7 +3571,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
case MSR_IA32_CR_PAT:
if ( !hvm_set_guest_pat(v, msr_content) )
- goto gp_fault;
+ goto gp_fault;
break;
case MSR_MTRRcap:
@@ -3573,7 +3582,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
goto gp_fault;
if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm.mtrr,
msr_content) )
- goto gp_fault;
+ goto gp_fault;
break;
case MSR_MTRRfix64K_00000:
if ( !d->arch.cpuid->basic.mtrr )
@@ -3737,7 +3746,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
break;
case X86EMUL_EXCEPTION:
hvm_inject_event(&ctxt.ctxt.event);
- /* fall through */
+ /* fall through */
default:
hvm_emulate_writeback(&ctxt);
break;
@@ -3750,7 +3759,8 @@ enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
ASSERT(v == current);
- if ( nestedhvm_enabled(v->domain) ) {
+ if ( nestedhvm_enabled(v->domain) )
+ {
enum hvm_intblk intr;
intr = nhvm_interrupt_blocked(v);
@@ -3896,7 +3906,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
v->is_initialised = 1;
clear_bit(_VPF_down, &v->pause_flags);
- out:
+out:
domain_unlock(d);
}
@@ -3943,7 +3953,7 @@ static void hvm_s3_resume(struct domain *d)
struct vcpu *v;
for_each_vcpu( d, v )
- hvm_set_guest_tsc(v, 0);
+ hvm_set_guest_tsc(v, 0);
domain_unpause(d);
}
}
@@ -3962,14 +3972,14 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
/* Pause all other vcpus. */
for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_pause_nosync(v);
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
/* Now that all VCPUs are signalled to deschedule, we wait... */
for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- while ( !vcpu_runnable(v) && v->is_running )
- cpu_relax();
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
/* All other vcpus are paused, safe to unlock now. */
spin_unlock(&d->hypercall_deadlock_mutex);
@@ -3996,8 +4006,8 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
/* Done. */
for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_unpause(v);
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
return true;
}
@@ -4173,7 +4183,7 @@ static int hvmop_set_param(
domain_pause(d);
d->arch.hvm.params[a.index] = a.value;
for_each_vcpu ( d, v )
- paging_update_cr3(v, false);
+ paging_update_cr3(v, false);
domain_unpause(d);
domctl_lock_release();
@@ -4226,11 +4236,11 @@ static int hvmop_set_param(
if ( a.value &&
!d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
for_each_vcpu(d, v)
- if ( rc == 0 )
- rc = nestedhvm_vcpu_initialise(v);
+ if ( rc == 0 )
+ rc = nestedhvm_vcpu_initialise(v);
if ( !a.value || rc )
for_each_vcpu(d, v)
- nestedhvm_vcpu_destroy(v);
+ nestedhvm_vcpu_destroy(v);
break;
case HVM_PARAM_ALTP2M:
rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
@@ -4311,10 +4321,10 @@ static int hvmop_set_param(
* plus one padding byte).
*/
if ( (a.value >> 32) > sizeof(struct tss32) +
- (0x100 / 8) + (0x10000 / 8) + 1 )
+ (0x100 / 8) + (0x10000 / 8) + 1 )
a.value = (uint32_t)a.value |
((sizeof(struct tss32) + (0x100 / 8) +
- (0x10000 / 8) + 1) << 32);
+ (0x10000 / 8) + 1) << 32);
a.value |= VM86_TSS_UPDATED;
break;
@@ -4331,7 +4341,7 @@ static int hvmop_set_param(
HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
a.index, a.value);
- out:
+out:
rcu_unlock_domain(d);
return rc;
}
@@ -4431,7 +4441,7 @@ static int hvmop_get_param(
HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64,
a.index, a.value);
- out:
+out:
rcu_unlock_domain(d);
return rc;
}
@@ -4732,14 +4742,14 @@ static int do_altp2m_op(
rc = -EINVAL;
else
rc = p2m_change_altp2m_gfn(d, a.u.change_gfn.view,
- _gfn(a.u.change_gfn.old_gfn),
- _gfn(a.u.change_gfn.new_gfn));
+ _gfn(a.u.change_gfn.old_gfn),
+ _gfn(a.u.change_gfn.new_gfn));
break;
default:
ASSERT_UNREACHABLE();
}
- out:
+out:
rcu_unlock_domain(d);
return rc;
@@ -4892,7 +4902,7 @@ static int hvmop_get_mem_type(
goto out;
rc = 0;
- out:
+out:
rcu_unlock_domain(d);
return rc;
@@ -4913,17 +4923,17 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
case HVMOP_set_evtchn_upcall_vector:
rc = hvmop_set_evtchn_upcall_vector(
- guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
+ guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
break;
-
+
case HVMOP_set_param:
rc = hvmop_set_param(
- guest_handle_cast(arg, xen_hvm_param_t));
+ guest_handle_cast(arg, xen_hvm_param_t));
break;
case HVMOP_get_param:
rc = hvmop_get_param(
- guest_handle_cast(arg, xen_hvm_param_t));
+ guest_handle_cast(arg, xen_hvm_param_t));
break;
case HVMOP_flush_tlbs:
@@ -4932,7 +4942,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_get_mem_type:
rc = hvmop_get_mem_type(
- guest_handle_cast(arg, xen_hvm_get_mem_type_t));
+ guest_handle_cast(arg, xen_hvm_get_mem_type_t));
break;
case HVMOP_pagetable_dying:
@@ -4959,7 +4969,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
- case HVMOP_get_time: {
+ case HVMOP_get_time:
+ {
xen_hvm_get_time_t gxt;
gxt.now = NOW();
@@ -4968,7 +4979,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
- case HVMOP_xentrace: {
+ case HVMOP_xentrace:
+ {
xen_hvm_xentrace_t tr;
if ( copy_from_guest(&tr, arg, 1 ) )
@@ -5016,20 +5028,20 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
switch ( op )
{
- case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON:
- case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF:
- rc = -EOPNOTSUPP;
- if ( !cpu_has_monitor_trap_flag )
- break;
- rc = 0;
- vcpu_pause(v);
- v->arch.hvm.single_step =
- (op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON);
- vcpu_unpause(v); /* guest will latch new state */
- break;
- default:
- rc = -ENOSYS;
+ case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON:
+ case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF:
+ rc = -EOPNOTSUPP;
+ if ( !cpu_has_monitor_trap_flag )
break;
+ rc = 0;
+ vcpu_pause(v);
+ v->arch.hvm.single_step =
+ (op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON);
+ vcpu_unpause(v); /* guest will latch new state */
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
}
return rc;
@@ -5076,10 +5088,10 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
*/
reg->type |= 0x2;
- /*
- * %cs and %tr are unconditionally present. SVM ignores these present
- * bits and will happily run without them set.
- */
+ /*
+ * %cs and %tr are unconditionally present. SVM ignores these present
+ * bits and will happily run without them set.
+ */
case x86_seg_cs:
reg->p = 1;
break;
diff --git a/xen/arch/x86/hvm/hypercall.c b/xen/arch/x86/hvm/hypercall.c
index 33dd2d99d2..d47a54f653 100644
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -83,7 +83,7 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
default:
if ( !is_hardware_domain(curr->domain) )
return -ENOSYS;
- /* fall through */
+ /* fall through */
case PHYSDEVOP_map_pirq:
case PHYSDEVOP_unmap_pirq:
case PHYSDEVOP_eoi:
@@ -119,7 +119,8 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
#define do_arch_1 paging_domctl_continuation
-static const hypercall_table_t hvm_hypercall_table[] = {
+static const hypercall_table_t hvm_hypercall_table[] =
+{
HVM_CALL(memory_op),
#ifdef CONFIG_GRANT_TABLE
HVM_CALL(grant_table_op),
@@ -164,17 +165,17 @@ int hvm_hypercall(struct cpu_user_regs *regs)
{
case 8:
eax = regs->rax;
- /* Fallthrough to permission check. */
+ /* Fallthrough to permission check. */
case 4:
case 2:
if ( currd->arch.monitor.guest_request_userspace_enabled &&
- eax == __HYPERVISOR_hvm_op &&
- (mode == 8 ? regs->rdi : regs->ebx) == HVMOP_guest_request_vm_event )
+ eax == __HYPERVISOR_hvm_op &&
+ (mode == 8 ? regs->rdi : regs->ebx) == HVMOP_guest_request_vm_event )
break;
if ( unlikely(hvm_get_cpl(curr)) )
{
- default:
+ default:
regs->rax = -EPERM;
return HVM_HCALL_completed;
}
@@ -220,12 +221,18 @@ int hvm_hypercall(struct cpu_user_regs *regs)
/* Deliberately corrupt parameter regs not used by this hypercall. */
switch ( hypercall_args_table[eax].native )
{
- case 0: rdi = 0xdeadbeefdeadf00dUL;
- case 1: rsi = 0xdeadbeefdeadf00dUL;
- case 2: rdx = 0xdeadbeefdeadf00dUL;
- case 3: r10 = 0xdeadbeefdeadf00dUL;
- case 4: r8 = 0xdeadbeefdeadf00dUL;
- case 5: r9 = 0xdeadbeefdeadf00dUL;
+ case 0:
+ rdi = 0xdeadbeefdeadf00dUL;
+ case 1:
+ rsi = 0xdeadbeefdeadf00dUL;
+ case 2:
+ rdx = 0xdeadbeefdeadf00dUL;
+ case 3:
+ r10 = 0xdeadbeefdeadf00dUL;
+ case 4:
+ r8 = 0xdeadbeefdeadf00dUL;
+ case 5:
+ r9 = 0xdeadbeefdeadf00dUL;
}
#endif
@@ -238,12 +245,18 @@ int hvm_hypercall(struct cpu_user_regs *regs)
/* Deliberately corrupt parameter regs used by this hypercall. */
switch ( hypercall_args_table[eax].native )
{
- case 6: regs->r9 = 0xdeadbeefdeadf00dUL;
- case 5: regs->r8 = 0xdeadbeefdeadf00dUL;
- case 4: regs->r10 = 0xdeadbeefdeadf00dUL;
- case 3: regs->rdx = 0xdeadbeefdeadf00dUL;
- case 2: regs->rsi = 0xdeadbeefdeadf00dUL;
- case 1: regs->rdi = 0xdeadbeefdeadf00dUL;
+ case 6:
+ regs->r9 = 0xdeadbeefdeadf00dUL;
+ case 5:
+ regs->r8 = 0xdeadbeefdeadf00dUL;
+ case 4:
+ regs->r10 = 0xdeadbeefdeadf00dUL;
+ case 3:
+ regs->rdx = 0xdeadbeefdeadf00dUL;
+ case 2:
+ regs->rsi = 0xdeadbeefdeadf00dUL;
+ case 1:
+ regs->rdi = 0xdeadbeefdeadf00dUL;
}
}
#endif
@@ -264,12 +277,18 @@ int hvm_hypercall(struct cpu_user_regs *regs)
/* Deliberately corrupt parameter regs not used by this hypercall. */
switch ( hypercall_args_table[eax].compat )
{
- case 0: ebx = 0xdeadf00d;
- case 1: ecx = 0xdeadf00d;
- case 2: edx = 0xdeadf00d;
- case 3: esi = 0xdeadf00d;
- case 4: edi = 0xdeadf00d;
- case 5: ebp = 0xdeadf00d;
+ case 0:
+ ebx = 0xdeadf00d;
+ case 1:
+ ecx = 0xdeadf00d;
+ case 2:
+ edx = 0xdeadf00d;
+ case 3:
+ esi = 0xdeadf00d;
+ case 4:
+ edi = 0xdeadf00d;
+ case 5:
+ ebp = 0xdeadf00d;
}
#endif
@@ -284,12 +303,18 @@ int hvm_hypercall(struct cpu_user_regs *regs)
/* Deliberately corrupt parameter regs used by this hypercall. */
switch ( hypercall_args_table[eax].compat )
{
- case 6: regs->rbp = 0xdeadf00d;
- case 5: regs->rdi = 0xdeadf00d;
- case 4: regs->rsi = 0xdeadf00d;
- case 3: regs->rdx = 0xdeadf00d;
- case 2: regs->rcx = 0xdeadf00d;
- case 1: regs->rbx = 0xdeadf00d;
+ case 6:
+ regs->rbp = 0xdeadf00d;
+ case 5:
+ regs->rdi = 0xdeadf00d;
+ case 4:
+ regs->rsi = 0xdeadf00d;
+ case 3:
+ regs->rdx = 0xdeadf00d;
+ case 2:
+ regs->rcx = 0xdeadf00d;
+ case 1:
+ regs->rbx = 0xdeadf00d;
}
}
#endif
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index aac22c595d..3e6ff3490e 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -65,7 +65,8 @@ static int hvm_mmio_write(const struct hvm_io_handler *handler,
return handler->mmio.ops->write(current, addr, size, data);
}
-static const struct hvm_io_ops mmio_ops = {
+static const struct hvm_io_ops mmio_ops =
+{
.accept = hvm_mmio_accept,
.read = hvm_mmio_read,
.write = hvm_mmio_write
@@ -106,7 +107,8 @@ static int hvm_portio_write(const struct hvm_io_handler *handler,
return handler->portio.action(IOREQ_WRITE, addr, size, &val);
}
-static const struct hvm_io_ops portio_ops = {
+static const struct hvm_io_ops portio_ops =
+{
.accept = hvm_portio_accept,
.read = hvm_portio_read,
.write = hvm_portio_write
@@ -146,7 +148,7 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
ASSERT_UNREACHABLE();
- /* fall through */
+ /* fall through */
default:
domain_crash(current->domain);
return X86EMUL_UNHANDLEABLE;
@@ -175,7 +177,7 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
ASSERT_UNREACHABLE();
- /* fall through */
+ /* fall through */
default:
domain_crash(current->domain);
return X86EMUL_UNHANDLEABLE;
@@ -222,7 +224,7 @@ static const struct hvm_io_handler *hvm_find_io_handler(const ioreq_t *p)
for ( i = 0; i < curr_d->arch.hvm.io_handler_count; i++ )
{
const struct hvm_io_handler *handler =
- &curr_d->arch.hvm.io_handler[i];
+ &curr_d->arch.hvm.io_handler[i];
const struct hvm_io_ops *ops = handler->ops;
if ( handler->type != p->type )
@@ -306,7 +308,7 @@ void relocate_portio_handler(struct domain *d, unsigned int old_port,
for ( i = 0; i < d->arch.hvm.io_handler_count; i++ )
{
struct hvm_io_handler *handler =
- &d->arch.hvm.io_handler[i];
+ &d->arch.hvm.io_handler[i];
if ( handler->type != IOREQ_TYPE_PIO )
continue;
@@ -324,7 +326,8 @@ bool_t hvm_mmio_internal(paddr_t gpa)
{
const struct hvm_io_handler *handler;
const struct hvm_io_ops *ops;
- ioreq_t p = {
+ ioreq_t p =
+ {
.type = IOREQ_TYPE_COPY,
.addr = gpa,
.count = 1,
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index a5b0a23f06..2b667614e6 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -48,7 +48,8 @@
void send_timeoffset_req(unsigned long timeoff)
{
- ioreq_t p = {
+ ioreq_t p =
+ {
.type = IOREQ_TYPE_TIMEOFFSET,
.size = 8,
.count = 1,
@@ -67,7 +68,8 @@ void send_timeoffset_req(unsigned long timeoff)
/* Ask ioemu mapcache to invalidate mappings. */
void send_invalidate_req(void)
{
- ioreq_t p = {
+ ioreq_t p =
+ {
.type = IOREQ_TYPE_INVALIDATE,
.size = 4,
.dir = IOREQ_WRITE,
@@ -92,7 +94,7 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
if ( hvm_ioreq_needs_completion(&vio->io_req) )
vio->io_completion = HVMIO_mmio_completion;
else
- vio->mmio_access = (struct npfec){};
+ vio->mmio_access = (struct npfec) {};
switch ( rc )
{
@@ -122,7 +124,7 @@ bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
vio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
- ? access : (struct npfec){};
+ ? access : (struct npfec) {};
vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = gpfn;
return handle_mmio();
@@ -248,7 +250,8 @@ static int g2m_portio_write(const struct hvm_io_handler *handler,
return X86EMUL_OKAY;
}
-static const struct hvm_io_ops g2m_portio_ops = {
+static const struct hvm_io_ops g2m_portio_ops =
+{
.accept = g2m_portio_accept,
.read = g2m_portio_read,
.write = g2m_portio_write
@@ -362,7 +365,8 @@ static int vpci_portio_write(const struct hvm_io_handler *handler,
return X86EMUL_OKAY;
}
-static const struct hvm_io_ops vpci_portio_ops = {
+static const struct hvm_io_ops vpci_portio_ops =
+{
.accept = vpci_portio_accept,
.read = vpci_portio_read,
.write = vpci_portio_write,
@@ -383,7 +387,8 @@ void register_vpci_portio_handler(struct domain *d)
handler->ops = &vpci_portio_ops;
}
-struct hvm_mmcfg {
+struct hvm_mmcfg
+{
struct list_head next;
paddr_t addr;
unsigned int size;
@@ -398,8 +403,8 @@ static const struct hvm_mmcfg *vpci_mmcfg_find(const struct domain *d,
const struct hvm_mmcfg *mmcfg;
list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
- if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
- return mmcfg;
+ if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
+ return mmcfg;
return NULL;
}
@@ -506,7 +511,8 @@ static int vpci_mmcfg_write(struct vcpu *v, unsigned long addr,
return X86EMUL_OKAY;
}
-static const struct hvm_mmio_ops vpci_mmcfg_ops = {
+static const struct hvm_mmio_ops vpci_mmcfg_ops =
+{
.check = vpci_mmcfg_accept,
.read = vpci_mmcfg_read,
.write = vpci_mmcfg_write,
@@ -534,20 +540,20 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
write_lock(&d->arch.hvm.mmcfg_lock);
list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
- if ( new->addr < mmcfg->addr + mmcfg->size &&
- mmcfg->addr < new->addr + new->size )
- {
- int ret = -EEXIST;
-
- if ( new->addr == mmcfg->addr &&
- new->start_bus == mmcfg->start_bus &&
- new->segment == mmcfg->segment &&
- new->size == mmcfg->size )
- ret = 0;
- write_unlock(&d->arch.hvm.mmcfg_lock);
- xfree(new);
- return ret;
- }
+ if ( new->addr < mmcfg->addr + mmcfg->size &&
+ mmcfg->addr < new->addr + new->size )
+ {
+ int ret = -EEXIST;
+
+ if ( new->addr == mmcfg->addr &&
+ new->start_bus == mmcfg->start_bus &&
+ new->segment == mmcfg->segment &&
+ new->size == mmcfg->size )
+ ret = 0;
+ write_unlock(&d->arch.hvm.mmcfg_lock);
+ xfree(new);
+ return ret;
+ }
if ( list_empty(&d->arch.hvm.mmcfg_regions) )
register_mmio_handler(d, &vpci_mmcfg_ops);
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 7a80cfb28b..b494f8e79b 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -130,7 +130,7 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
smp_rmb();
- recheck:
+recheck:
if ( unlikely(state == STATE_IOREQ_NONE) )
{
/*
@@ -283,7 +283,7 @@ static bool hvm_free_legacy_ioreq_gfn(struct hvm_ioreq_server *s,
for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
{
if ( gfn_eq(gfn, _gfn(d->arch.hvm.params[i])) )
- break;
+ break;
}
if ( i > HVM_PARAM_BUFIOREQ_PFN )
return false;
@@ -397,7 +397,7 @@ static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
clear_page(iorp->va);
return 0;
- fail:
+fail:
if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
put_page_and_type(page);
@@ -541,14 +541,14 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
spin_unlock(&s->lock);
return 0;
- fail3:
+fail3:
free_xen_event_channel(v->domain, sv->ioreq_evtchn);
- fail2:
+fail2:
spin_unlock(&s->lock);
xfree(sv);
- fail1:
+fail1:
return rc;
}
@@ -688,7 +688,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
return 0;
- fail:
+fail:
hvm_ioreq_server_free_rangesets(s);
return rc;
@@ -711,9 +711,9 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
list_entry )
- hvm_update_ioreq_evtchn(s, sv);
+ hvm_update_ioreq_evtchn(s, sv);
- done:
+done:
spin_unlock(&s->lock);
}
@@ -729,7 +729,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
s->enabled = false;
- done:
+done:
spin_unlock(&s->lock);
}
@@ -768,7 +768,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
return 0;
- fail_add:
+fail_add:
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s);
@@ -848,7 +848,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
return 0;
- fail:
+fail:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
domain_unpause(d);
@@ -892,7 +892,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
rc = 0;
- out:
+out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
@@ -939,7 +939,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
rc = 0;
- out:
+out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
@@ -990,7 +990,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
break;
}
- out:
+out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
@@ -1042,7 +1042,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
rc = rangeset_add_range(r, start, end);
- out:
+out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
@@ -1094,7 +1094,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
rc = rangeset_remove_range(r, start, end);
- out:
+out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
@@ -1134,7 +1134,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
rc = p2m_set_ioreq_server(d, flags, s);
- out:
+out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
if ( rc == 0 && flags == 0 )
@@ -1177,7 +1177,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
rc = 0;
- out:
+out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
@@ -1201,7 +1201,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
return 0;
- fail:
+fail:
while ( id-- != 0 )
{
s = GET_IOREQ_SERVER(d, id);
@@ -1225,7 +1225,7 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
- hvm_ioreq_server_remove_vcpu(s, v);
+ hvm_ioreq_server_remove_vcpu(s, v);
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
}
@@ -1287,7 +1287,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
if ( CF8_ADDR_HI(cf8) &&
d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
(x86_fam = get_cpu_family(
- d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
+ d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
x86_fam < 0x17 )
{
uint64_t msr_val;
@@ -1300,7 +1300,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
else
{
type = (p->type == IOREQ_TYPE_PIO) ?
- XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
+ XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
addr = p->addr;
}
@@ -1357,7 +1357,8 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
buf_ioreq_t bp = { .data = p->data,
.addr = p->addr,
.type = p->type,
- .dir = p->dir };
+ .dir = p->dir
+ };
/* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
int qw = 0;
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index e03a87ad50..baa701017c 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -1,8 +1,8 @@
/******************************************************************************
* irq.c
- *
+ *
* Interrupt distribution and delivery logic.
- *
+ *
* Copyright (c) 2006, K A Fraser, XenSource Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -293,7 +293,7 @@ static void hvm_set_callback_irq_level(struct vcpu *v)
break;
}
- out:
+out:
spin_unlock(&d->arch.hvm.irq_lock);
}
@@ -367,7 +367,7 @@ int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
vpic_irq_positive_edge(d, isa_irq);
}
- out:
+out:
spin_unlock(&d->arch.hvm.irq_lock);
dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
@@ -382,9 +382,9 @@ int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
uint8_t dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
uint8_t dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
uint8_t delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
- >> MSI_DATA_DELIVERY_MODE_SHIFT;
+ >> MSI_DATA_DELIVERY_MODE_SHIFT;
uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
- >> MSI_DATA_TRIGGER_SHIFT;
+ >> MSI_DATA_TRIGGER_SHIFT;
uint8_t vector = data & MSI_DATA_VECTOR_MASK;
if ( !vector )
@@ -477,7 +477,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
pdev = hvm_irq->callback_via.pci.dev = (uint8_t)(via >> 11) & 31;
pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
if ( hvm_irq->callback_via_asserted )
- __hvm_pci_intx_assert(d, pdev, pintx);
+ __hvm_pci_intx_assert(d, pdev, pintx);
break;
case HVMIRQ_callback_vector:
hvm_irq->callback_via.vector = (uint8_t)via;
@@ -489,8 +489,8 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
spin_unlock(&d->arch.hvm.irq_lock);
for_each_vcpu ( d, v )
- if ( is_vcpu_online(v) )
- hvm_assert_evtchn_irq(v);
+ if ( is_vcpu_online(v) )
+ hvm_assert_evtchn_irq(v);
#ifndef NDEBUG
printk(XENLOG_G_INFO "Dom%u callback via changed to ", d->domain_id);
@@ -585,12 +585,12 @@ int hvm_local_events_need_delivery(struct vcpu *v)
static void irq_dump(struct domain *d)
{
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
- int i;
+ int i;
printk("Domain %d:\n", d->domain_id);
printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
" ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
hvm_irq->pci_intx.pad[0], hvm_irq->pci_intx.pad[1],
- (uint32_t) hvm_irq->isa_irq.pad[0],
+ (uint32_t) hvm_irq->isa_irq.pad[0],
hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 )
@@ -618,7 +618,7 @@ static void irq_dump(struct domain *d)
hvm_irq->pci_link_assert_count[2],
hvm_irq->pci_link_assert_count[3]);
printk("Callback via %i:%#"PRIx32",%s asserted\n",
- hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
+ hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
hvm_irq->callback_via_asserted ? "" : " not");
}
@@ -631,8 +631,8 @@ static void dump_irq_info(unsigned char key)
rcu_read_lock(&domlist_read_lock);
for_each_domain ( d )
- if ( is_hvm_domain(d) )
- irq_dump(d);
+ if ( is_hvm_domain(d) )
+ irq_dump(d);
rcu_read_unlock(&domlist_read_lock);
}
@@ -670,7 +670,7 @@ static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);
if ( asserted )
- __hvm_pci_intx_assert(d, pdev, pintx);
+ __hvm_pci_intx_assert(d, pdev, pintx);
spin_unlock(&d->arch.hvm.irq_lock);
@@ -707,7 +707,7 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
/* Clear the PCI link assert counts */
for ( link = 0; link < 4; link++ )
hvm_irq->pci_link_assert_count[link] = 0;
-
+
/* Clear the GSI link assert counts */
for ( gsi = 0; gsi < hvm_irq->nr_gsis; gsi++ )
hvm_irq->gsi_assert_count[gsi] = 0;
@@ -760,7 +760,7 @@ static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
for ( link = 0; link < 4; link++ )
if ( hvm_irq->pci_link.route[link] > 15 )
{
- gdprintk(XENLOG_ERR,
+ gdprintk(XENLOG_ERR,
"HVM restore: PCI-ISA link %u out of range (%u)\n",
link, hvm_irq->pci_link.route[link]);
return -EINVAL;
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index 2a41ccc930..73d0c2d60d 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -46,7 +46,8 @@ bool hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old)
{
bool sync = ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask;
- vm_event_request_t req = {
+ vm_event_request_t req =
+ {
.reason = VM_EVENT_REASON_WRITE_CTRLREG,
.u.write_ctrlreg.index = index,
.u.write_ctrlreg.new_value = value,
@@ -68,13 +69,14 @@ bool hvm_monitor_emul_unimplemented(void)
* Send a vm_event to the monitor to signal that the current
* instruction couldn't be emulated.
*/
- vm_event_request_t req = {
+ vm_event_request_t req =
+ {
.reason = VM_EVENT_REASON_EMUL_UNIMPLEMENTED,
.vcpu_id = curr->vcpu_id,
};
return curr->domain->arch.monitor.emul_unimplemented_enabled &&
- monitor_traps(curr, true, &req) == 1;
+ monitor_traps(curr, true, &req) == 1;
}
void hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value)
@@ -83,9 +85,10 @@ void hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value)
if ( monitored_msr(curr->domain, msr) &&
(!monitored_msr_onchangeonly(curr->domain, msr) ||
- new_value != old_value) )
+ new_value != old_value) )
{
- vm_event_request_t req = {
+ vm_event_request_t req =
+ {
.reason = VM_EVENT_REASON_MOV_TO_MSR,
.u.mov_to_msr.msr = msr,
.u.mov_to_msr.new_value = new_value,
@@ -100,7 +103,8 @@ void hvm_monitor_descriptor_access(uint64_t exit_info,
uint64_t vmx_exit_qualification,
uint8_t descriptor, bool is_write)
{
- vm_event_request_t req = {
+ vm_event_request_t req =
+ {
.reason = VM_EVENT_REASON_DESCRIPTOR_ACCESS,
.u.desc_access.descriptor = descriptor,
.u.desc_access.is_write = is_write,
@@ -112,9 +116,7 @@ void hvm_monitor_descriptor_access(uint64_t exit_info,
req.u.desc_access.arch.vmx.exit_qualification = vmx_exit_qualification;
}
else
- {
req.u.desc_access.arch.svm.exitinfo = exit_info;
- }
monitor_traps(current, true, &req);
}
@@ -136,11 +138,11 @@ static inline unsigned long gfn_of_rip(unsigned long rip)
int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
unsigned long trap_type, unsigned long insn_length)
{
- /*
- * rc < 0 error in monitor/vm_event, crash
- * !rc continue normally
- * rc > 0 paused waiting for response, work here is done
- */
+ /*
+ * rc < 0 error in monitor/vm_event, crash
+ * !rc continue normally
+ * rc > 0 paused waiting for response, work here is done
+ */
struct vcpu *curr = current;
struct arch_domain *ad = &curr->domain->arch;
vm_event_request_t req = {};
@@ -204,7 +206,8 @@ int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
unsigned int err, uint64_t cr2)
{
- vm_event_request_t req = {
+ vm_event_request_t req =
+ {
.reason = VM_EVENT_REASON_INTERRUPT,
.u.interrupt.x86.vector = vector,
.u.interrupt.x86.type = type,
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 7ccd85bcea..ef71f9b5a6 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -30,14 +30,17 @@
#define pat_cr_2_paf(pat_cr,n) ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)
/* PAT entry to PTE flags (PAT, PCD, PWT bits). */
-static const uint8_t pat_entry_2_pte_flags[8] = {
+static const uint8_t pat_entry_2_pte_flags[8] =
+{
0, _PAGE_PWT,
_PAGE_PCD, _PAGE_PCD | _PAGE_PWT,
_PAGE_PAT, _PAGE_PAT | _PAGE_PWT,
- _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
+ _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT
+};
/* Effective mm type lookup table, according to MTRR and PAT. */
-static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
+static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] =
+{
#define RS MEMORY_NUM_TYPES
#define UC MTRR_TYPE_UNCACHABLE
#define WB MTRR_TYPE_WRBACK
@@ -45,14 +48,14 @@ static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
#define WP MTRR_TYPE_WRPROT
#define WT MTRR_TYPE_WRTHROUGH
-/* PAT(UC, WC, RS, RS, WT, WP, WB, UC-) */
-/* MTRR(UC) */ {UC, WC, RS, RS, UC, UC, UC, UC},
-/* MTRR(WC) */ {UC, WC, RS, RS, UC, UC, WC, WC},
-/* MTRR(RS) */ {RS, RS, RS, RS, RS, RS, RS, RS},
-/* MTRR(RS) */ {RS, RS, RS, RS, RS, RS, RS, RS},
-/* MTRR(WT) */ {UC, WC, RS, RS, WT, WP, WT, UC},
-/* MTRR(WP) */ {UC, WC, RS, RS, WT, WP, WP, WC},
-/* MTRR(WB) */ {UC, WC, RS, RS, WT, WP, WB, UC}
+ /* PAT(UC, WC, RS, RS, WT, WP, WB, UC-) */
+ /* MTRR(UC) */ {UC, WC, RS, RS, UC, UC, UC, UC},
+ /* MTRR(WC) */ {UC, WC, RS, RS, UC, UC, WC, WC},
+ /* MTRR(RS) */ {RS, RS, RS, RS,
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment