Merge branch 'x86/urgent' into x86/cleanups

Author: Ingo Molnar
Date:   2008-11-18 15:41:36 +01:00
Commit: cbe9ee00ce

7 changed files with 45 additions and 23 deletions

arch/x86/Kconfig

@@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-	select USE_GENERIC_SMP_HELPERS
 	default y
 
+config USE_GENERIC_SMP_HELPERS
+	def_bool y
+	depends on SMP
+
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP

arch/x86/kernel/ds.c

@@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 	struct ds_context *context = *p_context;
 
 	if (!context) {
+		spin_unlock(&ds_lock);
+
 		context = kzalloc(sizeof(*context), GFP_KERNEL);
 
-		if (!context)
+		if (!context) {
+			spin_lock(&ds_lock);
 			return NULL;
+		}
 
 		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
 		if (!context->ds) {
 			kfree(context);
+			spin_lock(&ds_lock);
 			return NULL;
 		}
 
+		spin_lock(&ds_lock);
+		/*
+		 * Check for race - another CPU could have allocated
+		 * it meanwhile:
+		 */
+		if (*p_context) {
+			kfree(context->ds);
+			kfree(context);
+			return *p_context;
+		}
+
 		*p_context = context;
 
 		context->this = p_context;
@@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 
 	spin_lock(&ds_lock);
 
-	if (!check_tracer(task))
-		return -EPERM;
-
 	error = -ENOMEM;
 	context = ds_alloc_context(task);
 	if (!context)
 		goto out_unlock;
 
+	error = -EPERM;
+	if (!check_tracer(task))
+		goto out_unlock;
+
 	error = -EALREADY;
 	if (context->owner[qual] == current)
 		goto out_unlock;

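Both ds.c hunks fix the same class of bug: ds_lock is a spinlock, so the GFP_KERNEL allocations (which may sleep) have to be done with the lock dropped, and every error path has to leave the lock in the state the caller expects — hence goto out_unlock rather than a bare return -EPERM. A minimal user-space sketch of the resulting drop-allocate-retake-recheck pattern, with a pthread mutex standing in for ds_lock (all names here are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdlib.h>

struct context { int data; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct context *global_ctx;	/* plays the role of *p_context */

/* Called with 'lock' held; may drop and retake it, as ds_alloc_context()
 * now does with ds_lock. */
static struct context *get_context(void)
{
	struct context *ctx = global_ctx;

	if (!ctx) {
		/* Drop the lock: the allocation may block, and blocking
		 * (sleeping, in kernel terms) under a spinlock is a bug. */
		pthread_mutex_unlock(&lock);
		ctx = calloc(1, sizeof(*ctx));
		pthread_mutex_lock(&lock);

		if (!ctx)
			return NULL;	/* lock held again, as the caller expects */

		/* Re-check under the lock: another thread may have
		 * installed a context while the lock was dropped. */
		if (global_ctx) {
			free(ctx);
			return global_ctx;
		}
		global_ctx = ctx;
	}
	return ctx;
}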
arch/x86/kernel/es7000_32.c

@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	int i = 0;
-	acpi_size tbl_size;
 
-	while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
+	while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
 		if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
 			struct oem_table *t = (struct oem_table *)header;
 
 			oem_addrX = t->OEMTableAddr;
 			oem_size = t->OEMTableSize;
-			early_acpi_os_unmap_memory(header, tbl_size);
 
 			*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
 								    oem_size);
 			return 0;
 		}
-		early_acpi_os_unmap_memory(header, tbl_size);
 	}
 	return -1;
 }
 
 void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
-	if (!oem_addr)
-		return;
-
-	__acpi_unmap_table((char *)oem_addr, oem_size);
 }
 
 #endif

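The through-line of the es7000 hunk is mapping lifetime: find_unisys_acpi_oem_table() caches a pointer into the table mapping in *oem_addr for the caller to use later, so the mapping must outlive the function — accordingly, the explicit teardown in unmap_unisys_acpi_oem_table() is reduced to an empty stub. A user-space sketch of the underlying rule (a pointer into a mapping is only valid while the mapping exists), with mmap()/munmap() standing in for the ACPI mapping calls; all names are illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Stand-in for __acpi_map_table(): returns a pointer into a fresh mapping. */
static char *map_table(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	strcpy(p, "OEM1");
	return p;
}

int main(void)
{
	char *oem = map_table();

	if (!oem)
		return 1;
	/* Calling munmap(oem, 4096) here would be the bug shape the
	 * patch avoids: 'oem' is still dereferenced below. */
	printf("%.4s\n", oem);		/* valid only while the mapping lives */
	munmap(oem, 4096);
	return 0;
}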
arch/x86/kernel/tsc_sync.c

@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
 	cycles_t start, now, prev, end;
 	int i;
 
+	rdtsc_barrier();
 	start = get_cycles();
+	rdtsc_barrier();
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
 	 */
 	__raw_spin_lock(&sync_lock);
 	prev = last_tsc;
+	rdtsc_barrier();
 	now = get_cycles();
+	rdtsc_barrier();
 	last_tsc = now;
 	__raw_spin_unlock(&sync_lock);

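RDTSC is not a serializing instruction: the CPU is free to execute it earlier or later than program order, which matters when timestamps taken on different CPUs are compared under a lock, as check_tsc_warp() does. Fencing on both sides pins the read in place, which is all the added rdtsc_barrier() calls do. A user-space sketch of the idea — plain LFENCE is used here for illustration, whereas the kernel's rdtsc_barrier() picks MFENCE or LFENCE per CPU via alternatives:

#include <stdint.h>

static inline uint64_t rdtsc_ordered(void)
{
	uint32_t lo, hi;

	asm volatile("lfence" ::: "memory");	/* earlier ops complete first */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	asm volatile("lfence" ::: "memory");	/* later ops cannot start early */

	return ((uint64_t)hi << 32) | lo;
}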
arch/x86/mach-voyager/voyager_smp.c

@@ -7,6 +7,7 @@
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
+static void voyager_send_call_func(cpumask_t callmask)
+{
+	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
+}
+
+static void voyager_send_call_func_single(int cpu)
+{
+	send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
+}
+
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = voyager_smp_prepare_cpus,
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
 	.smp_send_stop = voyager_smp_send_stop,
 	.smp_send_reschedule = voyager_smp_send_reschedule,
 
-	.send_call_func_ipi = native_send_call_func_ipi,
-	.send_call_func_single_ipi = native_send_call_func_single_ipi,
+	.send_call_func_ipi = voyager_send_call_func,
+	.send_call_func_single_ipi = voyager_send_call_func_single,
 };

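The smp_ops change is what lets Voyager ride the generic SMP helpers enabled by the Kconfig hunk above: the generic code sends its cross-call IPIs through these function pointers, so a platform whose interrupt fabric is not APIC-based — Voyager's VIC CPIs — just installs its own senders in place of the native_* ones. A miniature sketch of that ops-table dispatch (all names illustrative):

#include <stdio.h>

struct ipi_ops {
	void (*send_call_func)(unsigned long mask);
	void (*send_call_func_single)(int cpu);
};

static void voyager_like_send(unsigned long mask)
{
	printf("CPI to CPU mask %#lx\n", mask);
}

static void voyager_like_send_single(int cpu)
{
	printf("CPI to CPU %d\n", cpu);
}

/* Each platform fills in the table once at boot ... */
static struct ipi_ops ops = {
	.send_call_func		= voyager_like_send,
	.send_call_func_single	= voyager_like_send_single,
};

/* ... and generic code calls through it without knowing the platform. */
int main(void)
{
	ops.send_call_func(0xe);	/* e.g. CPUs 1-3 */
	ops.send_call_func_single(2);
	return 0;
}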
drivers/xen/balloon.c

@@ -122,14 +122,7 @@ static struct timer_list balloon_timer;
 
 static void scrub_page(struct page *page)
 {
 #ifdef CONFIG_XEN_SCRUB_PAGES
-	if (PageHighMem(page)) {
-		void *v = kmap(page);
-		clear_page(v);
-		kunmap(v);
-	} else {
-		void *v = page_address(page);
-		clear_page(v);
-	}
+	clear_highpage(page);
 #endif
 }
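
clear_highpage() from linux/highmem.h folds both branches into one call: kmap_atomic() maps a highmem page and degenerates to the page's linear address for lowmem pages. The switch also quietly fixes a bug in the removed code — kunmap() takes the struct page *, not the mapped address that was being passed to it. At the time, the helper looked roughly like this:

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}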