#define PERCPU_SIZE_SHIFT 13
#define PERCPU_STACK_END PAGE_SIZE
#define PERCPU_LINUX_SP PERCPU_STACK_END

#ifndef __ASSEMBLY__

#include <asm/cell.h>

/*
 * Per-CPU data area. Only the stack size and the linux_sp offset are
 * mirrored in the assembly-visible defines above; cpu_id is now filled
 * in from C code (entry()), so it needs no offset define anymore.
 */
struct per_cpu {
	/* Keep these two in sync with defines above! */
	u8 stack[PAGE_SIZE];		/* sizeof must equal PERCPU_STACK_END */
	unsigned long linux_sp;		/* offset must equal PERCPU_LINUX_SP */

	unsigned int cpu_id;		/* set by entry(), not by assembly */
	// u32 apic_id;
	struct cell *cell;

CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, linux_sp) ==
		 PERCPU_LINUX_SP);
}
#endif /* !__ASSEMBLY__ */
push %r14
push %r15

/*
 * Locate this CPU's per-CPU data area:
 *   %rsi = __page_pool + (cpu_id << PERCPU_SIZE_SHIFT)
 * cpu_id is left untouched in %rdi so it reaches entry() as the first
 * argument; %rsi carries cpu_data as the second (SysV AMD64 ABI).
 * The assembly no longer stores cpu_id into the per-CPU area —
 * entry() records it from its argument.
 */
mov %rdi,%rsi
shl $PERCPU_SIZE_SHIFT,%rsi
lea __page_pool(%rip),%rax
add %rax,%rsi

/* save the Linux stack pointer, then switch onto the per-CPU stack */
mov %rsp,PERCPU_LINUX_SP(%rsi)
lea PERCPU_STACK_END-8(%rsi),%rsp

/* entry(cpu_id=%rdi, cpu_data=%rsi); preserve cpu_data across the call */
push %rsi

call entry

pop %rsi

/* back to the saved Linux stack, restore callee-saved registers */
mov PERCPU_LINUX_SP(%rsi),%rsp

pop %r15
pop %r14
#define PERCPU_SIZE_SHIFT 14
#define PERCPU_STACK_END PAGE_SIZE
#define PERCPU_LINUX_SP PERCPU_STACK_END
-#define PERCPU_CPU_ID (PERCPU_LINUX_SP + 8)
#ifndef __ASSEMBLY__
} __attribute__((packed));
struct per_cpu {
- /* Keep these three in sync with defines above! */
+ /* Keep these two in sync with defines above! */
u8 stack[PAGE_SIZE];
unsigned long linux_sp;
- unsigned int cpu_id;
+ unsigned int cpu_id;
u32 apic_id;
struct cell *cell;
CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, linux_sp) ==
PERCPU_LINUX_SP);
- CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, cpu_id) ==
- PERCPU_CPU_ID);
}
#endif /* !__ASSEMBLY__ */
int arch_entry(unsigned int cpu_id);
void vm_exit(void);

/*
 * Common C entry point, called by the architecture entry stub on every
 * CPU. cpu_id is passed explicitly and recorded into cpu_data->cpu_id
 * by entry() itself.
 */
int entry(unsigned int cpu_id, struct per_cpu *cpu_data);

int arch_init_early(struct cell *linux_cell);
int arch_cpu_init(struct per_cpu *cpu_data);
printk("Initializing remaining processors:\n");
}
-int entry(struct per_cpu *cpu_data)
+int entry(unsigned int cpu_id, struct per_cpu *cpu_data)
{
bool master = false;
+ cpu_data->cpu_id = cpu_id;
+
spin_lock(&init_lock);
if (master_cpu_id == -1) {
master = true;
- init_early(cpu_data->cpu_id);
+ init_early(cpu_id);
}
if (!error) {