MIPS: mm: Add set_cpu_context() for ASID assignments
When we gain MMID support we'll be storing MMIDs as atomic64_t values and accessing them via atomic64_* functions. This necessitates that we don't use cpu_context() as the left hand side of an assignment, i.e. as a modifiable lvalue. In preparation for this, introduce a new set_cpu_context() function & replace all assignments with cpu_context() on their left hand side with an equivalent call to set_cpu_context(). To enforce that cpu_context() should not be used for assignments, we rewrite it as a static inline function. Signed-off-by: Paul Burton <paul.burton@mips.com> Cc: linux-mips@vger.kernel.org
This commit is contained in:
parent
42d5b84657
commit
0b317c389c
@ -88,7 +88,17 @@ static inline u64 asid_first_version(unsigned int cpu)
|
||||
return ~asid_version_mask(cpu) + 1;
|
||||
}
|
||||
|
||||
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
|
||||
/*
 * cpu_context() - read the ASID currently recorded for @mm on @cpu.
 *
 * Deliberately a static inline rather than a macro so it cannot appear
 * as the left hand side of an assignment (a modifiable lvalue); all
 * writes must go through set_cpu_context() instead. This prepares for
 * storing the value as an atomic64_t for MMID support.
 */
static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
{
	return mm->context.asid[cpu];
}
|
||||
|
||||
/*
 * set_cpu_context() - record @ctx as the ASID for @mm on @cpu.
 *
 * Single write-side accessor paired with cpu_context(); funnelling all
 * stores through here lets the backing storage later become an
 * atomic64_t (for MMID support) without touching any caller.
 */
static inline void set_cpu_context(unsigned int cpu,
				   struct mm_struct *mm, u64 ctx)
{
	mm->context.asid[cpu] = ctx;
}
|
||||
|
||||
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
|
||||
#define cpu_asid(cpu, mm) \
|
||||
(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
|
||||
@ -111,7 +121,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
||||
int i;
|
||||
|
||||
for_each_possible_cpu(i)
|
||||
cpu_context(i, mm) = 0;
|
||||
set_cpu_context(i, mm, 0);
|
||||
|
||||
mm->context.bd_emupage_allocmap = NULL;
|
||||
spin_lock_init(&mm->context.bd_emupage_lock);
|
||||
@ -175,7 +185,7 @@ drop_mmu_context(struct mm_struct *mm)
|
||||
htw_start();
|
||||
} else {
|
||||
/* will get a new context next time */
|
||||
cpu_context(cpu, mm) = 0;
|
||||
set_cpu_context(cpu, mm, 0);
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
@ -537,7 +537,7 @@ void flush_tlb_mm(struct mm_struct *mm)
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
|
||||
cpu_context(cpu, mm) = 0;
|
||||
set_cpu_context(cpu, mm, 0);
|
||||
}
|
||||
}
|
||||
drop_mmu_context(mm);
|
||||
@ -583,7 +583,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
|
||||
* mm has been completely unused by that CPU.
|
||||
*/
|
||||
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
|
||||
cpu_context(cpu, mm) = !exec;
|
||||
set_cpu_context(cpu, mm, !exec);
|
||||
}
|
||||
}
|
||||
local_flush_tlb_range(vma, start, end);
|
||||
@ -635,7 +635,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
|
||||
* by that CPU.
|
||||
*/
|
||||
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
|
||||
cpu_context(cpu, vma->vm_mm) = 1;
|
||||
set_cpu_context(cpu, vma->vm_mm, 1);
|
||||
}
|
||||
}
|
||||
local_flush_tlb_page(vma, page);
|
||||
|
@ -1019,7 +1019,7 @@ static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
|
||||
get_new_mmu_context(kern_mm);
|
||||
for_each_possible_cpu(i)
|
||||
if (i != cpu)
|
||||
cpu_context(i, kern_mm) = 0;
|
||||
set_cpu_context(i, kern_mm, 0);
|
||||
preempt_enable();
|
||||
}
|
||||
kvm_write_c0_guest_entryhi(cop0, entryhi);
|
||||
@ -1090,8 +1090,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
|
||||
if (i == cpu)
|
||||
continue;
|
||||
if (user)
|
||||
cpu_context(i, user_mm) = 0;
|
||||
cpu_context(i, kern_mm) = 0;
|
||||
set_cpu_context(i, user_mm, 0);
|
||||
set_cpu_context(i, kern_mm, 0);
|
||||
}
|
||||
|
||||
preempt_enable();
|
||||
|
@ -1098,8 +1098,8 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
|
||||
kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
|
||||
kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
|
||||
for_each_possible_cpu(i) {
|
||||
cpu_context(i, kern_mm) = 0;
|
||||
cpu_context(i, user_mm) = 0;
|
||||
set_cpu_context(i, kern_mm, 0);
|
||||
set_cpu_context(i, user_mm, 0);
|
||||
}
|
||||
|
||||
/* Generate new ASID for current mode */
|
||||
@ -1211,7 +1211,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
|
||||
if (gasid != vcpu->arch.last_user_gasid) {
|
||||
kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
|
||||
for_each_possible_cpu(i)
|
||||
cpu_context(i, user_mm) = 0;
|
||||
set_cpu_context(i, user_mm, 0);
|
||||
vcpu->arch.last_user_gasid = gasid;
|
||||
}
|
||||
}
|
||||
|
@ -15,7 +15,8 @@ void get_new_mmu_context(struct mm_struct *mm)
|
||||
local_flush_tlb_all(); /* start new asid cycle */
|
||||
}
|
||||
|
||||
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
|
||||
set_cpu_context(cpu, mm, asid);
|
||||
asid_cache(cpu) = asid;
|
||||
}
|
||||
|
||||
void check_mmu_context(struct mm_struct *mm)
|
||||
|
Loading…
Reference in New Issue
Block a user