ramips: backport series of patches that ensure GCRs of other CPUs are accessed properly

Signed-off-by: Nikolay Martynov <mar.kolya@gmail.com>

SVN-Revision: 47838
Commit ed8775d3c3 by John Crispin (parent e0d77e68ce, branch: master)

Files added:

1. target/linux/ramips/patches-4.3/0056-cm-intoruce-core-other-locking-functions.patch (+140)
2. target/linux/ramips/patches-4.3/0057-cm-use-core-other-locking-function.patch (+80)
3. target/linux/ramips/patches-4.3/0058-backport-ensure-core-other-GCR-reflect-correct-core.patch (+49)

--- /dev/null
+++ b/target/linux/ramips/patches-4.3/0056-cm-intoruce-core-other-locking-functions.patch
@@ -0,0 +1,140 @@
commit 23d5de8efb9aed48074a72bf3d43841e1556ca42
Author: Paul Burton <paul.burton@imgtec.com>
Date: Tue Sep 22 11:12:16 2015 -0700

MIPS: CM: Introduce core-other locking functions

Introduce mips_cm_lock_other & mips_cm_unlock_other, mirroring the
existing CPC equivalents, in order to lock access from the current core
to another via the core-other GCR region. This hasn't been required in
the past but with CM3 the CPC starts using GCR_CL_OTHER rather than
CPC_CL_OTHER and this will be required for safety.

[ralf@linux-mips.org: Fix merge conflict.]
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Patchwork: https://patchwork.linux-mips.org/patch/11207/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -334,6 +334,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
/* GCR_Cx_OTHER register fields */
#define CM_GCR_Cx_OTHER_CORENUM_SHF 16
#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16)
+#define CM3_GCR_Cx_OTHER_CORE_SHF 8
+#define CM3_GCR_Cx_OTHER_CORE_MSK (_ULCAST_(0x3f) << 8)
+#define CM3_GCR_Cx_OTHER_VP_SHF 0
+#define CM3_GCR_Cx_OTHER_VP_MSK (_ULCAST_(0x7) << 0)
/* GCR_Cx_RESET_BASE register fields */
#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12
@@ -444,4 +448,32 @@ static inline unsigned int mips_cm_vp_id
return (core * mips_cm_max_vp_width()) + vp;
}
+#ifdef CONFIG_MIPS_CM
+
+/**
+ * mips_cm_lock_other - lock access to another core
+ * @core: the other core to be accessed
+ * @vp: the VP within the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be followed
+ * by a call to mips_cm_unlock_other.
+ */
+extern void mips_cm_lock_other(unsigned int core, unsigned int vp);
+
+/**
+ * mips_cm_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cm_lock_other.
+ */
+extern void mips_cm_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CM */
+
+static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { }
+static inline void mips_cm_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CM */
+
#endif /* __MIPS_ASM_MIPS_CM_H__ */
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -9,6 +9,8 @@
*/
#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
#include <asm/mips-cm.h>
#include <asm/mipsregs.h>
@@ -136,6 +138,9 @@ static char *cm3_causes[32] = {
"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
};
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
+
phys_addr_t __mips_cm_phys_base(void)
{
u32 config3 = read_c0_config3();
@@ -200,6 +205,7 @@ int mips_cm_probe(void)
{
phys_addr_t addr;
u32 base_reg;
+ unsigned cpu;
/*
* No need to probe again if we have already been
@@ -247,9 +253,42 @@ int mips_cm_probe(void)
/* determine register width for this CM */
mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cm_core_lock, cpu));
+
return 0;
}
+void mips_cm_lock_other(unsigned int core, unsigned int vp)
+{
+ unsigned curr_core;
+ u32 val;
+
+ preempt_disable();
+ curr_core = current_cpu_data.core;
+ spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
+ val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+ } else {
+ BUG_ON(vp != 0);
+ val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
+ }
+
+ write_gcr_cl_other(val);
+}
+
+void mips_cm_unlock_other(void)
+{
+ unsigned curr_core = current_cpu_data.core;
+
+ spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+ preempt_enable();
+}
+
void mips_cm_error_report(void)
{
unsigned long revision = mips_cm_revision();
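
To make the intended calling pattern concrete, here is a minimal usage sketch. It is not part of the patch: the wrapper function name is hypothetical, but it mirrors the core_vpe_count() change in the next patch and uses accessors that already exist in asm/mips-cm.h.

#include <linux/types.h>
#include <asm/mips-cm.h>

/*
 * Hypothetical example: count the VPEs of another core by reading its
 * GCR_Cx_CONFIG through the core-other region while holding the new CM
 * core-other lock.
 */
static unsigned int example_other_core_vpes(unsigned int core)
{
	u32 cfg;

	mips_cm_lock_other(core, 0);	/* pin the core-other region to 'core' */
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();		/* let others retarget the region */

	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}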

--- /dev/null
+++ b/target/linux/ramips/patches-4.3/0057-cm-use-core-other-locking-function.patch
@@ -0,0 +1,80 @@
commit 4ede31617056b7424eef28dce59dd6dbe81729c3
Author: Paul Burton <paul.burton@imgtec.com>
Date: Tue Sep 22 11:12:17 2015 -0700

MIPS: CM: make use of mips_cm_{lock,unlock}_other

Document that CPC core-other accesses must take place within the bounds
of the CM lock, and begin using the CM lock functions where we access
the GCRs of other cores. This is required because with CM3 the CPC began
using GCR_CL_OTHER instead of CPC_CL_OTHER.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andrew Bresticker <abrestic@chromium.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: linux-kernel@vger.kernel.org
Cc: Niklas Cassel <niklas.cassel@axis.com>
Cc: Ezequiel Garcia <ezequiel.garcia@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Patchwork: https://patchwork.linux-mips.org/patch/11208/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -149,7 +149,8 @@ BUILD_CPC_Cx_RW(other, 0x10)
* @core: the other core to be accessed
*
* Call before operating upon a core via the 'other' register region in
- * order to prevent the region being moved during access. Must be followed
+ * order to prevent the region being moved during access. Must be called
+ * within the bounds of a mips_cm_{lock,unlock}_other pair, and followed
* by a call to mips_cpc_unlock_other.
*/
extern void mips_cpc_lock_other(unsigned int core);
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -37,8 +37,9 @@ static unsigned core_vpe_count(unsigned
if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
return 1;
- write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ mips_cm_lock_other(core, 0);
cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+ mips_cm_unlock_other();
return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
@@ -193,7 +194,7 @@ static void boot_core(unsigned core)
u32 access;
/* Select the appropriate core */
- write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ mips_cm_lock_other(core, 0);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -216,6 +217,8 @@ static void boot_core(unsigned core)
write_gcr_co_reset_release(0);
}
+ mips_cm_unlock_other();
+
/* The core is now powered up */
bitmap_set(core_power, core, 1);
}
--- a/arch/mips/kernel/smp-gic.c
+++ b/arch/mips/kernel/smp-gic.c
@@ -46,9 +46,11 @@ void gic_send_ipi_single(int cpu, unsign
if (mips_cpc_present() && (core != current_cpu_data.core)) {
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+ mips_cm_lock_other(core, 0);
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
mips_cpc_unlock_other();
+ mips_cm_unlock_other();
}
}
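
The smp-gic.c change above is the nesting rule in miniature: the CM core-other lock is taken outside the CPC core-other lock, because on CM3 the CPC 'other' region is selected through GCR_CL_OTHER. A hypothetical sketch of that ordering (the function name is invented; the accessors are the ones used in the hunk):

#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>

/*
 * Hypothetical sketch: power up another core's CPC domain with the
 * required lock nesting. CM lock first, CPC lock inside it, release in
 * reverse order.
 */
static void example_power_up_core(unsigned int core)
{
	mips_cm_lock_other(core, 0);		/* outer: CM core-other lock */
	mips_cpc_lock_other(core);		/* inner: CPC core-other lock */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* core-other CPC access */
	mips_cpc_unlock_other();
	mips_cm_unlock_other();			/* reverse order on release */
}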

--- /dev/null
+++ b/target/linux/ramips/patches-4.3/0058-backport-ensure-core-other-GCR-reflect-correct-core.patch
@@ -0,0 +1,49 @@
commit 78a54c4d8e5a7915a4ec2ba0eb461fae50590683
Author: Paul Burton <paul.burton@imgtec.com>
Date: Tue Sep 22 11:12:18 2015 -0700

MIPS: CM, CPC: Ensure core-other GCRs reflect the correct core

Ensure the update to which core the core-other GCR regions reflect has
taken place before any core-other GCRs are accessed by placing a memory
barrier (sync instruction) between the write to the core-other registers
and any such GCR accesses.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: linux-kernel@vger.kernel.org
Cc: Markos Chandras <markos.chandras@imgtec.com>
Patchwork: https://patchwork.linux-mips.org/patch/11209/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -278,6 +278,12 @@ void mips_cm_lock_other(unsigned int cor
}
write_gcr_cl_other(val);
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
}
void mips_cm_unlock_other(void)
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -76,6 +76,12 @@ void mips_cpc_lock_other(unsigned int co
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
}
void mips_cpc_unlock_other(void)
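
Read on its own, the barrier pattern this patch adds looks like the following simplified, hypothetical sketch (locking omitted and pre-CM3 core selection assumed; in the real code the selector write and the mb() both happen inside mips_cm_lock_other() / mips_cpc_lock_other() with the per-CPU lock held):

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/mips-cm.h>

/*
 * Simplified sketch of the ordering: the write that points the
 * core-other window at 'core' must be observed before the first access
 * through that window.
 */
static u32 example_read_other_core_config(unsigned int core)
{
	/* Select which core the core-other GCRs reflect (pre-CM3 encoding). */
	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);

	/* sync: make the selection visible before touching the window. */
	mb();

	/* Guaranteed to read 'core', not a previously selected target. */
	return read_gcr_co_config();
}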