Refresh patches.

A number of patches have landed upstream and are therefore no longer
required locally:

 062-[1-6]-MIPS-* series
 042-0004-mtd-bcm47xxpart-fix-parsing-first-block

Reintroduce lantiq/patches-4.4/0050-MIPS-Lantiq-Fix-cascaded-IRQ-setup:
it was dropped from LEDE because it had (incorrectly) been included
upstream, but that inclusion has since been reverted upstream, so the
patch is needed in LEDE again.

Run-tested on ar71xx (Archer C7 v2) and lantiq.

Signed-off-by: Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
[update from 4.4.68 to 4.4.69]
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
parent 0a05fbd135
commit 088e28772c
@@ -1,40 +0,0 @@
From bd5d21310133921021d78995ad6346f908483124 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Sun, 20 Nov 2016 16:09:30 +0100
Subject: [PATCH] mtd: bcm47xxpart: fix parsing first block after aligned TRX
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After parsing TRX we should skip to the first block placed behind it.
Our code was working only with TRX with length not aligned to the
blocksize. In other cases (length aligned) it was missing the block
places right after TRX.

This fixes calculation and simplifies the comment.

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
---
 drivers/mtd/bcm47xxpart.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -229,12 +229,10 @@ static int bcm47xxpart_parse(struct mtd_
 
             last_trx_part = curr_part - 1;
 
-            /*
-             * We have whole TRX scanned, skip to the next part. Use
-             * roundown (not roundup), as the loop will increase
-             * offset in next step.
-             */
-            offset = rounddown(offset + trx->length, blocksize);
+            /* Jump to the end of TRX */
+            offset = roundup(offset + trx->length, blocksize);
+            /* Next loop iteration will increase the offset */
+            offset -= blocksize;
             continue;
         }
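To see what the rounddown -> roundup change fixes, here is a standalone C
sketch (not the kernel driver; the rounding macros and the example offsets
and blocksize are invented here) that reproduces the old and new calculations
of the next scanned offset for an unaligned and an aligned TRX length:

    #include <stdio.h>

    #define ROUNDDOWN(x, y) ((x) - ((x) % (y)))
    #define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    /* old code: round down to a block boundary; the parser loop then
     * advances by one more blocksize before scanning the next block */
    static unsigned int next_scanned_old(unsigned int off, unsigned int len,
                                         unsigned int bs)
    {
        return ROUNDDOWN(off + len, bs) + bs;
    }

    /* new code: round up to the end of the TRX and step one block back;
     * the loop's increment then lands on the first block after the TRX */
    static unsigned int next_scanned_new(unsigned int off, unsigned int len,
                                         unsigned int bs)
    {
        return (ROUNDUP(off + len, bs) - bs) + bs;
    }

    int main(void)
    {
        unsigned int bs = 0x10000;

        /* TRX length not blocksize-aligned: both calculations agree */
        printf("unaligned: old=0x%x new=0x%x\n",
               next_scanned_old(0x40000, 0x12345, bs),
               next_scanned_new(0x40000, 0x12345, bs));

        /* TRX length blocksize-aligned: the old code lands one block too
         * far and skips the block placed right after the TRX */
        printf("aligned:   old=0x%x new=0x%x\n",
               next_scanned_old(0x40000, 0x20000, bs),
               next_scanned_new(0x40000, 0x20000, bs));
        return 0;
    }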
@@ -1,70 +0,0 @@
From: Matt Redfearn <matt.redfearn@imgtec.com>
Date: Mon, 19 Dec 2016 14:20:56 +0000
Subject: [PATCH] MIPS: Introduce irq_stack

Allocate a per-cpu irq stack for use within interrupt handlers.

Also add a utility function on_irq_stack to determine if a given stack
pointer is within the irq stack for that cpu.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
---

--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -17,6 +17,18 @@
 
 #include <irq.h>
 
+#define IRQ_STACK_SIZE    THREAD_SIZE
+
+extern void *irq_stack[NR_CPUS];
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+    unsigned long low = (unsigned long)irq_stack[cpu];
+    unsigned long high = low + IRQ_STACK_SIZE;
+
+    return (low <= sp && sp <= high);
+}
+
 #ifdef CONFIG_I8259
 static inline int irq_canonicalize(int irq)
 {
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
     OFFSET(TI_REGS, thread_info, regs);
     DEFINE(_THREAD_SIZE, THREAD_SIZE);
     DEFINE(_THREAD_MASK, THREAD_MASK);
+    DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
     BLANK();
 }
 
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -25,6 +25,8 @@
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
 
+void *irq_stack[NR_CPUS];
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -55,6 +57,15 @@ void __init init_IRQ(void)
         irq_set_noprobe(i);
 
     arch_init_irq();
+
+    for_each_possible_cpu(i) {
+        int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+        void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+
+        irq_stack[i] = s;
+        pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+            irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
+    }
 }
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -1,42 +0,0 @@
From: Matt Redfearn <matt.redfearn@imgtec.com>
Date: Mon, 19 Dec 2016 14:20:57 +0000
Subject: [PATCH] MIPS: Stack unwinding while on IRQ stack

Within unwind stack, check if the stack pointer being unwound is within
the CPU's irq_stack and if so use that page rather than the task's stack
page.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
---

--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
 #include <asm/cpu.h>
 #include <asm/dsp.h>
 #include <asm/fpu.h>
+#include <asm/irq.h>
 #include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
@@ -552,7 +553,19 @@ EXPORT_SYMBOL(unwind_stack_by_address);
 unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
                unsigned long pc, unsigned long *ra)
 {
-    unsigned long stack_page = (unsigned long)task_stack_page(task);
+    unsigned long stack_page = 0;
+    int cpu;
+
+    for_each_possible_cpu(cpu) {
+        if (on_irq_stack(cpu, *sp)) {
+            stack_page = (unsigned long)irq_stack[cpu];
+            break;
+        }
+    }
+
+    if (!stack_page)
+        stack_page = (unsigned long)task_stack_page(task);
+
     return unwind_stack_by_address(stack_page, sp, pc, ra);
 }
 #endif
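The effect of this hunk is a two-step choice of stack page for the unwinder:
prefer the irq_stack of whichever CPU the stack pointer currently falls on,
and only otherwise fall back to the task's own stack page. A compact sketch
of that selection, with invented addresses, sizes and CPU count:

    #include <stdio.h>

    #define NR_CPUS        2
    #define IRQ_STACK_SIZE 0x4000ul

    static unsigned long irq_stack[NR_CPUS] = { 0x80100000ul, 0x80200000ul };

    static int on_irq_stack(int cpu, unsigned long sp)
    {
        unsigned long low = irq_stack[cpu];

        return low <= sp && sp <= low + IRQ_STACK_SIZE;
    }

    /* mirror of the added logic: irq stack if sp is on one, else task stack */
    static unsigned long pick_stack_page(unsigned long sp, unsigned long task_page)
    {
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (on_irq_stack(cpu, sp))
                return irq_stack[cpu];

        return task_page;
    }

    int main(void)
    {
        unsigned long task_page = 0x80404000ul;

        printf("0x%lx\n", pick_stack_page(0x80201f80ul, task_page)); /* CPU1 irq stack */
        printf("0x%lx\n", pick_stack_page(0x80405c00ul, task_page)); /* task stack */
        return 0;
    }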
@@ -1,48 +0,0 @@
From: Matt Redfearn <matt.redfearn@imgtec.com>
Date: Mon, 19 Dec 2016 14:20:58 +0000
Subject: [PATCH] MIPS: Only change $28 to thread_info if coming from user
 mode

The SAVE_SOME macro is used to save the execution context on all
exceptions.
If an exception occurs while executing user code, the stack is switched
to the kernel's stack for the current task, and register $28 is switched
to point to the current_thread_info, which is at the bottom of the stack
region.
If the exception occurs while executing kernel code, the stack is left,
and this change ensures that register $28 is not updated. This is the
correct behaviour when the kernel can be executing on the separate irq
stack, because the thread_info will not be at the base of it.

With this change, register $28 is only switched to it's kernel
conventional usage of the currrent thread info pointer at the point at
which execution enters kernel space. Doing it on every exception was
redundant, but OK without an IRQ stack, but will be erroneous once that
is introduced.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Reviewed-by: Maciej W. Rozycki <macro@imgtec.com>
---

--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -216,12 +216,19 @@
         LONG_S  $25, PT_R25(sp)
         LONG_S  $28, PT_R28(sp)
         LONG_S  $31, PT_R31(sp)
+
+        /* Set thread_info if we're coming from user mode */
+        mfc0    k0, CP0_STATUS
+        sll     k0, 3           /* extract cu0 bit */
+        bltz    k0, 9f
+
         ori     $28, sp, _THREAD_MASK
         xori    $28, _THREAD_MASK
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
         .set    mips64
         pref    0, 0($28)       /* Prefetch the current pointer */
 #endif
+9:
         .set    pop
         .endm
 
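Two bit tricks in this hunk are worth spelling out. "sll k0, 3" moves the
Status.CU0 bit (bit 28) into the sign bit so "bltz" can test it with a single
branch; the kernel conventionally runs with CU0 set (which is what the
"extract cu0 bit" comment relies on), so a set bit means the exception came
from kernel code and the $28 update is skipped. The "ori"/"xori" pair with
_THREAD_MASK clears the low bits of sp, giving the base of the
THREAD_SIZE-aligned stack where thread_info lives. A standalone C model, with
ST0_CU0 and an example THREAD_SIZE stated as assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define ST0_CU0     0x10000000u     /* CP0 Status, bit 28 */
    #define THREAD_SIZE 0x4000u         /* example value */
    #define THREAD_MASK (THREAD_SIZE - 1)

    /* "sll k0, 3; bltz k0, 9f": CU0 shifted into the sign bit */
    static int exception_from_kernel(uint32_t cp0_status)
    {
        return (int32_t)(cp0_status << 3) < 0;
    }

    /* "ori $28, sp, _THREAD_MASK; xori $28, _THREAD_MASK":
     * set then clear the low bits, i.e. sp & ~THREAD_MASK */
    static uint32_t thread_info_from_sp(uint32_t sp)
    {
        return (sp | THREAD_MASK) ^ THREAD_MASK;
    }

    int main(void)
    {
        printf("user:   %d\n", exception_from_kernel(0x0000fc01u));
        printf("kernel: %d\n", exception_from_kernel(0x0000fc01u | ST0_CU0));
        printf("thread_info for sp=0x80407f20: 0x%08x\n",
               thread_info_from_sp(0x80407f20u));
        return 0;
    }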
@@ -1,116 +0,0 @@
From: Matt Redfearn <matt.redfearn@imgtec.com>
Date: Mon, 19 Dec 2016 14:20:59 +0000
Subject: [PATCH] MIPS: Switch to the irq_stack in interrupts

When enterring interrupt context via handle_int or except_vec_vi, switch
to the irq_stack of the current CPU if it is not already in use.

The current stack pointer is masked with the thread size and compared to
the base or the irq stack. If it does not match then the stack pointer
is set to the top of that stack, otherwise this is a nested irq being
handled on the irq stack so the stack pointer should be left as it was.

The in-use stack pointer is placed in the callee saved register s1. It
will be saved to the stack when plat_irq_dispatch is invoked and can be
restored once control returns here.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
---

--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -188,9 +188,44 @@ NESTED(handle_int, PT_SIZE, sp)
 
     LONG_L  s0, TI_REGS($28)
     LONG_S  sp, TI_REGS($28)
-    PTR_LA  ra, ret_from_irq
-    PTR_LA  v0, plat_irq_dispatch
-    jr      v0
+
+    /*
+     * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+     * Check if we are already using the IRQ stack.
+     */
+    move    s1, sp      # Preserve the sp
+
+    /* Get IRQ stack for this CPU */
+    ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+    lui     k1, %hi(irq_stack)
+#else
+    lui     k1, %highest(irq_stack)
+    daddiu  k1, %higher(irq_stack)
+    dsll    k1, 16
+    daddiu  k1, %hi(irq_stack)
+    dsll    k1, 16
+#endif
+    LONG_SRL    k0, SMP_CPUID_PTRSHIFT
+    LONG_ADDU   k1, k0
+    LONG_L  t0, %lo(irq_stack)(k1)
+
+    # Check if already on IRQ stack
+    PTR_LI  t1, ~(_THREAD_SIZE-1)
+    and     t1, t1, sp
+    beq     t0, t1, 2f
+
+    /* Switch to IRQ stack */
+    li      t1, _IRQ_STACK_SIZE
+    PTR_ADD sp, t0, t1
+
+2:
+    jal     plat_irq_dispatch
+
+    /* Restore sp */
+    move    sp, s1
+
+    j       ret_from_irq
 #ifdef CONFIG_CPU_MICROMIPS
     nop
 #endif
@@ -263,8 +298,44 @@ NESTED(except_vec_vi_handler, 0, sp)
 
     LONG_L  s0, TI_REGS($28)
     LONG_S  sp, TI_REGS($28)
-    PTR_LA  ra, ret_from_irq
-    jr      v0
+
+    /*
+     * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+     * Check if we are already using the IRQ stack.
+     */
+    move    s1, sp      # Preserve the sp
+
+    /* Get IRQ stack for this CPU */
+    ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+    lui     k1, %hi(irq_stack)
+#else
+    lui     k1, %highest(irq_stack)
+    daddiu  k1, %higher(irq_stack)
+    dsll    k1, 16
+    daddiu  k1, %hi(irq_stack)
+    dsll    k1, 16
+#endif
+    LONG_SRL    k0, SMP_CPUID_PTRSHIFT
+    LONG_ADDU   k1, k0
+    LONG_L  t0, %lo(irq_stack)(k1)
+
+    # Check if already on IRQ stack
+    PTR_LI  t1, ~(_THREAD_SIZE-1)
+    and     t1, t1, sp
+    beq     t0, t1, 2f
+
+    /* Switch to IRQ stack */
+    li      t1, _IRQ_STACK_SIZE
+    PTR_ADD sp, t0, t1
+
+2:
+    jal     plat_irq_dispatch
+
+    /* Restore sp */
+    move    sp, s1
+
+    j       ret_from_irq
 END(except_vec_vi_handler)
 
 /*
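The core decision in both hunks is the same: mask the current sp down to a
THREAD_SIZE boundary, compare it with this CPU's irq_stack base, and switch
sp to the top of the irq stack only if they differ; a match means this is a
nested interrupt already running on the irq stack. A C model of that
decision, with invented addresses and with IRQ_STACK_SIZE equal to
THREAD_SIZE as defined in the "MIPS: Introduce irq_stack" patch:

    #include <stdio.h>

    #define THREAD_SIZE    0x4000ul
    #define IRQ_STACK_SIZE THREAD_SIZE

    static unsigned long pick_sp(unsigned long sp, unsigned long irq_stack_base)
    {
        /* "PTR_LI t1, ~(_THREAD_SIZE-1); and t1, t1, sp; beq t0, t1, 2f" */
        if ((sp & ~(THREAD_SIZE - 1)) == irq_stack_base)
            return sp;                              /* nested irq: keep sp */

        /* "li t1, _IRQ_STACK_SIZE; PTR_ADD sp, t0, t1" */
        return irq_stack_base + IRQ_STACK_SIZE;     /* top of the irq stack */
    }

    int main(void)
    {
        unsigned long irq_base = 0x80100000ul;

        /* first interrupt, taken on a task stack: switch */
        printf("0x%lx\n", pick_sp(0x80407e40ul, irq_base)); /* 0x80104000 */

        /* nested interrupt, already on the irq stack: stay put */
        printf("0x%lx\n", pick_sp(0x80103c20ul, irq_base)); /* 0x80103c20 */
        return 0;
    }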
@@ -1,21 +0,0 @@
From: Matt Redfearn <matt.redfearn@imgtec.com>
Date: Mon, 19 Dec 2016 14:21:00 +0000
Subject: [PATCH] MIPS: Select HAVE_IRQ_EXIT_ON_IRQ_STACK

Since do_IRQ is now invoked on a separate IRQ stack, we select
HAVE_IRQ_EXIT_ON_IRQ_STACK so that softirq's may be invoked directly
from irq_exit(), rather than requiring do_softirq_own_stack.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
---

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -9,6 +9,7 @@ config MIPS
     select HAVE_CONTEXT_TRACKING
     select HAVE_GENERIC_DMA_COHERENT
     select HAVE_IDE
+    select HAVE_IRQ_EXIT_ON_IRQ_STACK
     select HAVE_OPROFILE
     select HAVE_PERF_EVENTS
     select PERF_USE_VMALLOC
@@ -1,35 +0,0 @@
From de856416e7143e32afc4849625616554aa060f7a Mon Sep 17 00:00:00 2001
From: Matt Redfearn <matt.redfearn@imgtec.com>
Date: Wed, 25 Jan 2017 17:00:25 +0000
Subject: [PATCH] MIPS: IRQ Stack: Fix erroneous jal to plat_irq_dispatch

Commit dda45f701c9d ("MIPS: Switch to the irq_stack in interrupts")
changed both the normal and vectored interrupt handlers. Unfortunately
the vectored version, "except_vec_vi_handler", was incorrectly modified
to unconditionally jal to plat_irq_dispatch, rather than doing a jalr to
the vectored handler that has been set up. This is ok for many platforms
which set the vectored handler to plat_irq_dispatch anyway, but will
cause problems with platforms that use other handlers.

Fixes: dda45f701c9d ("MIPS: Switch to the irq_stack in interrupts")
Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/15110/
Signed-off-by: James Hogan <james.hogan@imgtec.com>
---
 arch/mips/kernel/genex.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -330,7 +330,7 @@ NESTED(except_vec_vi_handler, 0, sp)
     PTR_ADD sp, t0, t1
 
 2:
-    jal     plat_irq_dispatch
+    jalr    v0
 
     /* Restore sp */
     move    sp, s1
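The one-line fix matters because "jal" encodes a fixed target in the
instruction (it always calls plat_irq_dispatch), whereas "jalr v0" calls
through whatever handler address was loaded into v0 for this interrupt
vector. A loose C analogy, with invented names, is calling a fixed function
versus calling through a per-platform function pointer:

    #include <stdio.h>

    static void plat_irq_dispatch(void) { puts("generic dispatcher"); }
    static void custom_vi_handler(void) { puts("platform's vectored handler"); }

    /* the handler a platform registered for this vector; in the real code
     * its address is what ends up in v0 */
    static void (*vi_handler)(void) = custom_vi_handler;

    int main(void)
    {
        plat_irq_dispatch();  /* like "jal plat_irq_dispatch": wrong for vectored irqs */
        vi_handler();         /* like "jalr v0": calls the registered handler */
        return 0;
    }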
@@ -0,0 +1,87 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 19 Jan 2017 12:14:44 +0100
Subject: [PATCH] MIPS: Lantiq: Fix cascaded IRQ setup

With the IRQ stack changes integrated, the XRX200 devices started
emitting a constant stream of kernel messages like this:

[ 565.415310] Spurious IRQ: CAUSE=0x1100c300

This appears to be caused by IP0 firing for some reason without being
handled. Fix this by setting up IP2-6 as a proper chained IRQ handler and
calling do_IRQ for all MIPS CPU interrupts.

Cc: john@phrozen.org
Cc: stable@vger.kernel.org
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -271,6 +271,11 @@ static void ltq_hw5_irqdispatch(void)
 DEFINE_HWx_IRQDISPATCH(5)
 #endif
 
+static void ltq_hw_irq_handler(struct irq_desc *desc)
+{
+    ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
+}
+
 #ifdef CONFIG_MIPS_MT_SMP
 void __init arch_init_ipiirq(int irq, struct irqaction *action)
 {
@@ -315,23 +320,19 @@ static struct irqaction irq_call = {
 asmlinkage void plat_irq_dispatch(void)
 {
     unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-    unsigned int i;
+    int irq;
 
-    if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
-        do_IRQ(MIPS_CPU_TIMER_IRQ);
-        goto out;
-    } else {
-        for (i = 0; i < MAX_IM; i++) {
-            if (pending & (CAUSEF_IP2 << i)) {
-                ltq_hw_irqdispatch(i);
-                goto out;
-            }
-        }
+    if (!pending) {
+        spurious_interrupt();
+        return;
     }
-    pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
 
-out:
-    return;
+    pending >>= CAUSEB_IP;
+    while (pending) {
+        irq = fls(pending) - 1;
+        do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+        pending &= ~BIT(irq);
+    }
 }
 
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
@@ -356,11 +357,6 @@ static const struct irq_domain_ops irq_d
     .map = icu_map,
 };
 
-static struct irqaction cascade = {
-    .handler = no_action,
-    .name = "cascade",
-};
-
 int __init icu_of_init(struct device_node *node, struct device_node *parent)
 {
     struct device_node *eiu_node;
@@ -392,7 +388,7 @@ int __init icu_of_init(struct device_nod
     mips_cpu_irq_init();
 
     for (i = 0; i < MAX_IM; i++)
-        setup_irq(i + 2, &cascade);
+        irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
 
     if (cpu_has_vint) {
         pr_info("Setting up vectored interrupts\n");
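The rewritten plat_irq_dispatch treats pending as the IP bits of CP0 Cause:
shift out the low CAUSEB_IP bits, then repeatedly service the highest pending
line via fls(), so IP7 (the timer) is handled first and even IP0/IP1 now
reach do_IRQ instead of being reported as spurious. A standalone walk-through
with a made-up pending mask (CAUSEB_IP and MIPS_CPU_IRQ_BASE restated here as
assumptions, and a local stand-in for the kernel's fls()):

    #include <stdio.h>

    #define CAUSEB_IP         8     /* IP bits start at bit 8 of CP0 Cause */
    #define MIPS_CPU_IRQ_BASE 0     /* assumed base of the CPU irq range */

    static int fls32(unsigned int x)    /* stand-in for the kernel's fls() */
    {
        int i = 0;

        while (x) {
            x >>= 1;
            i++;
        }
        return i;
    }

    int main(void)
    {
        /* pretend IP7 (timer), IP2 and IP0 are pending: Cause bits 15, 10, 8 */
        unsigned int pending = (1u << 15) | (1u << 10) | (1u << 8);
        int irq;

        pending >>= CAUSEB_IP;
        while (pending) {
            irq = fls32(pending) - 1;
            /* the kernel calls do_IRQ(MIPS_CPU_IRQ_BASE + irq) here */
            printf("dispatch CPU irq %d\n", MIPS_CPU_IRQ_BASE + irq);
            pending &= ~(1u << irq);
        }
        return 0;
    }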