Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/hugetlb-2level.h          71
-rw-r--r--  arch/arm/include/asm/hugetlb-3level.h          61
-rw-r--r--  arch/arm/include/asm/hugetlb.h                 87
-rw-r--r--  arch/arm/include/asm/memory.h                 105
-rw-r--r--  arch/arm/include/asm/mmzone.h                  49
-rw-r--r--  arch/arm/include/asm/module.h                   7
-rw-r--r--  arch/arm/include/asm/page.h                     2
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h         152
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h    12
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h          83
-rw-r--r--  arch/arm/include/asm/pgtable.h                 34
-rw-r--r--  arch/arm/include/asm/proc-fns.h                28
-rw-r--r--  arch/arm/include/asm/runtime-patch.h          208
-rw-r--r--  arch/arm/include/asm/tlb.h                     16
-rw-r--r--  arch/arm/include/asm/tlbflush.h                 2
-rw-r--r--  arch/arm/include/asm/topology.h                15
-rw-r--r--  arch/arm/include/asm/unistd.h                   2
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h              1
18 files changed, 889 insertions, 46 deletions
diff --git a/arch/arm/include/asm/hugetlb-2level.h b/arch/arm/include/asm/hugetlb-2level.h
new file mode 100644
index 00000000000..3532b54bcfd
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-2level.h
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/include/asm/hugetlb-2level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_2LEVEL_H
+#define _ASM_ARM_HUGETLB_2LEVEL_H
+
+
+pte_t huge_ptep_get(pte_t *ptep);
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pmd_t *pmdp = (pmd_t *) ptep;
+ set_pmd_at(mm, addr, pmdp, pmd_wrprotect(*pmdp));
+}
+
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pmd_t *pmdp = (pmd_t *)ptep;
+ pte_t pte = huge_ptep_get(ptep);
+ pmd_clear(pmdp);
+
+ return pte;
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ int changed = !pte_same(huge_ptep_get(ptep), pte);
+
+ if (changed) {
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ huge_ptep_clear_flush(vma, addr, ptep);
+ }
+
+ return changed;
+}
+
+#endif /* _ASM_ARM_HUGETLB_2LEVEL_H */
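Note that huge_ptep_get() and set_huge_pte_at() are only declared here; their out-of-line definitions are not part of this diff. A rough sketch (illustrative, not the actual implementation) of the translation the getter has to perform, assuming the PMD_DSECT_* encoding introduced in pgtable-2level.h later in this series:

    /* Sketch only: rebuild a Linux pte view from the first of the two
     * 1MB section descriptors that back a 2MB huge page at 2 levels. */
    static pte_t huge_ptep_get_sketch(pte_t *ptep)
    {
        pmd_t pmd = *(pmd_t *)ptep;               /* section descriptor */
        pteval_t val = pmd_val(pmd) & HPAGE_MASK; /* physical base      */

        /* fold the repurposed domain bits back into Linux pte bits */
        if (pmd_val(pmd) & PMD_DSECT_DIRTY)
            val |= L_PTE_DIRTY;
        if (pmd_val(pmd) & PMD_DSECT_AF)
            val |= L_PTE_YOUNG;
        if (!(pmd_val(pmd) & PMD_SECT_AP_WRITE))
            val |= L_PTE_RDONLY;

        return __pte(val | L_PTE_PRESENT);
    }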
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644
index 00000000000..486806445c3
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -0,0 +1,61 @@
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644
index 00000000000..1e92975f6c1
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb.h
@@ -0,0 +1,87 @@
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/hugetlb-3level.h>
+#else
+#include <asm/hugetlb-2level.h>
+#endif
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* _ASM_ARM_HUGETLB_H */
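prepare_hugepage_range() only enforces alignment: both addr and len must be multiples of the huge page size. With 2MB huge pages, so that huge_page_mask(h) == ~(HPAGE_SIZE - 1), the check behaves as follows (f being any open hugetlbfs file; addresses are illustrative):

    prepare_hugepage_range(f, 0x40000000, 0x400000); /* 0: both 2MB-aligned    */
    prepare_hugepage_range(f, 0x40100000, 0x400000); /* -EINVAL: addr unaligned */
    prepare_hugepage_range(f, 0x40000000, 0x4f0000); /* -EINVAL: len unaligned  */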
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 73cf03aa981..485c3154ba8 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,6 +18,9 @@
#include <linux/types.h>
#include <linux/sizes.h>
+#include <asm/cache.h>
+#include <asm/runtime-patch.h>
+
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
@@ -99,11 +102,11 @@
#endif
#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
+#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
#endif
#ifndef END_MEM
-#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
+#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
#ifndef PAGE_OFFSET
@@ -141,6 +144,20 @@
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+/*
+ * Minimum guaranteed alignment in pgd_alloc(). The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension. This
+ * does not fully accommodate the 40-bit addressing capability of ARM LPAE,
+ * but gives us about 38 bits.
+ */
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT 0
+#endif
+#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+
#ifndef __ASSEMBLY__
/*
@@ -151,40 +168,70 @@
#ifndef __virt_to_phys
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-/*
- * Constants used to force the right instruction encodings and shifts
- * so that all we need to do is modify the 8-bit constant field.
- */
-#define __PV_BITS_31_24 0x81000000
+extern unsigned long __pv_offset;
+extern phys_addr_t __pv_phys_offset;
+#define PHYS_OFFSET __virt_to_phys(PAGE_OFFSET)
-extern unsigned long __pv_phys_offset;
-#define PHYS_OFFSET __pv_phys_offset
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+ phys_addr_t t;
-#define __pv_stub(from,to,instr,type) \
- __asm__("@ __pv_stub\n" \
- "1: " instr " %0, %1, %2\n" \
- " .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
- " .popsection\n" \
- : "=r" (to) \
- : "r" (from), "I" (type))
+#ifndef CONFIG_ARM_LPAE
+ early_patch_imm8("add", t, x, __pv_offset, 0);
+#else
+ unsigned long __tmp;
-static inline unsigned long __virt_to_phys(unsigned long x)
-{
- unsigned long t;
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+#ifndef __ARMEB__
+#define PV_PHYS_HIGH "(__pv_phys_offset + 4)"
+#else
+#define PV_PHYS_HIGH "__pv_phys_offset"
+#endif
+
+ early_patch_stub(
+ /* type */ PATCH_IMM8,
+ /* code */
+ "ldr %[tmp], =__pv_offset\n"
+ "ldr %[tmp], [%[tmp]]\n"
+ "add %Q[to], %[from], %[tmp]\n"
+ "ldr %[tmp], =" PV_PHYS_HIGH "\n"
+ "ldr %[tmp], [%[tmp]]\n"
+ "mov %R[to], %[tmp]\n",
+ /* pad */ 4,
+ /* patch_data */
+ ".long __pv_offset\n"
+ "add %Q[to], %[from], %[imm]\n"
+ ".long " PV_PHYS_HIGH "\n"
+ "mov %R[to], %[imm]\n",
+ /* operands */
+ : [to] "=r" (t),
+ [tmp] "=&r" (__tmp)
+ : [from]"r" (x),
+ [imm] "I" (__IMM8),
+ "i" (&__pv_offset),
+ "i" (&__pv_phys_offset));
+#endif
return t;
}
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
{
- unsigned long t;
- __pv_stub(x, t, "sub", __PV_BITS_31_24);
+ unsigned long t, xlo = x;
+ early_patch_imm8("sub", t, xlo, __pv_offset, 0);
return t;
}
+
#else
-#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+ return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
#endif
#endif
#endif /* __ASSEMBLY__ */
@@ -222,14 +269,14 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
static inline void *phys_to_virt(phys_addr_t x)
{
- return (void *)(__phys_to_virt((unsigned long)(x)));
+ return (void *)__phys_to_virt(x);
}
/*
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/*
@@ -279,4 +326,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#include <asm-generic/memory_model.h>
+#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
+
#endif
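The ARCH_PGD_SHIFT packing added above works as in the sketch below. pgd_alloc() guarantees L1_CACHE_SHIFT alignment, so the low bits of the physical pgd address are known to be zero, and a 32-bit register reaches 32 + L1_CACHE_SHIFT bits of physical address (typically 32 + 6 = 38, hence the "about 38 bits" in the comment). Illustrative, not code from this patch:

    phys_addr_t pgd_phys = virt_to_phys(pgd);  /* may exceed 32 bits with LPAE */
    u32 packed = pgd_phys >> ARCH_PGD_SHIFT;   /* now fits in one register     */
    /* ...handed through head.S / proc-*.S, then restored: */
    phys_addr_t restored = (phys_addr_t)packed << ARCH_PGD_SHIFT;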
diff --git a/arch/arm/include/asm/mmzone.h b/arch/arm/include/asm/mmzone.h
new file mode 100644
index 00000000000..628e5035659
--- /dev/null
+++ b/arch/arm/include/asm/mmzone.h
@@ -0,0 +1,49 @@
+/*
+ * Discontiguous memory and NUMA support, based on the PowerPC implementation.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ASM_ARM_MMZONE_H_
+#define __ASM_ARM_MMZONE_H_
+#ifdef __KERNEL__
+
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_NUMA_ALLOC_NODES
+#define NODE_DATA(nid) (node_data[nid])
+extern void __init arm_numa_alloc_nodes(unsigned long max_low);
+extern struct pglist_data *node_data[];
+#else
+#define arm_numa_alloc_nodes(_mlow) do {} while (0)
+#endif
+
+#ifdef CONFIG_NUMA
+extern cpumask_var_t *node_to_cpumask_map;
+extern int numa_cpu_lookup_table[];
+extern int pfn_to_nid(unsigned long pfn);
+extern void __init arm_setup_nodes(unsigned long min, unsigned long max_high);
+extern void __init arm_numa_alloc_cpumask(unsigned long max_low);
+#else
+#define pfn_to_nid(pfn) (0)
+#define arm_setup_nodes(min, max_high) memblock_set_node( \
+ __pfn_to_phys(min), \
+ __pfn_to_phys(max_high - min), 0)
+#define arm_numa_alloc_cpumask(max_low) do {} while (0)
+#endif /* CONFIG_NUMA */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_ARM_MMZONE_H_ */
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 0d3a28dbc8e..c4ebe522ca4 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -39,9 +39,16 @@ struct mod_arch_specific {
#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
#endif
+#ifdef CONFIG_ARM_RUNTIME_PATCH
+#define MODULE_ARCH_VERMAGIC_RT_PATCH "rt-patch "
+#else
+#define MODULE_ARCH_VERMAGIC_RT_PATCH ""
+#endif
+
#define MODULE_ARCH_VERMAGIC \
MODULE_ARCH_VERMAGIC_ARMVSN \
MODULE_ARCH_VERMAGIC_ARMTHUMB \
+ MODULE_ARCH_VERMAGIC_RT_PATCH \
MODULE_ARCH_VERMAGIC_P2V
#endif /* _ASM_ARM_MODULE_H */
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 812a4944e78..6363f3d1d50 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -13,7 +13,7 @@
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386e..eb1640ed2a5 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -127,6 +127,11 @@
#define L_PTE_NONE (_AT(pteval_t, 1) << 11)
/*
+ * for 2 levels of paging we don't mask off any bits when comparing present ptes
+ */
+#define L_PTE_CMP_MASKOFF 0
+
+/*
* These are the memory types, defined to be compatible with
* pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
*/
@@ -160,7 +165,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
return (pmd_t *)pud;
}
-#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_bad(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_FAULT)
#define copy_pmd(pmdpd,pmdps) \
do { \
@@ -176,11 +181,156 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
clean_pmd_entry(pmdp); \
} while (0)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define _PMD_HUGE(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#else
+#define _PMD_HUGE(pmd) (0)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end) (end)
+#define pmd_present(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) != PMD_TYPE_FAULT)
+
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+#ifdef CONFIG_SYS_SUPPORTS_HUGETLBFS
+
+/*
+ * Here follow some of the definitions needed for huge page support. We can't
+ * put these in the hugetlb source files, as they are also required for
+ * transparent hugepage support.
+ */
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_LINUX_PTE_COUNT (PAGE_OFFSET >> HPAGE_SHIFT)
+#define HUGE_LINUX_PTE_SIZE (HUGE_LINUX_PTE_COUNT * sizeof(pte_t *))
+#define HUGE_LINUX_PTE_INDEX(addr) ((addr) >> HPAGE_SHIFT)
+
+/*
+ * We re-purpose the following domain bits in the section descriptor
+ */
+#define PMD_DSECT_DIRTY (_AT(pmdval_t, 1) << 5)
+#define PMD_DSECT_AF (_AT(pmdval_t, 1) << 6)
+#define PMD_DSECT_SPLITTING (_AT(pmdval_t, 1) << 7)
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect, &= ~PMD_SECT_AP_WRITE);
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ /*
+ * we can sometimes be passed a pmd pointing to a level 2 descriptor
+ * from collapse_huge_page.
+ */
+ if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
+ pmdp[0] = __pmd(pmd_val(pmd));
+ pmdp[1] = __pmd(pmd_val(pmd) + 256 * sizeof(pte_t));
+ } else {
+ pmdp[0] = __pmd(pmd_val(pmd)); /* first 1M section */
+ pmdp[1] = __pmd(pmd_val(pmd) + SECTION_SIZE); /* second 1M section */
+ }
+
+ flush_pmd_entry(pmdp);
+}
+
+#define HPMD_XLATE(res, cmp, from, to) do { if (cmp & from) res |= to; \
+ else res &= ~to; \
+ } while (0)
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ pmdval_t pmdval = pmd_val(pmd);
+ pteval_t newprotval = pgprot_val(newprot);
+
+ HPMD_XLATE(pmdval, newprotval, L_PTE_XN, PMD_SECT_XN);
+ HPMD_XLATE(pmdval, newprotval, L_PTE_SHARED, PMD_SECT_S);
+ HPMD_XLATE(pmdval, newprotval, L_PTE_YOUNG, PMD_DSECT_AF);
+ HPMD_XLATE(pmdval, newprotval, L_PTE_DIRTY, PMD_DSECT_DIRTY);
+
+ /* preserve bits C & B */
+ pmdval |= (newprotval & (3 << 2));
+
+ /* Linux PTE bit 4 corresponds to PMD TEX bit 0 */
+ HPMD_XLATE(pmdval, newprotval, 1 << 4, PMD_SECT_TEX(1));
+
+ if (newprotval & L_PTE_RDONLY)
+ pmdval &= ~PMD_SECT_AP_WRITE;
+ else
+ pmdval |= PMD_SECT_AP_WRITE;
+
+ return __pmd(pmdval);
+}
+
+#else
+#define HPAGE_SIZE 0
+#endif /* CONFIG_SYS_SUPPORTS_HUGETLBFS */
+
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_mkhuge(pmd) (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+PMD_BIT_FUNC(mkold, &= ~PMD_DSECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_DSECT_SPLITTING);
+PMD_BIT_FUNC(mkdirty, |= PMD_DSECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_DSECT_AF);
+PMD_BIT_FUNC(mkwrite, |= PMD_SECT_AP_WRITE);
+PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_DSECT_SPLITTING)
+#define pmd_young(pmd) (pmd_val(pmd) & PMD_DSECT_AF)
+#define pmd_write(pmd) (pmd_val(pmd) & PMD_SECT_AP_WRITE)
+#define pmd_trans_huge(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ /*
+ * for a section, we need to mask off more of the pmd
+ * before looking up the pfn
+ */
+ if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+ return __phys_to_pfn(pmd_val(pmd) & HPAGE_MASK);
+ else
+ return __phys_to_pfn(pmd_val(pmd) & PHYS_MASK);
+}
+
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
+{
+ pmd_t pmd = __pmd(__pfn_to_phys(pfn) | PMD_SECT_AP_READ | PMD_SECT_nG);
+
+ return pmd_modify(pmd, prot);
+}
+
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ /*
+ * for a section, we need to mask off more of the pmd
+ * before looking up the page as it is a section descriptor.
+ */
+ if (_PMD_HUGE(pmd))
+ return phys_to_page(pmd_val(pmd) & HPAGE_MASK);
+
+ return phys_to_page(pmd_val(pmd) & PHYS_MASK);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */
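With CONFIG_TRANSPARENT_HUGEPAGE the helpers above are driven by the generic THP code; roughly, the mm/huge_memory.c call pattern of this era composes them like the following sketch (not part of the patch itself):

    pmd_t pmd = mk_pmd(page, vma->vm_page_prot);     /* pfn_pmd() + pmd_modify() */
    pmd = pmd_mkhuge(pmd_mkdirty(pmd_mkwrite(pmd)));
    set_pmd_at(mm, haddr, pmdp, pmd);  /* writes both 1MB section entries,
                                        * then flush_pmd_entry()          */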
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index d7952824c5c..bf1707f0239 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -38,6 +38,8 @@
*/
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
@@ -74,4 +76,14 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+#if defined CONFIG_VMSPLIT_2G
+#define TTBR1_OFFSET (1 << 4) /* skip two L1 entries */
+#elif defined CONFIG_VMSPLIT_3G
+#define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */
+#else
+#define TTBR1_OFFSET 0
+#endif
+
+#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+
#endif
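TTBR1_SIZE lands in the TTBCR.T1SZ field (bits 18:16 of TTBCR in the LPAE format), where the region translated via TTBR1 is 2^(32 - T1SZ) bytes. Worked through for the splits handled above:

    VMSPLIT_3G: (0xC0000000 >> 30) - 1 = 2  ->  T1SZ = 2, TTBR1 covers 1GB
    VMSPLIT_2G: (0x80000000 >> 30) - 1 = 1  ->  T1SZ = 1, TTBR1 covers 2GB

With T1SZ = 2 the TTBR1 walk starts directly at level 2, which is why TTBR1_OFFSET for VMSPLIT_3G skips the pgd page plus the first three pmd tables (4096 * (1 + 3)) to reach the pmds covering the top 1GB.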
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a3f37929940..3c2432fe5e6 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -48,20 +48,28 @@
#define PMD_SHIFT 21
#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
+#define PMD_MASK (~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1))
/*
* section address mask and size definitions.
*/
#define SECTION_SHIFT 21
#define SECTION_SIZE (1UL << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
+#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1))
#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
* "Linux" PTE definitions for LPAE.
*
* These bits overlap with the hardware bits but the naming is preserved for
@@ -79,6 +87,9 @@
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
+#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57)
+
/*
* To be used in assembly code with the upper page attributes.
*/
@@ -86,6 +97,11 @@
#define L_PTE_DIRTY_HIGH (1 << (55 - 32))
/*
+ * we need to mask off PTE_EXT_NG when comparing present ptes.
+ */
+#define L_PTE_CMP_MASKOFF PTE_EXT_NG
+
+/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
*/
#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0) << 2) /* strongly ordered */
@@ -150,6 +166,67 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+#define pte_huge(pte) ((pte_val(pte) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+
+#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+
+#define pmd_present(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) != PMD_TYPE_FAULT)
+#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+
+#define pmd_mkhuge(pmd) (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+#define pmd_pfn(pmd) ((pmd_val(pmd) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY;
+ pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+ return pmd;
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ BUG_ON(addr >= TASK_SIZE);
+ pmd = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+ set_pmd(pmdp, pmd);
+ flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_3LEVEL_H */
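pmd_modify() above deliberately carries over only the USER/XN/RDONLY bits from the new protection, leaving the descriptor type, AF and memory-attribute bits untouched. A sketch of write-protecting an existing transparent huge page with these helpers (hypothetical caller):

    pmd_t pmd = *pmdp;                      /* existing section entry        */
    pmd = pmd_modify(pmd, PAGE_READONLY);   /* sets RDONLY (and USER/XN);
                                             * AF and attrs are preserved    */
    set_pmd_at(mm, haddr, pmdp, pmd);       /* re-adds PMD_SECT_nG, flushes  */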
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9c82f988c0e..529ceae8d98 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,9 @@
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
+
+#include <asm/tlbflush.h>
+
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
@@ -163,15 +166,17 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
+#ifdef CONFIG_SYS_SUPPORTS_HUGETLBFS
+ if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+ return __va(pmd_val(pmd) & HPAGE_MASK);
+#endif
+
return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte) do { } while (0)
@@ -246,6 +251,29 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
}
/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * This can cause some comparison tests made by pte_same to fail spuriously and
+ * lead to other problems.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ pteval_t vala = pte_val(pte_a), valb = pte_val(pte_b);
+ if (pte_present(pte_a))
+ vala &= ~L_PTE_CMP_MASKOFF;
+
+ if (pte_present(pte_b))
+ valb &= ~L_PTE_CMP_MASKOFF;
+
+ return vala == valb;
+}
+
+/*
* Encode and decode a swap entry. Swap entries are stored in the Linux
* page tables as follows:
*
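The scenario the new pte_same() guards against looks like this (illustrative):

    pte_t a = mk_pte(page, vma->vm_page_prot); /* PTE_EXT_NG clear           */
    pte_t b = *ptep;   /* stored via set_pte_at(): PTE_EXT_NG set for user   */

    /* pte_val(a) != pte_val(b) even though both describe the same mapping;
     * pte_same(a, b) masks L_PTE_CMP_MASKOFF from both and returns 1.      */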
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f3628fb3d2b..2d270b8c6e1 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -60,7 +60,7 @@ extern struct processor {
/*
* Set the page table
*/
- void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+ void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
@@ -116,13 +116,27 @@ extern void cpu_resume(void);
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
#ifdef CONFIG_ARM_LPAE
+
+#define cpu_get_ttbr(nr) \
+ ({ \
+ u64 ttbr; \
+ __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \
+ : "=r" (ttbr) \
+ : : ); \
+ ttbr; \
+ })
+
+#define cpu_set_ttbr(nr, val) \
+ do { \
+ u64 ttbr = val; \
+ __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \
+ : : "r" (ttbr) \
+ : "cc"); \
+ } while (0)
+
#define cpu_get_pgd() \
({ \
- unsigned long pg, pg2; \
- __asm__("mrrc p15, 0, %0, %1, c2" \
- : "=r" (pg), "=r" (pg2) \
- : \
- : "cc"); \
+ u64 pg = cpu_get_ttbr(0); \
pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
(pgd_t *)phys_to_virt(pg); \
})
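A sketch of how the new TTBR accessors are meant to be used, e.g. temporarily pointing TTBR0 at an identity-mapped pgd (hypothetical caller; idmap_pgd assumed to be set up elsewhere). Note that nr must be a literal constant, since it is stringified straight into the mrrc/mcrr encoding:

    u64 saved = cpu_get_ttbr(0);
    cpu_set_ttbr(0, virt_to_phys(idmap_pgd));
    local_flush_tlb_all();
    /* ... run code through the identity mapping ... */
    cpu_set_ttbr(0, saved);
    local_flush_tlb_all();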
diff --git a/arch/arm/include/asm/runtime-patch.h b/arch/arm/include/asm/runtime-patch.h
new file mode 100644
index 00000000000..366444d7eef
--- /dev/null
+++ b/arch/arm/include/asm/runtime-patch.h
@@ -0,0 +1,208 @@
+/*
+ * arch/arm/include/asm/runtime-patch.h
+ * Note: this file should not be included by non-asm/.h files
+ *
+ * Copyright 2012 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARM_RUNTIME_PATCH_H
+#define __ASM_ARM_RUNTIME_PATCH_H
+
+#include <linux/stringify.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARM_RUNTIME_PATCH
+
+struct patch_info {
+ void *insn;
+ u16 type;
+ u8 insn_size;
+ u8 data_size;
+ u32 data[0];
+};
+
+#define PATCH_IMM8 0x0001
+struct patch_info_imm8 {
+ u32 *imm;
+ u32 insn;
+};
+
+#define patch_next(p) ((void *)(p) + sizeof(*(p)) + (p)->data_size)
+#define patch_data(p) ((void *)&(p)->data[0])
+
+#define patch_stub(type, code, patch_data, ...) \
+ __asm__("@ patch stub\n" \
+ "1:\n" \
+ code \
+ "2:\n" \
+ " .pushsection .runtime.patch.table, \"a\"\n" \
+ "3:\n" \
+ " .word 1b\n" \
+ " .hword (" __stringify(type) ")\n" \
+ " .byte (2b-1b)\n" \
+ " .byte (5f-4f)\n" \
+ "4:\n" \
+ patch_data \
+ " .align\n" \
+ "5:\n" \
+ " .popsection\n" \
+ __VA_ARGS__)
+
+#define early_patch_stub(type, code, pad, patch_data, ...) \
+ __asm__("@ patch stub\n" \
+ "1:\n" \
+ " b 6f\n" \
+ " .fill " __stringify(pad) ", 1, 0\n" \
+ "2:\n" \
+ " .pushsection .runtime.patch.table, \"a\"\n" \
+ "3:\n" \
+ " .word 1b\n" \
+ " .hword (" __stringify(type) ")\n" \
+ " .byte (2b-1b)\n" \
+ " .byte (5f-4f)\n" \
+ "4:\n" \
+ patch_data \
+ " .align\n" \
+ "5:\n" \
+ " .popsection\n" \
+ " .pushsection .runtime.patch.code, \"ax\"\n" \
+ "6:\n" \
+ code \
+ " b 2b\n" \
+ " .popsection\n" \
+ __VA_ARGS__)
+
+/* constant used to force encoding */
+#define __IMM8 (0x81 << 24)
+
+/*
+ * patch_imm8() - init-time specialized binary operation (imm8 operand)
+ * This effectively does: to = from "insn" sym,
+ * where the value of sym is fixed at init-time, and is patched
+ * in as an immediate operand. This value must be
+ * representable as an 8-bit quantity with an optional
+ * rotation.
+ *
+ * The stub code produced by this variant is non-functional
+ * prior to patching. Use early_patch_imm8() if you need the
+ * code to be functional early on in the init sequence.
+ */
+#define patch_imm8(_insn, _to, _from, _sym, _ofs) \
+ patch_stub( \
+ /* type */ \
+ PATCH_IMM8, \
+ /* code */ \
+ _insn " %[to], %[from], %[imm]\n", \
+ /* patch_data */ \
+ ".long " __stringify(_sym + _ofs) "\n" \
+ _insn " %[to], %[from], %[imm]\n", \
+ /* operands */ \
+ : [to] "=r" (_to) \
+ : [from] "r" (_from), \
+ [imm] "I" (__IMM8), \
+ "i" (&(_sym)) \
+ : "cc")
+
+/*
+ * patch_imm8_mov() - same as patch_imm8(), but for mov/mvn instructions
+ */
+#define patch_imm8_mov(_insn, _to, _sym, _ofs) \
+ patch_stub( \
+ /* type */ \
+ PATCH_IMM8, \
+ /* code */ \
+ _insn " %[to], %[imm]\n", \
+ /* patch_data */ \
+ ".long " __stringify(_sym + _ofs) "\n" \
+ _insn " %[to], %[imm]\n", \
+ /* operands */ \
+ : [to] "=r" (_to) \
+ : [imm] "I" (__IMM8), \
+ "i" (&(_sym)) \
+ : "cc")
+
+/*
+ * early_patch_imm8() - early functional variant of patch_imm8() above. The
+ * same restrictions on the constant apply here. This
+ * version emits workable (albeit inefficient) code at
+ * compile-time, and therefore functions even prior to
+ * patch application.
+ */
+#define early_patch_imm8(_insn, _to, _from, _sym, _ofs) \
+do { \
+ unsigned long __tmp; \
+ early_patch_stub( \
+ /* type */ \
+ PATCH_IMM8, \
+ /* code */ \
+ "ldr %[tmp], =" __stringify(_sym + _ofs) "\n"\
+ "ldr %[tmp], [%[tmp]]\n" \
+ _insn " %[to], %[from], %[tmp]\n", \
+ /* pad */ \
+ 0, \
+ /* patch_data */ \
+ ".long " __stringify(_sym + _ofs) "\n" \
+ _insn " %[to], %[from], %[imm]\n", \
+ /* operands */ \
+ : [to] "=r" (_to), \
+ [tmp] "=&r" (__tmp) \
+ : [from] "r" (_from), \
+ [imm] "I" (__IMM8), \
+ "i" (&(_sym)) \
+ : "cc"); \
+} while (0)
+
+#define early_patch_imm8_mov(_insn, _to, _sym, _ofs) \
+do { \
+ unsigned long __tmp; \
+ early_patch_stub( \
+ /* type */ \
+ PATCH_IMM8, \
+ /* code */ \
+ "ldr %[tmp], =" __stringify(_sym + _ofs) "\n"\
+ "ldr %[tmp], [%[tmp]]\n" \
+ _insn " %[to], %[tmp]\n", \
+ /* pad */ \
+ 0, \
+ /* patch_data */ \
+ ".long " __stringify(_sym + _ofs) "\n" \
+ _insn " %[to], %[imm]\n", \
+ /* operands */ \
+ : [to] "=r" (_to), \
+ [tmp] "=&r" (__tmp) \
+ : [imm] "I" (__IMM8), \
+ "i" (&(_sym)) \
+ : "cc"); \
+} while (0)
+
+int runtime_patch(const void *table, unsigned size);
+void runtime_patch_kernel(void);
+
+#else
+
+static inline int runtime_patch(const void *table, unsigned size)
+{
+ return 0;
+}
+
+static inline void runtime_patch_kernel(void)
+{
+}
+
+#endif /* CONFIG_ARM_RUNTIME_PATCH */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM_RUNTIME_PATCH_H */
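For reference, the memory.h change earlier in this diff consumes this interface as below: the compile-time stub loads __pv_offset from memory, and runtime_patch() later collapses it into a single add with an immediate operand (the function name here is illustrative):

    static inline phys_addr_t example_virt_to_phys(unsigned long x)
    {
        phys_addr_t t;

        /* patched to "add t, x, #imm8" once __pv_offset is known */
        early_patch_imm8("add", t, x, __pv_offset, 0);
        return t;
    }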
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee2..0fc2d9d3503 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -92,10 +92,16 @@ static inline void tlb_flush(struct mmu_gather *tlb)
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
if (!tlb->fullmm) {
+ unsigned long size = PAGE_SIZE;
+
if (addr < tlb->range_start)
tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
+
+ if (tlb->vma && is_vm_hugetlb_page(tlb->vma))
+ size = HPAGE_SIZE;
+
+ if (addr + size > tlb->range_end)
+ tlb->range_end = addr + size;
}
}
@@ -223,6 +229,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
#endif
}
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+ tlb_add_flush(tlb, addr);
+}
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
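The effect of the tlb_add_flush() change above, with 2MB huge pages and an illustrative address:

    /* gather over a hugetlb VMA, addr = 0x40000000:
     *   before: range_end grows to 0x40001000 (PAGE_SIZE step)
     *   after:  range_end grows to 0x40200000 (HPAGE_SIZE step),
     *           so one call covers the whole huge page mapping   */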
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77e..907cede92e8 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -505,6 +505,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
}
#endif
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 983fa7c153a..5357eb195d1 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -68,6 +68,21 @@ static inline int cluster_to_logical_mask(unsigned int socket_id,
#endif
+#ifdef CONFIG_NUMA
+
+static inline int cpu_to_node(int cpu)
+{
+ return numa_cpu_lookup_table[cpu];
+}
+
+#define cpumask_of_node(node) ((node) == -1 ? \
+ cpu_all_mask : \
+ node_to_cpumask_map[node])
+
+#define parent_node(node) (node)
+
+#endif /* CONFIG_NUMA */
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 21a2700d295..16f45f673f6 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls (380)
+#define __NR_syscalls (384)
#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
#define __ARCH_WANT_STAT64
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 4da7cde70b5..3d97a64aede 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -406,6 +406,7 @@
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
/* 378 for kcmp */
#define __NR_finit_module (__NR_SYSCALL_BASE+379)
+#define __NR_migrate_pages (__NR_SYSCALL_BASE+380)
/*
* This may need to be greater than __NR_last_syscall+1 in order to