Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/Kbuild         |  1
-rw-r--r--  arch/ia64/include/asm/agp.h          | 27
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h  |  2
-rw-r--r--  arch/ia64/include/asm/page.h         | 18
-rw-r--r--  arch/ia64/include/asm/pgtable.h      | 31
-rw-r--r--  arch/ia64/kernel/efi.c               |  2
-rw-r--r--  arch/ia64/kernel/process.c           |  1
-rw-r--r--  arch/ia64/kernel/sys_ia64.c          |  7
-rw-r--r--  arch/ia64/kernel/time.c              |  1
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S       |  1
-rw-r--r--  arch/ia64/mm/init.c                  |  8
11 files changed, 48 insertions(+), 51 deletions(-)
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index f994c1daf9d4..aefae2efde9f 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
+generic-y += agp.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += vtime.h
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
deleted file mode 100644
index 0261507dc264..000000000000
--- a/arch/ia64/include/asm/agp.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_AGP_H
-#define _ASM_IA64_AGP_H
-
-/*
- * IA-64 specific AGP definitions.
- *
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
- * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
- * (unlike x86, where it gets mapped "write-coalescing").
- */
-#define map_page_into_agp(page) do { } while (0)
-#define unmap_page_from_agp(page) do { } while (0)
-#define flush_agp_cache() mb()
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order) \
- ((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order) \
- free_pages((unsigned long)(table), (order))
-
-#endif /* _ASM_IA64_AGP_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index a5d9d788eede..af6fa8e1597c 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -8,7 +8,7 @@
*/
extern const struct dma_map_ops *dma_ops;
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
return dma_ops;
}
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 1b990466d540..310b09c3342d 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -82,25 +82,19 @@ do { \
} while (0)
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
({ \
- struct page *page = alloc_page_vma( \
- GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr); \
- if (page) \
- flush_dcache_page(page); \
- page; \
+ struct folio *folio = vma_alloc_folio( \
+ GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
+ if (folio) \
+ flush_dcache_folio(folio); \
+ folio; \
})
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
-
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
-#ifdef CONFIG_FLATMEM
-# define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif
-
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 01517a5e6778..21c97e31a28a 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -58,6 +58,9 @@
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE (1 << 7)
+
#define _PFN_MASK _PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
@@ -399,6 +402,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
* Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
* bits in the swap-type field of the swap pte. It would be nice to
* enforce that, but we can't easily include <linux/swap.h> here.
@@ -406,16 +412,35 @@ extern void paging_init (void);
*
* Format of swap pte:
* bit 0 : present bit (must be zero)
- * bits 1- 7: swap-type
+ * bits 1- 6: swap type
+ * bit 7 : exclusive marker
* bits 8-62: swap offset
* bit 63 : _PAGE_PROTNONE bit
*/
-#define __swp_type(entry) (((entry).val >> 1) & 0x7f)
+#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
#define __swp_offset(entry) (((entry).val << 1) >> 9)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type & 0x3f) << 1) | \
+ ((long) (offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
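The pgtable.h hunk above narrows the swap type to bits 1-6 and claims bit 7 for the exclusive marker. The following stand-alone sketch (user-space C with invented names, not the kernel macros themselves) round-trips a (type, offset) pair through that documented bit layout to show the three fields stay disjoint.

/* Stand-alone sketch (user-space, invented names -- not the kernel macros):
 * models the swap-PTE layout documented in the hunk above:
 *   bit 0     : present (zero for swap PTEs)
 *   bits 1-6  : swap type
 *   bit 7     : exclusive marker (_PAGE_SWP_EXCLUSIVE)
 *   bits 8-62 : swap offset
 *   bit 63    : _PAGE_PROTNONE
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_EXCLUSIVE	(UINT64_C(1) << 7)

static uint64_t swp_entry(unsigned type, uint64_t offset)
{
	return ((uint64_t)(type & 0x3f) << 1) | (offset << 8);
}

static unsigned swp_type(uint64_t entry)
{
	return (entry >> 1) & 0x3f;
}

static uint64_t swp_offset(uint64_t entry)
{
	/* shift left then right, as the kernel macro does, to drop bit 63 */
	return (entry << 1) >> 9;
}

int main(void)
{
	uint64_t e = swp_entry(5, 0x12345) | SWP_EXCLUSIVE;

	assert(swp_type(e) == 5);
	assert(swp_offset(e) == 0x12345);
	assert(e & SWP_EXCLUSIVE);	/* marker does not disturb type/offset */
	printf("entry=%#jx type=%u offset=%#jx\n",
	       (uintmax_t)e, swp_type(e), (uintmax_t)swp_offset(e));
	return 0;
}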
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 31149e41f9be..21dfa4aa35bb 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -525,7 +525,7 @@ efi_init (void)
*/
if (efi_systab == NULL)
panic("Whoa! Can't find EFI system table.\n");
- if (efi_systab_check_header(&efi_systab->hdr, 1))
+ if (efi_systab_check_header(&efi_systab->hdr))
panic("Whoa! EFI system table signature incorrect\n");
efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 416305e550e2..f6195a0a00ae 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -242,6 +242,7 @@ void arch_cpu_idle(void)
(*mark_idle)(1);
raw_safe_halt();
+ raw_local_irq_disable();
if (mark_idle)
(*mark_idle)(0);
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f6a502e8f02c..6e948d015332 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -170,6 +170,9 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
asmlinkage long
ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
{
+ struct timespec64 rtn_tp;
+ s64 tick_ns;
+
/*
* ia64's clock_gettime() syscall is implemented as a vdso call
* fsys_clock_gettime(). Currently it handles only
@@ -185,8 +188,8 @@ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *
switch (which_clock) {
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
- s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
- struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
+ tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
+ rtn_tp = ns_to_timespec64(tick_ns);
return put_timespec64(&rtn_tp, tp);
}
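The sys_ia64.c hunk hoists the tick_ns and rtn_tp declarations out of the switch body, presumably because a declaration placed directly after a case label is not accepted as a statement by the C dialects the kernel builds with. The small user-space sketch below (invented function and constant names) reproduces the hoisted-declaration shape and the DIV_ROUND_UP() resolution calculation.

/* Minimal user-space illustration (not the kernel function). */
#include <stdio.h>

/* same rounding the kernel's DIV_ROUND_UP() macro performs */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static long long clock_res_ns(int which_clock, long long itc_freq)
{
	long long tick_ns;	/* hoisted: no declaration right after "case" */

	switch (which_clock) {
	case 0:			/* stand-ins for CLOCK_REALTIME/MONOTONIC */
	case 1:
		tick_ns = DIV_ROUND_UP(1000000000LL, itc_freq);
		return tick_ns;
	default:
		return 0;
	}
}

int main(void)
{
	/* e.g. a 400 MHz ITC gives a 3 ns resolution (2.5 rounded up) */
	printf("res = %lld ns\n", clock_res_ns(0, 400000000LL));
	return 0;
}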
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index fa9c0ab8c6fc..83ef044b63ef 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/sched/cputime.h>
+#include <asm/cputime.h>
#include <asm/delay.h>
#include <asm/efi.h>
#include <asm/hw_irq.h>
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 9b265783be6a..53dfde161c8a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -51,7 +51,6 @@ SECTIONS {
__end_ivt_text = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index fc4e4217e87f..7f5353e28516 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -109,7 +109,7 @@ ia64_init_addr_space (void)
vma_set_anonymous(vma);
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
- vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+ vm_flags_init(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
@@ -127,8 +127,8 @@ ia64_init_addr_space (void)
vma_set_anonymous(vma);
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
- vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
- VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_init(vma, VM_READ | VM_MAYREAD | VM_IO |
+ VM_DONTEXPAND | VM_DONTDUMP);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
mmap_write_unlock(current->mm);
@@ -272,7 +272,7 @@ static int __init gate_vma_init(void)
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+ vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
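The mm/init.c hunks replace direct stores to vma->vm_flags with vm_flags_init(). The toy below (plain user-space C with made-up names, not the kernel API) only illustrates the general pattern: routing flag initialization through one helper so invariants can later be checked in a single place.

/* User-space toy (made-up names, not the kernel API). */
#include <stdio.h>

#define TOY_VM_READ	0x1UL
#define TOY_VM_GROWSUP	0x2UL
#define TOY_VM_ACCOUNT	0x4UL

struct toy_vma {
	unsigned long vm_flags;
};

static void toy_vm_flags_init(struct toy_vma *vma, unsigned long flags)
{
	/* one central place where locking assertions or bookkeeping could
	 * later be added before vm_flags is written */
	vma->vm_flags = flags;
}

int main(void)
{
	struct toy_vma vma = { 0 };

	toy_vm_flags_init(&vma, TOY_VM_READ | TOY_VM_GROWSUP | TOY_VM_ACCOUNT);
	printf("vm_flags = %#lx\n", vma.vm_flags);
	return 0;
}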