summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorFlora Cui <flora.cui@amd.com>2020-08-28 10:29:11 +0800
committerFlora Cui <Flora.Cui@amd.com>2020-08-30 21:57:10 -0400
commit78361ef6c29737441b37396f0fe4d61ed0ba871f (patch)
treee3a6f6579e0979672f22748b8e5649560c0cb0a9 /include
parent570ea696a9f176d4107bc92973999f5f77c45c5f (diff)
drm/amdkcl: split vmf stuff to kcl_memory.h
no actual code change. Signed-off-by: Flora Cui <flora.cui@amd.com> Reviewed-by: Yang Xiong <Yang.Xiong@amd.com>
Diffstat (limited to 'include')
-rw-r--r--include/kcl/kcl_memory.h61
-rw-r--r--include/kcl/kcl_mm.h56
2 files changed, 62 insertions, 55 deletions
diff --git a/include/kcl/kcl_memory.h b/include/kcl/kcl_memory.h
new file mode 100644
index 000000000000..d7b6210d3565
--- /dev/null
+++ b/include/kcl/kcl_memory.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _KCL_KCL_MEMORY_H
+#define _KCL_KCL_MEMORY_H
+
+#ifndef HAVE_VMF_INSERT
+static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
+ unsigned long addr,
+ pfn_t pfn)
+{
+ int err;
+#if !defined(HAVE_PFN_T_VM_INSERT_MIXED)
+ err = vm_insert_mixed(vma, addr, pfn_t_to_pfn(pfn));
+#else
+ err = vm_insert_mixed(vma, addr, pfn);
+#endif
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ int err = vm_insert_pfn(vma, addr, pfn);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+#endif /* HAVE_VMF_INSERT */
+
+#ifndef HAVE_VMF_INSERT_MIXED_PROT
+vm_fault_t _kcl_vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+ pfn_t pfn, pgprot_t pgprot);
+static inline
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+ pfn_t pfn, pgprot_t pgprot)
+{
+ return _kcl_vmf_insert_mixed_prot(vma, addr, pfn, pgprot);
+}
+#endif /* HAVE_VMF_INSERT_MIXED_PROT */
+
+#ifndef HAVE_VMF_INSERT_PFN_PROT
+vm_fault_t _kcl_vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot);
+static inline
+vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot)
+{
+ return _kcl_vmf_insert_pfn_prot(vma, addr, pfn, pgprot);
+}
+#endif /* HAVE_VMF_INSERT_PFN_PROT */
+
+#endif
diff --git a/include/kcl/kcl_mm.h b/include/kcl/kcl_mm.h
index 63bc6f157552..03b1426a9a7c 100644
--- a/include/kcl/kcl_mm.h
+++ b/include/kcl/kcl_mm.h
@@ -9,6 +9,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <kcl/kcl_mm_types.h>
+#include <kcl/kcl_memory.h>
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
@@ -128,59 +129,4 @@ static inline unsigned long zone_managed_pages(struct zone *zone)
}
#endif /* HAVE_ZONE_MANAGED_PAGES */
-#ifndef HAVE_VMF_INSERT
-static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
- unsigned long addr,
- pfn_t pfn)
-{
- int err;
-#if !defined(HAVE_PFN_T_VM_INSERT_MIXED)
- err = vm_insert_mixed(vma, addr, pfn_t_to_pfn(pfn));
-#else
- err = vm_insert_mixed(vma, addr, pfn);
-#endif
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- if (err < 0 && err != -EBUSY)
- return VM_FAULT_SIGBUS;
-
- return VM_FAULT_NOPAGE;
-}
-
-static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- int err = vm_insert_pfn(vma, addr, pfn);
-
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- if (err < 0 && err != -EBUSY)
- return VM_FAULT_SIGBUS;
-
- return VM_FAULT_NOPAGE;
-}
-
-#endif /* HAVE_VMF_INSERT */
-
-#ifndef HAVE_VMF_INSERT_MIXED_PROT
-vm_fault_t _kcl_vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn, pgprot_t pgprot);
-static inline
-vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn, pgprot_t pgprot)
-{
- return _kcl_vmf_insert_mixed_prot(vma, addr, pfn, pgprot);
-}
-#endif /* HAVE_VMF_INSERT_MIXED_PROT */
-
-#ifndef HAVE_VMF_INSERT_PFN_PROT
-vm_fault_t _kcl_vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, pgprot_t pgprot);
-static inline
-vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, pgprot_t pgprot)
-{
- return _kcl_vmf_insert_pfn_prot(vma, addr, pfn, pgprot);
-}
-#endif /* HAVE_VMF_INSERT_PFN_PROT */
#endif /* AMDKCL_MM_H */