Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig         |   7
-rw-r--r-- | lib/Kconfig.debug   |  16
-rw-r--r-- | lib/Makefile        |   1
-rw-r--r-- | lib/klist.c         |  41
-rw-r--r-- | lib/mpi/mpicoder.c  |  38
-rw-r--r-- | lib/pci_iomap.c     |  66
-rw-r--r-- | lib/scatterlist.c   |   4
-rw-r--r-- | lib/sg_split.c      | 202
-rw-r--r-- | lib/vsprintf.c      |   1
9 files changed, 344 insertions, 32 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 278890dd1049..a16555281d53 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -511,6 +511,13 @@ config UCS2_STRING
 
 source "lib/fonts/Kconfig"
 
+config SG_SPLIT
+        def_bool n
+        help
+          Provides a helper to split scatterlists into chunks, each chunk being a
+          scatterlist. This should be selected by a driver or an API which
+          wishes to split a scatterlist amongst multiple DMA channels.
+
 #
 # sg chaining option
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e2894b23efb6..3e0b662cae09 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1353,20 +1353,6 @@ config RCU_CPU_STALL_TIMEOUT
           RCU grace period persists, additional CPU stall warnings are
           printed at more widely spaced intervals.
 
-config RCU_CPU_STALL_INFO
-        bool "Print additional diagnostics on RCU CPU stall"
-        depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
-        default y
-        help
-          For each stalled CPU that is aware of the current RCU grace
-          period, print out additional per-CPU diagnostic information
-          regarding scheduling-clock ticks, idle state, and,
-          for RCU_FAST_NO_HZ kernels, idle-entry state.
-
-          Say N if you are unsure.
-
-          Say Y if you want to enable such diagnostics.
-
 config RCU_TRACE
         bool "Enable tracing for RCU"
         depends on DEBUG_KERNEL
@@ -1379,7 +1365,7 @@ config RCU_TRACE
           Say N if you are unsure.
 
 config RCU_EQS_DEBUG
-        bool "Use this when adding any sort of NO_HZ support to your arch"
+        bool "Provide debugging asserts for adding NO_HZ support to an arch"
         depends on DEBUG_KERNEL
         help
           This option provides consistency checks in RCU's handling of
diff --git a/lib/Makefile b/lib/Makefile
index 0d4b30fb60e6..f2610061bfa4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -158,6 +158,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
 
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/klist.c b/lib/klist.c
index 89b485a2a58d..d74cf7a29afd 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -324,6 +324,47 @@ static struct klist_node *to_klist_node(struct list_head *n)
 }
 
 /**
+ * klist_prev - Ante up prev node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the prev node, increment its reference
+ * count, drop the lock, and return that prev node.
+ */
+struct klist_node *klist_prev(struct klist_iter *i)
+{
+        void (*put)(struct klist_node *) = i->i_klist->put;
+        struct klist_node *last = i->i_cur;
+        struct klist_node *prev;
+
+        spin_lock(&i->i_klist->k_lock);
+
+        if (last) {
+                prev = to_klist_node(last->n_node.prev);
+                if (!klist_dec_and_del(last))
+                        put = NULL;
+        } else
+                prev = to_klist_node(i->i_klist->k_list.prev);
+
+        i->i_cur = NULL;
+        while (prev != to_klist_node(&i->i_klist->k_list)) {
+                if (likely(!knode_dead(prev))) {
+                        kref_get(&prev->n_ref);
+                        i->i_cur = prev;
+                        break;
+                }
+                prev = to_klist_node(prev->n_node.prev);
+        }
+
+        spin_unlock(&i->i_klist->k_lock);
+
+        if (put && last)
+                put(last);
+        return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_prev);
+
+/**
  * klist_next - Ante up next node in list.
  * @i: Iterator structure.
 *
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index bc0a1da8afba..95c52a95259e 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -146,18 +146,25 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
         uint8_t *p;
         mpi_limb_t alimb;
         unsigned int n = mpi_get_size(a);
-        int i;
+        int i, lzeros = 0;
 
-        if (buf_len < n || !buf)
+        if (buf_len < n || !buf || !nbytes)
                 return -EINVAL;
 
         if (sign)
                 *sign = a->sign;
 
-        if (nbytes)
-                *nbytes = n;
+        p = (void *)&a->d[a->nlimbs] - 1;
+
+        for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+                if (!*p)
+                        lzeros++;
+                else
+                        break;
+        }
 
         p = buf;
+        *nbytes = n - lzeros;
 
         for (i = a->nlimbs - 1; i >= 0; i--) {
                 alimb = a->d[i];
@@ -178,6 +185,19 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
 #else
 #error please implement for this limb size.
 #endif
+
+                if (lzeros > 0) {
+                        if (lzeros >= sizeof(alimb)) {
+                                p -= sizeof(alimb);
+                        } else {
+                                mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+                                mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+                                                        + lzeros;
+                                *limb1 = *limb2;
+                                p -= lzeros;
+                        }
+                        lzeros -= sizeof(alimb);
+                }
         }
         return 0;
 }
@@ -197,7 +217,7 @@ EXPORT_SYMBOL_GPL(mpi_read_buffer);
  */
 void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
 {
-        uint8_t *buf, *p;
+        uint8_t *buf;
         unsigned int n;
         int ret;
 
@@ -220,14 +240,6 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
                 kfree(buf);
                 return NULL;
         }
-
-        /* this is sub-optimal but we need to do the shift operation
-         * because the caller has to free the returned buffer */
-        for (p = buf; !*p && *nbytes; p++, --*nbytes)
-                ;
-        if (p != buf)
-                memmove(buf, p, *nbytes);
-
         return buf;
 }
 EXPORT_SYMBOL_GPL(mpi_get_buffer);
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index bcce5f149310..5f5d24d1d53f 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -52,6 +52,51 @@ void __iomem *pci_iomap_range(struct pci_dev *dev,
 EXPORT_SYMBOL(pci_iomap_range);
 
 /**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details if this is a MMIO or PIO address space and will just do what
+ * you expect from them in the correct way. When possible write combining
+ * is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ * */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+                                 int bar,
+                                 unsigned long offset,
+                                 unsigned long maxlen)
+{
+        resource_size_t start = pci_resource_start(dev, bar);
+        resource_size_t len = pci_resource_len(dev, bar);
+        unsigned long flags = pci_resource_flags(dev, bar);
+
+
+        if (flags & IORESOURCE_IO)
+                return NULL;
+
+        if (len <= offset || !start)
+                return NULL;
+
+        len -= offset;
+        start += offset;
+        if (maxlen && len > maxlen)
+                len = maxlen;
+
+        if (flags & IORESOURCE_MEM)
+                return ioremap_wc(start, len);
+
+        /* What? */
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
  * pci_iomap - create a virtual mapping cookie for a PCI BAR
  * @dev: PCI device that owns the BAR
  * @bar: BAR number
@@ -70,4 +115,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
         return pci_iomap_range(dev, bar, 0, maxlen);
 }
 EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details if this is a MMIO or PIO address space and will just do what
+ * you expect from them in the correct way. When possible write combining
+ * is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ * */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+        return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
 #endif /* CONFIG_PCI */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f56878..bafa9933fa76 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@ EXPORT_SYMBOL(sg_nents_for_len);
  **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-        struct scatterlist *ret = &sgl[nents - 1];
-#else
         struct scatterlist *sg, *ret = NULL;
         unsigned int i;
 
         for_each_sg(sgl, sg, nents, i)
                 ret = sg;
 
-#endif
 #ifdef CONFIG_DEBUG_SG
         BUG_ON(sgl[0].sg_magic != SG_MAGIC);
         BUG_ON(!sg_is_last(ret));
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 000000000000..b063410c3593
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+        struct scatterlist *in_sg0;
+        int nents;
+        off_t skip_sg0;
+        unsigned int length_last_sg;
+
+        struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+                              off_t skip, const size_t *sizes,
+                              struct sg_splitter *splitters, bool mapped)
+{
+        int i;
+        unsigned int sglen;
+        size_t size = sizes[0], len;
+        struct sg_splitter *curr = splitters;
+        struct scatterlist *sg;
+
+        for (i = 0; i < nb_splits; i++) {
+                splitters[i].in_sg0 = NULL;
+                splitters[i].nents = 0;
+        }
+
+        for_each_sg(in, sg, nents, i) {
+                sglen = mapped ? sg_dma_len(sg) : sg->length;
+                if (skip > sglen) {
+                        skip -= sglen;
+                        continue;
+                }
+
+                len = min_t(size_t, size, sglen - skip);
+                if (!curr->in_sg0) {
+                        curr->in_sg0 = sg;
+                        curr->skip_sg0 = skip;
+                }
+                size -= len;
+                curr->nents++;
+                curr->length_last_sg = len;
+
+                while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+                        curr++;
+                        size = *(++sizes);
+                        skip += len;
+                        len = min_t(size_t, size, sglen - skip);
+
+                        curr->in_sg0 = sg;
+                        curr->skip_sg0 = skip;
+                        curr->nents = 1;
+                        curr->length_last_sg = len;
+                        size -= len;
+                }
+                skip = 0;
+
+                if (!size && --nb_splits > 0) {
+                        curr++;
+                        size = *(++sizes);
+                }
+
+                if (!nb_splits)
+                        break;
+        }
+
+        return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+        int i, j;
+        struct scatterlist *in_sg, *out_sg;
+        struct sg_splitter *split;
+
+        for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+                in_sg = split->in_sg0;
+                out_sg = split->out_sg;
+                for (j = 0; j < split->nents; j++, out_sg++) {
+                        *out_sg = *in_sg;
+                        if (!j) {
+                                out_sg->offset += split->skip_sg0;
+                                out_sg->length -= split->skip_sg0;
+                        } else {
+                                out_sg->offset = 0;
+                        }
+                        sg_dma_address(out_sg) = 0;
+                        sg_dma_len(out_sg) = 0;
+                        in_sg = sg_next(in_sg);
+                }
+                out_sg[-1].length = split->length_last_sg;
+                sg_mark_end(out_sg - 1);
+        }
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+        int i, j;
+        struct scatterlist *in_sg, *out_sg;
+        struct sg_splitter *split;
+
+        for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+                in_sg = split->in_sg0;
+                out_sg = split->out_sg;
+                for (j = 0; j < split->nents; j++, out_sg++) {
+                        sg_dma_address(out_sg) = sg_dma_address(in_sg);
+                        sg_dma_len(out_sg) = sg_dma_len(in_sg);
+                        if (!j) {
+                                sg_dma_address(out_sg) += split->skip_sg0;
+                                sg_dma_len(out_sg) -= split->skip_sg0;
+                        }
+                        in_sg = sg_next(in_sg);
+                }
+                sg_dma_len(--out_sg) = split->length_last_sg;
+        }
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array where to store the allocated output sg lists
+ * @out_mapped_nents: the resulting sg lists mapped number of sg entries. Might
+ *                    be NULL if sglist not already mapped (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * The @in is split into :
+ *  - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ *  - @out[1], which covers bytes [@skip + split_sizes[0] ..
+ *             @skip + @split_sizes[0] + @split_sizes[1] -1]
+ * etc ...
+ * It will be the caller's duty to kfree() out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+             const off_t skip, const int nb_splits,
+             const size_t *split_sizes,
+             struct scatterlist **out, int *out_mapped_nents,
+             gfp_t gfp_mask)
+{
+        int i, ret;
+        struct sg_splitter *splitters;
+
+        splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+        if (!splitters)
+                return -ENOMEM;
+
+        ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+                                 splitters, false);
+        if (ret < 0)
+                goto err;
+
+        ret = -ENOMEM;
+        for (i = 0; i < nb_splits; i++) {
+                splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+                                                    sizeof(struct scatterlist),
+                                                    gfp_mask);
+                if (!splitters[i].out_sg)
+                        goto err;
+        }
+
+        /*
+         * The order of these 3 calls is important and should be kept.
+         */
+        sg_split_phys(splitters, nb_splits);
+        ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+                                 split_sizes, splitters, true);
+        if (ret < 0)
+                goto err;
+        sg_split_mapped(splitters, nb_splits);
+
+        for (i = 0; i < nb_splits; i++) {
+                out[i] = splitters[i].out_sg;
+                if (out_mapped_nents)
+                        out_mapped_nents[i] = splitters[i].nents;
+        }
+
+        kfree(splitters);
+        return 0;
+
+err:
+        for (i = 0; i < nb_splits; i++)
+                kfree(splitters[i].out_sg);
+        kfree(splitters);
+        return ret;
+}
+EXPORT_SYMBOL(sg_split);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index da39c608a28c..95cd63b43b99 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
  */
 
 #include <stdarg.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>      /* for KSYM_SYMBOL_LEN */
 #include <linux/types.h>
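
As a usage illustration (editorial, not part of this commit): klist_prev() mirrors klist_next(), so an iterator set up with klist_iter_init() has no current node, the first klist_prev() call returns the tail of the list, and subsequent calls walk toward the head while skipping nodes marked dead. A minimal sketch, assuming an already populated klist; visit_backwards() and visit() are made-up names:

#include <linux/klist.h>

/* Sketch only: walk a klist from tail to head using the new klist_prev(). */
static void visit_backwards(struct klist *k,
                            void (*visit)(struct klist_node *))
{
        struct klist_iter iter;
        struct klist_node *n;

        klist_iter_init(k, &iter);      /* current node starts out NULL */
        while ((n = klist_prev(&iter))) /* first call yields the last node */
                visit(n);
        klist_iter_exit(&iter);
}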
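
The mpi_read_buffer() change above moves leading-zero stripping into the read itself: *nbytes (now mandatory) comes back as the length without leading zero bytes, which is why mpi_get_buffer() could drop its memmove() shuffle. A rough sketch of a caller relying on that behaviour, assuming an MPI obtained elsewhere; dump_mpi_minimal() is a hypothetical name:

#include <linux/mpi.h>

/* Sketch only: read an MPI into a caller buffer and use the trimmed length. */
static int dump_mpi_minimal(MPI a, u8 *out, unsigned int out_len)
{
        unsigned int nbytes;
        int sign, ret;

        /* out_len must still cover the full limb-based size of the MPI. */
        ret = mpi_read_buffer(a, out, out_len, &nbytes, &sign);
        if (ret)
                return ret;     /* -EINVAL if out is too small or NULL */

        /* An MPI whose value fits in two bytes now reports nbytes == 2,
         * even though whole limbs are stored internally. */
        return nbytes;
}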
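
pci_iomap_wc() and pci_iomap_wc_range() work like pci_iomap()/pci_iomap_range() except that memory BARs are mapped with ioremap_wc() and I/O BARs yield NULL. A sketch of how a driver might map a prefetchable framebuffer BAR with write combining; the BAR number and the map_fb_bar() wrapper are assumptions for illustration:

#include <linux/pci.h>
#include <linux/io.h>

/* Sketch only: map BAR 1 write-combined if it is a prefetchable MEM BAR. */
static void __iomem *map_fb_bar(struct pci_dev *pdev)
{
        void __iomem *fb;

        if (!(pci_resource_flags(pdev, 1) & IORESOURCE_PREFETCH))
                return NULL;

        fb = pci_iomap_wc(pdev, 1, 0);  /* 0 = map the whole BAR */
        if (!fb)
                return NULL;

        iowrite32(0, fb);       /* accessed with ioread32()/iowrite32() as usual */
        return fb;              /* release later with pci_iounmap() */
}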
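
sg_split() runs the split computation twice, once over the physical entries and once over the DMA-mapped entries, and allocates one output scatterlist per requested chunk, which the caller later kfree()s. A sketch of splitting one mapped list between two DMA channels; the function name, the halving of total_len and the surrounding error handling are assumptions, not taken from this commit:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch only: map a scatterlist, split it in two, hand each half to a channel. */
static int split_for_two_channels(struct device *dev, struct scatterlist *sgl,
                                  int nents, size_t total_len)
{
        struct scatterlist *out[2];
        int out_mapped_nents[2];
        /* total_len is the total number of bytes described by sgl */
        size_t sizes[2] = { total_len / 2, total_len - total_len / 2 };
        int mapped, ret, i;

        mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        if (!mapped)
                return -ENOMEM;

        /* Split the mapped list into two sub-lists, with no initial skip. */
        ret = sg_split(sgl, mapped, 0, 2, sizes, out, out_mapped_nents,
                       GFP_KERNEL);
        if (ret) {
                dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
                return ret;
        }

        /* out[0]/out[1] can now each be submitted to a DMA channel; they share
         * the mappings of sgl, so unmap only after the transfers complete. */

        for (i = 0; i < 2; i++)
                kfree(out[i]);  /* caller frees the output arrays */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}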