| author    | Ingo Molnar <mingo@elte.hu>                      | 2008-11-12 12:39:21 +0100 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Ingo Molnar <mingo@elte.hu>                      | 2008-11-12 12:39:21 +0100 |
| commit    | 708b8eae0fd532af73ea8350e6dcc10255ff7376 (patch) |                           |
| tree      | f336436934fd79bc91aff7112a9beb10bc4e839f         | /include/linux            |
| parent    | d98d38f2014ab79f28c126ff175d034891f7aefc (diff)  |                           |
| parent    | f21f237cf55494c3a4209de323281a3b0528da10 (diff)  |                           |
Merge branch 'linus' into core/locking
Diffstat (limited to 'include/linux')
39 files changed, 976 insertions, 355 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h index 1c91a176b9a..6a642098e5c 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -236,12 +236,16 @@ static inline void *bio_data(struct bio *bio) #define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1) #define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx) +/* Default implementation of BIOVEC_PHYS_MERGEABLE */ +#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) + /* * allow arch override, for eg virtualized architectures (put in asm/io.h) */ #ifndef BIOVEC_PHYS_MERGEABLE #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ - ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) + __BIOVEC_PHYS_MERGEABLE(vec1, vec2) #endif #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 8b00f6643e9..1164963c3a8 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -25,7 +25,6 @@ struct cgroup; extern int cgroup_init_early(void); extern int cgroup_init(void); -extern void cgroup_init_smp(void); extern void cgroup_lock(void); extern bool cgroup_lock_live_group(struct cgroup *cgrp); extern void cgroup_unlock(void); @@ -348,8 +347,6 @@ struct cgroup_subsys { struct cgroupfs_root *root; struct list_head sibling; - - void *private; }; #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; @@ -410,7 +407,6 @@ void cgroup_mm_owner_callbacks(struct task_struct *old, static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } -static inline void cgroup_init_smp(void) {} static inline void cgroup_fork(struct task_struct *p) {} static inline void cgroup_fork_callbacks(struct task_struct *p) {} static inline void cgroup_post_fork(struct task_struct *p) {} diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h index 8c0f9505b48..7605fdd1eb6 100644 --- a/include/linux/cnt32_to_63.h +++ b/include/linux/cnt32_to_63.h @@ -16,6 +16,7 @@ #include <linux/compiler.h> #include <linux/types.h> #include <asm/byteorder.h> +#include <asm/system.h> /* this is used only to give gcc a clue about good code generation */ union cnt32_to_63 { @@ -53,11 +54,19 @@ union cnt32_to_63 { * needed increment. And any race in updating the value in memory is harmless * as the same value would simply be stored more than once. * - * The only restriction for the algorithm to work properly is that this - * code must be executed at least once per each half period of the 32-bit - * counter to properly update the state bit in memory. This is usually not a - * problem in practice, but if it is then a kernel timer could be scheduled - * to manage for this code to be executed often enough. + * The restrictions for the algorithm to work properly are: + * + * 1) this code must be called at least once per each half period of the + * 32-bit counter; + * + * 2) this code must not be preempted for a duration longer than the + * 32-bit counter half period minus the longest period between two + * calls to this code. + * + * Those requirements ensure proper update to the state bit in memory. + * This is usually not a problem in practice, but if it is then a kernel + * timer should be scheduled to manage for this code to be executed often + * enough. * * Note that the top bit (bit 63) in the returned value should be considered * as garbage. 
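(Editorial aside — the quoted cnt32_to_63.h comment resumes after this sketch.) To make the calling convention above concrete, here is a minimal, hypothetical sched_clock()-style user of cnt32_to_63(); my_read_timer() and the 1 MHz tick rate are invented for illustration and are not part of this commit:

```c
#include <linux/cnt32_to_63.h>
#include <linux/types.h>

extern u32 my_read_timer(void);	/* hypothetical free-running 32-bit counter */

unsigned long long my_sched_clock(void)
{
	/*
	 * Per the restrictions above, this must run at least once per
	 * half period of the 32-bit counter (schedule a kernel timer if
	 * the natural call rate cannot guarantee that).
	 */
	u64 ticks = cnt32_to_63(my_read_timer());

	/* Bit 63 is garbage, as noted above, so mask it off ... */
	ticks &= ~(1ULL << 63);

	/* ... then scale: assumed 1 MHz counter => 1000 ns per tick. */
	return ticks * 1000;
}
```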
It is not cleared here because callers are likely to use a @@ -68,9 +77,10 @@ union cnt32_to_63 { */ #define cnt32_to_63(cnt_lo) \ ({ \ - static volatile u32 __m_cnt_hi; \ + static u32 __m_cnt_hi; \ union cnt32_to_63 __x; \ __x.hi = __m_cnt_hi; \ + smp_rmb(); \ __x.lo = (cnt_lo); \ if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index d3219d73f8e..21e1dd43e52 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -5,6 +5,9 @@ * Cpumasks provide a bitmap suitable for representing the * set of CPU's in a system, one bit position per CPU number. * + * The new cpumask_ ops take a "struct cpumask *"; the old ones + * use cpumask_t. + * * See detailed comments in the file linux/bitmap.h describing the * data type on which these cpumasks are based. * @@ -31,7 +34,7 @@ * will span the entire range of NR_CPUS. * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . * - * The available cpumask operations are: + * The obsolescent cpumask operations are: * * void cpu_set(cpu, mask) turn on bit 'cpu' in mask * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask @@ -138,7 +141,7 @@ #include <linux/threads.h> #include <linux/bitmap.h> -typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; +typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; extern cpumask_t _unused_cpumask_arg_; #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) @@ -527,4 +530,556 @@ extern cpumask_t cpu_active_map; #define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) +/* These are the new versions of the cpumask operators: passed by pointer. + * The older versions will be implemented in terms of these, then deleted. */ +#define cpumask_bits(maskp) ((maskp)->bits) + +#if NR_CPUS <= BITS_PER_LONG +#define CPU_BITS_ALL \ +{ \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} + +/* This produces more efficient code. */ +#define nr_cpumask_bits NR_CPUS + +#else /* NR_CPUS > BITS_PER_LONG */ + +#define CPU_BITS_ALL \ +{ \ + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} + +#define nr_cpumask_bits nr_cpu_ids +#endif /* NR_CPUS > BITS_PER_LONG */ + +/* verify cpu argument to cpumask_* operators */ +static inline unsigned int cpumask_check(unsigned int cpu) +{ +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + WARN_ON_ONCE(cpu >= nr_cpumask_bits); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + return cpu; +} + +#if NR_CPUS == 1 +/* Uniprocessor. Assume all masks are "1". */ +static inline unsigned int cpumask_first(const struct cpumask *srcp) +{ + return 0; +} + +/* Valid inputs for n are -1 and 0. */ +static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) +{ + return n+1; +} + +static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +{ + return n+1; +} + +static inline unsigned int cpumask_next_and(int n, + const struct cpumask *srcp, + const struct cpumask *andp) +{ + return n+1; +} + +/* cpu must be a valid cpu, ie 0, so there's no other choice. 
*/ +static inline unsigned int cpumask_any_but(const struct cpumask *mask, + unsigned int cpu) +{ + return 1; +} + +#define for_each_cpu(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#define for_each_cpu_and(cpu, mask, and) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) +#else +/** + * cpumask_first - get the first cpu in a cpumask + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no cpus set. + */ +static inline unsigned int cpumask_first(const struct cpumask *srcp) +{ + return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_next - get the next cpu in a cpumask + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus set. + */ +static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpumask_check(n); + return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); +} + +/** + * cpumask_next_zero - get the next unset cpu in a cpumask + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus unset. + */ +static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpumask_check(n); + return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); +} + +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); +int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); + +/** + * for_each_cpu - iterate over every cpu in a mask + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next((cpu), (mask)), \ + (cpu) < nr_cpu_ids;) + +/** + * for_each_cpu_and - iterate over every cpu in both masks + * @cpu: the (optionally unsigned) integer iterator + * @mask: the first cpumask pointer + * @and: the second cpumask pointer + * + * This saves a temporary CPU mask in many places. It is equivalent to: + * struct cpumask tmp; + * cpumask_and(&tmp, &mask, &and); + * for_each_cpu(cpu, &tmp) + * ... + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu_and(cpu, mask, and) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next_and((cpu), (mask), (and)), \ + (cpu) < nr_cpu_ids;) +#endif /* SMP */ + +#define CPU_BITS_NONE \ +{ \ + [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ +} + +#define CPU_BITS_CPU0 \ +{ \ + [0] = 1UL \ +} + +/** + * cpumask_set_cpu - set a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @dstp: the cpumask pointer + */ +static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +{ + set_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + +/** + * cpumask_clear_cpu - clear a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @dstp: the cpumask pointer + */ +static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + +/** + * cpumask_test_cpu - test for a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * No static inline type checking - see Subtlety (1) above. 
+ */ +#define cpumask_test_cpu(cpu, cpumask) \ + test_bit(cpumask_check(cpu), (cpumask)->bits) + +/** + * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * test_and_set_bit wrapper for cpumasks. + */ +static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); +} + +/** + * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask + * @dstp: the cpumask pointer + */ +static inline void cpumask_setall(struct cpumask *dstp) +{ + bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask + * @dstp: the cpumask pointer + */ +static inline void cpumask_clear(struct cpumask *dstp) +{ + bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_and - *dstp = *src1p & *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_and(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_or - *dstp = *src1p | *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_xor - *dstp = *src1p ^ *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_xor(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_andnot - *dstp = *src1p & ~*src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_andnot(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_complement - *dstp = ~*srcp + * @dstp: the cpumask result + * @srcp: the input to invert + */ +static inline void cpumask_complement(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), + nr_cpumask_bits); +} + +/** + * cpumask_equal - *src1p == *src2p + * @src1p: the first input + * @src2p: the second input + */ +static inline bool cpumask_equal(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_intersects - (*src1p & *src2p) != 0 + * @src1p: the first input + * @src2p: the second input + */ +static inline bool cpumask_intersects(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_subset - (*src1p & ~*src2p) == 0 + * @src1p: the first input + * @src2p: the second input + */ +static inline int cpumask_subset(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_empty - *srcp == 
0 + * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear. + */ +static inline bool cpumask_empty(const struct cpumask *srcp) +{ + return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_full - *srcp == 0xFFFFFFFF... + * @srcp: the cpumask to that all cpus < nr_cpu_ids are set. + */ +static inline bool cpumask_full(const struct cpumask *srcp) +{ + return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_weight - Count of bits in *srcp + * @srcp: the cpumask to count bits (< nr_cpu_ids) in. + */ +static inline unsigned int cpumask_weight(const struct cpumask *srcp) +{ + return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_shift_right - *dstp = *srcp >> n + * @dstp: the cpumask result + * @srcp: the input to shift + * @n: the number of bits to shift by + */ +static inline void cpumask_shift_right(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, + nr_cpumask_bits); +} + +/** + * cpumask_shift_left - *dstp = *srcp << n + * @dstp: the cpumask result + * @srcp: the input to shift + * @n: the number of bits to shift by + */ +static inline void cpumask_shift_left(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, + nr_cpumask_bits); +} + +/** + * cpumask_copy - *dstp = *srcp + * @dstp: the result + * @srcp: the input cpumask + */ +static inline void cpumask_copy(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_any - pick a "random" cpu from *srcp + * @srcp: the input cpumask + * + * Returns >= nr_cpu_ids if no cpus set. + */ +#define cpumask_any(srcp) cpumask_first(srcp) + +/** + * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 + * @src1p: the first input + * @src2p: the second input + * + * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). + */ +#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p)) + +/** + * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2 + * @mask1: the first input cpumask + * @mask2: the second input cpumask + * + * Returns >= nr_cpu_ids if no cpus set. + */ +#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) + +/** + * cpumask_of - the cpumask containing just a given cpu + * @cpu: the cpu (<= nr_cpu_ids) + */ +#define cpumask_of(cpu) (get_cpu_mask(cpu)) + +/** + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * @bitmap: the bitmap + * + * There are a few places where cpumask_var_t isn't appropriate and + * static cpumasks must be used (eg. very early boot), yet we don't + * expose the definition of 'struct cpumask'. + * + * This does the conversion, and can be used as a constant initializer. + */ +#define to_cpumask(bitmap) \ + ((struct cpumask *)(1 ? (bitmap) \ + : (void *)sizeof(__check_is_bitmap(bitmap)))) + +static inline int __check_is_bitmap(const unsigned long *bitmap) +{ + return 1; +} + +/** + * cpumask_size - size to allocate for a 'struct cpumask' in bytes + * + * This will eventually be a runtime variable, depending on nr_cpu_ids. + */ +static inline size_t cpumask_size(void) +{ + /* FIXME: Once all cpumask assignments are eliminated, this + * can be nr_cpumask_bits */ + return BITS_TO_LONGS(NR_CPUS) * sizeof(long); +} + +/* + * cpumask_var_t: struct cpumask for stack usage. + * + * Oh, the wicked games we play! 
In order to make kernel coding a + * little more difficult, we typedef cpumask_var_t to an array or a + * pointer: doing &mask on an array is a noop, so it still works. + * + * ie. + * cpumask_var_t tmpmask; + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) + * return -ENOMEM; + * + * ... use 'tmpmask' like a normal struct cpumask * ... + * + * free_cpumask_var(tmpmask); + */ +#ifdef CONFIG_CPUMASK_OFFSTACK +typedef struct cpumask *cpumask_var_t; + +bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +void alloc_bootmem_cpumask_var(cpumask_var_t *mask); +void free_cpumask_var(cpumask_var_t mask); +void free_bootmem_cpumask_var(cpumask_var_t mask); + +#else +typedef struct cpumask cpumask_var_t[1]; + +static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return true; +} + +static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) +{ +} + +static inline void free_cpumask_var(cpumask_var_t mask) +{ +} + +static inline void free_bootmem_cpumask_var(cpumask_var_t mask) +{ +} +#endif /* CONFIG_CPUMASK_OFFSTACK */ + +/* The pointer versions of the maps, these will become the primary versions. */ +#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map) +#define cpu_online_mask ((const struct cpumask *)&cpu_online_map) +#define cpu_present_mask ((const struct cpumask *)&cpu_present_map) +#define cpu_active_mask ((const struct cpumask *)&cpu_active_map) + +/* It's common to want to use cpu_all_mask in struct member initializers, + * so it has to refer to an address rather than a pointer. */ +extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); +#define cpu_all_mask to_cpumask(cpu_all_bits) + +/* First bits of cpu_bit_bitmap are in fact unset. */ +#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) + +/* Wrappers for arch boot code to manipulate normally-constant masks */ +static inline void set_cpu_possible(unsigned int cpu, bool possible) +{ + if (possible) + cpumask_set_cpu(cpu, &cpu_possible_map); + else + cpumask_clear_cpu(cpu, &cpu_possible_map); +} + +static inline void set_cpu_present(unsigned int cpu, bool present) +{ + if (present) + cpumask_set_cpu(cpu, &cpu_present_map); + else + cpumask_clear_cpu(cpu, &cpu_present_map); +} + +static inline void set_cpu_online(unsigned int cpu, bool online) +{ + if (online) + cpumask_set_cpu(cpu, &cpu_online_map); + else + cpumask_clear_cpu(cpu, &cpu_online_map); +} + +static inline void set_cpu_active(unsigned int cpu, bool active) +{ + if (active) + cpumask_set_cpu(cpu, &cpu_active_map); + else + cpumask_clear_cpu(cpu, &cpu_active_map); +} + +static inline void init_cpu_present(const struct cpumask *src) +{ + cpumask_copy(&cpu_present_map, src); +} + +static inline void init_cpu_possible(const struct cpumask *src) +{ + cpumask_copy(&cpu_possible_map, src); +} + +static inline void init_cpu_online(const struct cpumask *src) +{ + cpumask_copy(&cpu_online_map, src); +} #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 8f225339eee..5a361f85cfe 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -44,11 +44,6 @@ static inline bool should_send_signal(struct task_struct *p) return !(p->flags & PF_FREEZER_NOSIG); } -/* - * Wake up a frozen process - */ -extern int __thaw_process(struct task_struct *p); - /* Takes and releases task alloc lock using task_lock() */ extern int thaw_process(struct task_struct *p); diff --git a/include/linux/fs.h b/include/linux/fs.h index 5b248d61430..0dcdd9458f4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h 
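(Editorial aside before the fs.h hunk below.) The cpumask_var_t comment earlier in this cpumask.h diff spells out the intended allocation pattern; a minimal, hypothetical caller combining it with the new pointer-based operators added above (cpumask_and(), for_each_cpu(), cpu_online_mask) might look like the sketch here — my_run_on_online_subset() is invented for illustration:

```c
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int my_run_on_online_subset(const struct cpumask *allowed)
{
	cpumask_var_t tmpmask;	/* array or pointer, per CONFIG_CPUMASK_OFFSTACK */
	int cpu;

	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	/* tmpmask now behaves like a normal 'struct cpumask *'. */
	cpumask_and(tmpmask, allowed, cpu_online_mask);

	for_each_cpu(cpu, tmpmask)
		printk(KERN_INFO "my_run_on_online_subset: cpu %d\n", cpu);

	free_cpumask_var(tmpmask);
	return 0;
}
```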
@@ -489,13 +489,6 @@ struct address_space_operations { int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); - /* - * ext3 requires that a successful prepare_write() call be followed - * by a commit_write() call - they must be balanced - */ - int (*prepare_write)(struct file *, struct page *, unsigned, unsigned); - int (*commit_write)(struct file *, struct page *, unsigned, unsigned); - int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 4e625e0094c..708bab58d8d 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -49,7 +49,8 @@ struct gianfar_platform_data { u32 device_flags; /* board specific information */ u32 board_flags; - char bus_id[MII_BUS_ID_SIZE]; + int mdio_bus; /* Bus controlled by us */ + char bus_id[MII_BUS_ID_SIZE]; /* Bus PHY is on */ u32 phy_id; u8 mac_addr[6]; phy_interface_t interface; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a3d46151be1..703eb53cfa2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -8,7 +8,7 @@ #include <linux/types.h> #include <linux/kallsyms.h> -#ifdef CONFIG_FTRACE +#ifdef CONFIG_FUNCTION_TRACER extern int ftrace_enabled; extern int @@ -36,16 +36,14 @@ void clear_ftrace_function(void); extern void ftrace_stub(unsigned long a0, unsigned long a1); -#else /* !CONFIG_FTRACE */ +#else /* !CONFIG_FUNCTION_TRACER */ # define register_ftrace_function(ops) do { } while (0) # define unregister_ftrace_function(ops) do { } while (0) # define clear_ftrace_function(ops) do { } while (0) -static inline void ftrace_kill_atomic(void) { } -#endif /* CONFIG_FTRACE */ +static inline void ftrace_kill(void) { } +#endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_DYNAMIC_FTRACE -# define FTRACE_HASHBITS 10 -# define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS) enum { FTRACE_FL_FREE = (1 << 0), @@ -58,9 +56,9 @@ enum { }; struct dyn_ftrace { - struct hlist_node node; - unsigned long ip; /* address of mcount call-site */ - unsigned long flags; + struct list_head list; + unsigned long ip; /* address of mcount call-site */ + unsigned long flags; }; int ftrace_force_update(void); @@ -71,14 +69,33 @@ extern int ftrace_ip_converted(unsigned long ip); extern unsigned char *ftrace_nop_replace(void); extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr); extern int ftrace_dyn_arch_init(void *data); -extern int ftrace_mcount_set(unsigned long *data); -extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, - unsigned char *new_code); extern int ftrace_update_ftrace_func(ftrace_func_t func); extern void ftrace_caller(void); extern void ftrace_call(void); extern void mcount_call(void); +/** + * ftrace_modify_code - modify code segment + * @ip: the address of the code segment + * @old_code: the contents of what is expected to be there + * @new_code: the code to patch in + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. 
+ * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. + */ +extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, + unsigned char *new_code); + extern int skip_trace(unsigned long ip); extern void ftrace_release(void *start, unsigned long size); @@ -97,11 +114,10 @@ static inline void ftrace_release(void *start, unsigned long size) { } /* totally disable ftrace - can not re-enable after this */ void ftrace_kill(void); -void ftrace_kill_atomic(void); static inline void tracer_disable(void) { -#ifdef CONFIG_FTRACE +#ifdef CONFIG_FUNCTION_TRACER ftrace_enabled = 0; #endif } @@ -113,7 +129,7 @@ static inline void tracer_disable(void) */ static inline int __ftrace_enabled_save(void) { -#ifdef CONFIG_FTRACE +#ifdef CONFIG_FUNCTION_TRACER int saved_ftrace_enabled = ftrace_enabled; ftrace_enabled = 0; return saved_ftrace_enabled; @@ -124,7 +140,7 @@ static inline int __ftrace_enabled_save(void) static inline void __ftrace_enabled_restore(int enabled) { -#ifdef CONFIG_FTRACE +#ifdef CONFIG_FUNCTION_TRACER ftrace_enabled = enabled; #endif } diff --git a/include/linux/hid.h b/include/linux/hid.h index f13bca2dd53..e5780f8c934 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -410,6 +410,7 @@ struct hid_output_fifo { #define HID_SUSPENDED 5 #define HID_CLEAR_HALT 6 #define HID_DISCONNECTED 7 +#define HID_STARTED 8 struct hid_input { struct list_head list; @@ -417,6 +418,11 @@ struct hid_input { struct input_dev *input; }; +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE +}; + struct hid_driver; struct hid_ll_driver; @@ -431,6 +437,7 @@ struct hid_device { /* device report descriptor */ __u32 vendor; /* Vendor ID */ __u32 product; /* Product ID */ __u32 version; /* HID version */ + enum hid_type type; /* device type (mouse, kbd, ...) */ unsigned country; /* HID country */ struct hid_report_enum report_enum[HID_REPORT_TYPES]; diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 2b3645b1acf..07e510a3b00 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -239,7 +239,7 @@ static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) timer->_softexpires = ktime_add_safe(timer->_softexpires, time); } -static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns) +static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns) { timer->_expires = ktime_add_ns(timer->_expires, ns); timer->_softexpires = ktime_add_ns(timer->_softexpires, ns); diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h new file mode 100644 index 00000000000..05a80c44513 --- /dev/null +++ b/include/linux/i7300_idle.h @@ -0,0 +1,83 @@ + +#ifndef I7300_IDLE_H +#define I7300_IDLE_H + +#include <linux/pci.h> + +/* + * I/O AT controls (PCI bus 0 device 8 function 0) + * DIMM controls (PCI bus 0 device 16 function 1) + */ +#define IOAT_BUS 0 +#define IOAT_DEVFN PCI_DEVFN(8, 0) +#define MEMCTL_BUS 0 +#define MEMCTL_DEVFN PCI_DEVFN(16, 1) + +struct fbd_ioat { + unsigned int vendor; + unsigned int ioat_dev; +}; + +/* + * The i5000 chip-set has the same hooks as the i7300 + * but support is disabled by default because this driver + * has not been validated on that platform. 
+ */ +#define SUPPORT_I5000 0 + +static const struct fbd_ioat fbd_ioat_list[] = { + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB}, +#if SUPPORT_I5000 + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT}, +#endif + {0, 0} +}; + +/* table of devices that work with this driver */ +static const struct pci_device_id pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, +#if SUPPORT_I5000 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, +#endif + { } /* Terminating entry */ +}; + +/* Check for known platforms with I/O-AT */ +static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, + struct pci_dev **ioat_dev) +{ + int i; + struct pci_dev *memdev, *dmadev; + + memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN); + if (!memdev) + return -ENODEV; + + for (i = 0; pci_tbl[i].vendor != 0; i++) { + if (memdev->vendor == pci_tbl[i].vendor && + memdev->device == pci_tbl[i].device) { + break; + } + } + if (pci_tbl[i].vendor == 0) + return -ENODEV; + + dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN); + if (!dmadev) + return -ENODEV; + + for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { + if (dmadev->vendor == fbd_ioat_list[i].vendor && + dmadev->device == fbd_ioat_list[i].ioat_dev) { + if (fbd_dev) + *fbd_dev = memdev; + if (ioat_dev) + *ioat_dev = dmadev; + + return 0; + } + } + return -ENODEV; +} + +#endif diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 9e7b49b8062..a5cb0c3f6dc 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -114,6 +114,8 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, u16 vlan_tci, int polling); +extern int vlan_hwaccel_do_receive(struct sk_buff *skb); + #else static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) { @@ -133,6 +135,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, BUG(); return NET_XMIT_SUCCESS; } + +static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) +{ + return 0; +} #endif /** diff --git a/include/linux/init.h b/include/linux/init.h index 0c1264668be..68cb0265d00 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -112,21 +112,25 @@ #define __FINIT .previous #define __INITDATA .section ".init.data","aw" +#define __INITRODATA .section ".init.rodata","a" #define __FINITDATA .previous #define __DEVINIT .section ".devinit.text", "ax" #define __DEVINITDATA .section ".devinit.data", "aw" +#define __DEVINITRODATA .section ".devinit.rodata", "a" #define __CPUINIT .section ".cpuinit.text", "ax" #define __CPUINITDATA .section ".cpuinit.data", "aw" +#define __CPUINITRODATA .section ".cpuinit.rodata", "a" #define __MEMINIT .section ".meminit.text", "ax" #define __MEMINITDATA .section ".meminit.data", "aw" +#define __MEMINITRODATA .section ".meminit.rodata", "a" /* silence warnings when references are OK */ #define __REF .section ".ref.text", "ax" #define __REFDATA .section ".ref.data", "aw" -#define __REFCONST .section ".ref.rodata", "aw" +#define __REFCONST .section ".ref.rodata", "a" #ifndef __ASSEMBLY__ /* diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h new file mode 100644 index 00000000000..82df31726a5 --- /dev/null +++ b/include/linux/io-mapping.h @@ -0,0 +1,125 @@ +/* + * Copyright © 2008 Keith Packard <keithp@keithp.com> + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as 
published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef _LINUX_IO_MAPPING_H +#define _LINUX_IO_MAPPING_H + +#include <linux/types.h> +#include <asm/io.h> +#include <asm/page.h> +#include <asm/iomap.h> + +/* + * The io_mapping mechanism provides an abstraction for mapping + * individual pages from an io device to the CPU in an efficient fashion. + * + * See Documentation/io_mapping.txt + */ + +/* this struct isn't actually defined anywhere */ +struct io_mapping; + +#ifdef CONFIG_HAVE_ATOMIC_IOMAP + +/* + * For small address space machines, mapping large objects + * into the kernel virtual space isn't practical. Where + * available, use fixmap support to dynamically map pages + * of the object at run time. + */ + +static inline struct io_mapping * +io_mapping_create_wc(unsigned long base, unsigned long size) +{ + return (struct io_mapping *) base; +} + +static inline void +io_mapping_free(struct io_mapping *mapping) +{ +} + +/* Atomic map/unmap */ +static inline void * +io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) +{ + offset += (unsigned long) mapping; + return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0, + __pgprot(__PAGE_KERNEL_WC)); +} + +static inline void +io_mapping_unmap_atomic(void *vaddr) +{ + iounmap_atomic(vaddr, KM_USER0); +} + +static inline void * +io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +{ + offset += (unsigned long) mapping; + return ioremap_wc(offset, PAGE_SIZE); +} + +static inline void +io_mapping_unmap(void *vaddr) +{ + iounmap(vaddr); +} + +#else + +/* Create the io_mapping object*/ +static inline struct io_mapping * +io_mapping_create_wc(unsigned long base, unsigned long size) +{ + return (struct io_mapping *) ioremap_wc(base, size); +} + +static inline void +io_mapping_free(struct io_mapping *mapping) +{ + iounmap(mapping); +} + +/* Atomic map/unmap */ +static inline void * +io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) +{ + return ((char *) mapping) + offset; +} + +static inline void +io_mapping_unmap_atomic(void *vaddr) +{ +} + +/* Non-atomic map/unmap */ +static inline void * +io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +{ + return ((char *) mapping) + offset; +} + +static inline void +io_mapping_unmap(void *vaddr) +{ +} + +#endif /* HAVE_ATOMIC_IOMAP */ + +#endif /* _LINUX_IO_MAPPING_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index fa2853b49f7..69a9bfdf9c8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -116,6 +116,8 @@ extern int _cond_resched(void); # define might_resched() do { } while (0) #endif +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP + void __might_sleep(char *file, int line); /** * might_sleep - annotation for functions that can sleep * @@ -126,8 +128,6 @@ extern int _cond_resched(void); * be bitten later when the calling function happens to sleep when it is not * supposed to. 
*/ -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP - void __might_sleep(char *file, int line); # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) #else diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 797fcd78124..f18b86fa865 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -489,6 +489,9 @@ struct kvm_assigned_pci_dev { __u32 busnr; __u32 devfn; __u32 flags; + union { + __u32 reserved[12]; + }; }; struct kvm_assigned_irq { @@ -496,6 +499,9 @@ struct kvm_assigned_irq { __u32 host_irq; __u32 guest_irq; __u32 flags; + union { + __u32 reserved[12]; + }; }; #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3833c48fae3..bb92be2153b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -37,6 +37,8 @@ #define KVM_REQ_UNHALT 6 #define KVM_REQ_MMU_SYNC 7 +#define KVM_USERSPACE_IRQ_SOURCE_ID 0 + struct kvm_vcpu; extern struct kmem_cache *kvm_vcpu_cache; @@ -306,15 +308,18 @@ struct kvm_assigned_dev_kernel { int host_irq; int guest_irq; int irq_requested; + int irq_source_id; struct pci_dev *dev; struct kvm *kvm; }; -void kvm_set_irq(struct kvm *kvm, int irq, int level); +void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); +int kvm_request_irq_source_id(struct kvm *kvm); +void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); #ifdef CONFIG_DMAR int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, diff --git a/include/linux/libata.h b/include/linux/libata.h index 507f53ef803..59b0f1c807b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -372,6 +372,9 @@ enum { ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */ ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ + ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ + ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands + not multiple of 16 bytes */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -695,6 +698,7 @@ struct ata_port { unsigned int cbl; /* cable type; ATA_CBL_xxx */ struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; + unsigned long qc_allocated; unsigned int qc_active; int nr_active_links; /* #links with active qcs */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index ee6e822d599..403aa505f27 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -130,7 +130,7 @@ struct mmc_card { #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) #define mmc_card_name(c) ((c)->cid.prod_name) -#define mmc_card_id(c) ((c)->dev.bus_id) +#define mmc_card_id(c) (dev_name(&(c)->dev)) #define mmc_list_to_card(l) container_of(l, struct mmc_card, node) #define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index bde891f6459..f842f234e44 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -176,7 +176,7 @@ static inline void *mmc_priv(struct mmc_host *host) #define mmc_dev(x) ((x)->parent) #define mmc_classdev(x) (&(x)->class_dev) -#define mmc_hostname(x) ((x)->class_dev.bus_id) +#define mmc_hostname(x) (dev_name(&(x)->class_dev)) extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 
extern int mmc_resume_host(struct mmc_host *); diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 07bee4a0d45..451bdfc8583 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -63,7 +63,7 @@ struct sdio_func { #define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT) -#define sdio_func_id(f) ((f)->dev.bus_id) +#define sdio_func_id(f) (dev_name(&(f)->dev)) #define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) #define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index eb71b45fdf5..97b91d1abb4 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -135,6 +135,7 @@ struct usb_device_id { struct hid_device_id { __u16 bus; + __u16 pad1; __u32 vendor; __u32 product; kernel_ulong_t driver_data diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index ba63858056c..e0a9b207920 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -46,11 +46,6 @@ #define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */ #define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG) -/* valid file mode bits */ -#define MSDOS_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO) -/* Convert attribute bits and a mask to the UNIX mode. */ -#define MSDOS_MKMODE(a, m) (m & (a & ATTR_RO ? S_IRUGO|S_IXUGO : S_IRWXUGO)) - #define MSDOS_NAME 11 /* maximum name length */ #define MSDOS_LONGNAME 256 /* maximum name length */ #define MSDOS_SLOTS 21 /* max # of slots for short and long names */ @@ -167,282 +162,10 @@ struct msdos_dir_slot { }; #ifdef __KERNEL__ - -#include <linux/buffer_head.h> -#include <linux/string.h> -#include <linux/nls.h> -#include <linux/fs.h> -#include <linux/mutex.h> - -/* - * vfat shortname flags - */ -#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */ -#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */ -#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */ -#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */ -#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */ - -struct fat_mount_options { - uid_t fs_uid; - gid_t fs_gid; - unsigned short fs_fmask; - unsigned short fs_dmask; - unsigned short codepage; /* Codepage for shortname conversions */ - char *iocharset; /* Charset used for filename input/display */ - unsigned short shortname; /* flags for shortname display/create rule */ - unsigned char name_check; /* r = relaxed, n = normal, s = strict */ - unsigned short allow_utime;/* permission for setting the [am]time */ - unsigned quiet:1, /* set = fake successful chmods and chowns */ - showexec:1, /* set = only set x bit for com/exe/bat */ - sys_immutable:1, /* set = system files are immutable */ - dotsOK:1, /* set = hidden and system files are named '.filename' */ - isvfat:1, /* 0=no vfat long filename support, 1=vfat support */ - utf8:1, /* Use of UTF-8 character set (Default) */ - unicode_xlate:1, /* create escape sequences for unhandled Unicode */ - numtail:1, /* Does first alias have a numeric '~1' type tail? */ - flush:1, /* write things quickly */ - nocase:1, /* Does this need case conversion? 
0=need case conversion*/ - usefree:1, /* Use free_clusters for FAT32 */ - tz_utc:1; /* Filesystem timestamps are in UTC */ -}; - -#define FAT_HASH_BITS 8 -#define FAT_HASH_SIZE (1UL << FAT_HASH_BITS) -#define FAT_HASH_MASK (FAT_HASH_SIZE-1) - -/* - * MS-DOS file system in-core superblock data - */ -struct msdos_sb_info { - unsigned short sec_per_clus; /* sectors/cluster */ - unsigned short cluster_bits; /* log2(cluster_size) */ - unsigned int cluster_size; /* cluster size */ - unsigned char fats,fat_bits; /* number of FATs, FAT bits (12 or 16) */ - unsigned short fat_start; - unsigned long fat_length; /* FAT start & length (sec.) */ - unsigned long dir_start; - unsigned short dir_entries; /* root dir start & entries */ - unsigned long data_start; /* first data sector */ - unsigned long max_cluster; /* maximum cluster number */ - unsigned long root_cluster; /* first cluster of the root directory */ - unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */ - struct mutex fat_lock; - unsigned int prev_free; /* previously allocated cluster number */ - unsigned int free_clusters; /* -1 if undefined */ - unsigned int free_clus_valid; /* is free_clusters valid? */ - struct fat_mount_options options; - struct nls_table *nls_disk; /* Codepage used on disk */ - struct nls_table *nls_io; /* Charset used for input and display */ - const void *dir_ops; /* Opaque; default directory operations */ - int dir_per_block; /* dir entries per block */ - int dir_per_block_bits; /* log2(dir_per_block) */ - - int fatent_shift; - struct fatent_operations *fatent_ops; - - spinlock_t inode_hash_lock; - struct hlist_head inode_hashtable[FAT_HASH_SIZE]; -}; - -#define FAT_CACHE_VALID 0 /* special case for valid cache */ - -/* - * MS-DOS file system inode data in memory - */ -struct msdos_inode_info { - spinlock_t cache_lru_lock; - struct list_head cache_lru; - int nr_caches; - /* for avoiding the race between fat_free() and fat_get_cluster() */ - unsigned int cache_valid_id; - - loff_t mmu_private; - int i_start; /* first cluster or 0 */ - int i_logstart; /* logical first cluster */ - int i_attrs; /* unused attribute bits */ - loff_t i_pos; /* on-disk position of directory entry or 0 */ - struct hlist_node i_fat_hash; /* hash by i_location */ - struct inode vfs_inode; -}; - -struct fat_slot_info { - loff_t i_pos; /* on-disk position of directory entry */ - loff_t slot_off; /* offset for slot or de start */ - int nr_slots; /* number of slots + 1(de) in filename */ - struct msdos_dir_entry *de; - struct buffer_head *bh; -}; - -static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb) -{ - return sb->s_fs_info; -} - -static inline struct msdos_inode_info *MSDOS_I(struct inode *inode) -{ - return container_of(inode, struct msdos_inode_info, vfs_inode); -} - -/* Return the FAT attribute byte for this inode */ -static inline u8 fat_attr(struct inode *inode) -{ - return ((inode->i_mode & S_IWUGO) ? ATTR_NONE : ATTR_RO) | - (S_ISDIR(inode->i_mode) ? 
ATTR_DIR : ATTR_NONE) | - MSDOS_I(inode)->i_attrs; -} - -static inline unsigned char fat_checksum(const __u8 *name) -{ - unsigned char s = name[0]; - s = (s<<7) + (s>>1) + name[1]; s = (s<<7) + (s>>1) + name[2]; - s = (s<<7) + (s>>1) + name[3]; s = (s<<7) + (s>>1) + name[4]; - s = (s<<7) + (s>>1) + name[5]; s = (s<<7) + (s>>1) + name[6]; - s = (s<<7) + (s>>1) + name[7]; s = (s<<7) + (s>>1) + name[8]; - s = (s<<7) + (s>>1) + name[9]; s = (s<<7) + (s>>1) + name[10]; - return s; -} - -static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus) -{ - return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus - + sbi->data_start; -} - -static inline void fat16_towchar(wchar_t *dst, const __u8 *src, size_t len) -{ -#ifdef __BIG_ENDIAN - while (len--) { - *dst++ = src[0] | (src[1] << 8); - src += 2; - } -#else - memcpy(dst, src, len * 2); -#endif -} - -static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len) -{ -#ifdef __BIG_ENDIAN - while (len--) { - dst[0] = *src & 0x00FF; - dst[1] = (*src & 0xFF00) >> 8; - dst += 2; - src++; - } -#else - memcpy(dst, src, len * 2); -#endif -} - /* media of boot sector */ static inline int fat_valid_media(u8 media) { return 0xf8 <= media || media == 0xf0; } - -/* fat/cache.c */ -extern void fat_cache_inval_inode(struct inode *inode); -extern int fat_get_cluster(struct inode *inode, int cluster, - int *fclus, int *dclus); -extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, - unsigned long *mapped_blocks); - -/* fat/dir.c */ -extern const struct file_operations fat_dir_operations; -extern int fat_search_long(struct inode *inode, const unsigned char *name, - int name_len, struct fat_slot_info *sinfo); -extern int fat_dir_empty(struct inode *dir); -extern int fat_subdirs(struct inode *dir); -extern int fat_scan(struct inode *dir, const unsigned char *name, - struct fat_slot_info *sinfo); -extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh, - struct msdos_dir_entry **de, loff_t *i_pos); -extern int fat_alloc_new_dir(struct inode *dir, struct timespec *ts); -extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots, - struct fat_slot_info *sinfo); -extern int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo); - -/* fat/fatent.c */ -struct fat_entry { - int entry; - union { - u8 *ent12_p[2]; - __le16 *ent16_p; - __le32 *ent32_p; - } u; - int nr_bhs; - struct buffer_head *bhs[2]; -}; - -static inline void fatent_init(struct fat_entry *fatent) -{ - fatent->nr_bhs = 0; - fatent->entry = 0; - fatent->u.ent32_p = NULL; - fatent->bhs[0] = fatent->bhs[1] = NULL; -} - -static inline void fatent_set_entry(struct fat_entry *fatent, int entry) -{ - fatent->entry = entry; - fatent->u.ent32_p = NULL; -} - -static inline void fatent_brelse(struct fat_entry *fatent) -{ - int i; - fatent->u.ent32_p = NULL; - for (i = 0; i < fatent->nr_bhs; i++) - brelse(fatent->bhs[i]); - fatent->nr_bhs = 0; - fatent->bhs[0] = fatent->bhs[1] = NULL; -} - -extern void fat_ent_access_init(struct super_block *sb); -extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent, - int entry); -extern int fat_ent_write(struct inode *inode, struct fat_entry *fatent, - int new, int wait); -extern int fat_alloc_clusters(struct inode *inode, int *cluster, - int nr_cluster); -extern int fat_free_clusters(struct inode *inode, int cluster); -extern int fat_count_free_clusters(struct super_block *sb); - -/* fat/file.c */ -extern int fat_generic_ioctl(struct inode *inode, struct file *filp, - 
unsigned int cmd, unsigned long arg); -extern const struct file_operations fat_file_operations; -extern const struct inode_operations fat_file_inode_operations; -extern int fat_setattr(struct dentry * dentry, struct iattr * attr); -extern void fat_truncate(struct inode *inode); -extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); - -/* fat/inode.c */ -extern void fat_attach(struct inode *inode, loff_t i_pos); -extern void fat_detach(struct inode *inode); -extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos); -extern struct inode *fat_build_inode(struct super_block *sb, - struct msdos_dir_entry *de, loff_t i_pos); -extern int fat_sync_inode(struct inode *inode); -extern int fat_fill_super(struct super_block *sb, void *data, int silent, - const struct inode_operations *fs_dir_inode_ops, int isvfat); - -extern int fat_flush_inodes(struct super_block *sb, struct inode *i1, - struct inode *i2); -/* fat/misc.c */ -extern void fat_fs_panic(struct super_block *s, const char *fmt, ...); -extern void fat_clusters_flush(struct super_block *sb); -extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); -extern int date_dos2unix(unsigned short time, unsigned short date, int tz_utc); -extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date, - int tz_utc); -extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs); - -int fat_cache_init(void); -void fat_cache_destroy(void); - -#endif /* __KERNEL__ */ - -#endif +#endif /* !__KERNEL__ */ +#endif /* !_LINUX_MSDOS_FS_H */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index ee5124ec319..00e2b575021 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -282,9 +282,25 @@ struct cfi_private { /* * Returns the command address according to the given geometry. */ -static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type) +static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, + struct map_info *map, struct cfi_private *cfi) { - return (cmd_ofs * type) * interleave; + unsigned bankwidth = map_bankwidth(map); + unsigned interleave = cfi_interleave(cfi); + unsigned type = cfi->device_type; + uint32_t addr; + + addr = (cmd_ofs * type) * interleave; + + /* Modify the unlock address if we are in compatiblity mode. + * For 16bit devices on 8 bit busses + * and 32bit devices on 16 bit busses + * set the low bit of the alternating bit sequence of the address. 
+ */ + if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa)) + addr |= (type >> 1)*interleave; + + return addr; } /* @@ -430,7 +446,7 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t int type, map_word *prev_val) { map_word val; - uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type); + uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi); val = cfi_build_cmd(cmd, map, cfi); if (prev_val) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c8bcb59adfd..9d77b1d7dca 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1537,7 +1537,6 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) /** * netif_tx_lock - grab network device transmit lock * @dev: network device - * @cpu: cpu number of lock owner * * Get network device transmit lock */ diff --git a/include/linux/pci.h b/include/linux/pci.h index c75b82bda32..feb4657bb04 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1136,7 +1136,7 @@ static inline void pci_mmcfg_late_init(void) { } #endif #ifdef CONFIG_HAS_IOMEM -static inline void * pci_ioremap_bar(struct pci_dev *pdev, int bar) +static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) { /* * Make sure the BAR is actually a memory resource, not an IO resource diff --git a/include/linux/phonet.h b/include/linux/phonet.h index c9609f9aeda..4157faa857b 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h @@ -72,6 +72,7 @@ struct phonetmsg { } pn_msg_u; }; #define PN_COMMON_MESSAGE 0xF0 +#define PN_COMMGR 0x10 #define PN_PREFIX 0xE0 /* resource for extended messages */ #define pn_submsg_id pn_msg_u.base.pn_submsg_id #define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index c200b9a34af..8fc909ef678 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -115,6 +115,9 @@ struct mdk_rdev_s * in superblock. */ struct work_struct del_work; /* used for delayed sysfs removal */ + + struct sysfs_dirent *sysfs_state; /* handle for 'state' + * sysfs entry */ }; struct mddev_s @@ -128,7 +131,6 @@ struct mddev_s #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* superblock update in progress */ -#define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */ int ro; @@ -239,6 +241,10 @@ struct mddev_s sector_t resync_max; /* resync should pause * when it gets here */ + struct sysfs_dirent *sysfs_state; /* handle for 'array_state' + * file in sysfs. + */ + spinlock_t write_lock; wait_queue_head_t sb_wait; /* for waiting on superblock updates */ atomic_t pending_writes; /* number of active superblock writes */ diff --git a/include/linux/resource.h b/include/linux/resource.h index aaa423a6f3d..40fc7e62608 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h @@ -59,10 +59,10 @@ struct rlimit { #define _STK_LIM (8*1024*1024) /* - * GPG wants 32kB of mlocked memory, to make sure pass phrases + * GPG2 wants 64kB of mlocked memory, to make sure pass phrases * and other sensitive information are never written to disk. */ -#define MLOCK_LIMIT (8 * PAGE_SIZE) +#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? 
PAGE_SIZE : 64*1024) /* * Due to binary compatibility, the actual resource numbers diff --git a/include/linux/sched.h b/include/linux/sched.h index 8478f334d73..644ffbda17c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -247,6 +247,7 @@ extern void init_idle(struct task_struct *idle, int cpu); extern void init_idle_bootup_task(struct task_struct *idle); extern int runqueue_is_locked(void); +extern void task_rq_unlock_wait(struct task_struct *p); extern cpumask_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) @@ -936,7 +937,6 @@ struct sched_class { void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); void (*yield_task) (struct rq *rq); - int (*select_task_rq)(struct task_struct *p, int sync); void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); @@ -944,6 +944,8 @@ struct sched_class { void (*put_prev_task) (struct rq *rq, struct task_struct *p); #ifdef CONFIG_SMP + int (*select_task_rq)(struct task_struct *p, int sync); + unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, @@ -955,16 +957,17 @@ struct sched_class { void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); void (*post_schedule) (struct rq *this_rq); void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); -#endif - void (*set_curr_task) (struct rq *rq); - void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); - void (*task_new) (struct rq *rq, struct task_struct *p); void (*set_cpus_allowed)(struct task_struct *p, const cpumask_t *newmask); void (*rq_online)(struct rq *rq); void (*rq_offline)(struct rq *rq); +#endif + + void (*set_curr_task) (struct rq *rq); + void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); + void (*task_new) (struct rq *rq, struct task_struct *p); void (*switched_from) (struct rq *this_rq, struct task_struct *task, int running); @@ -1347,6 +1350,8 @@ struct task_struct { */ unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; + + struct list_head *scm_work_list; }; /* diff --git a/include/linux/security.h b/include/linux/security.h index f5c4a51eb42..c13f1cec9ab 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1585,6 +1585,7 @@ int security_syslog(int type); int security_settime(struct timespec *ts, struct timezone *tz); int security_vm_enough_memory(long pages); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); +int security_vm_enough_memory_kern(long pages); int security_bprm_alloc(struct linux_binprm *bprm); void security_bprm_free(struct linux_binprm *bprm); void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); @@ -1820,6 +1821,11 @@ static inline int security_vm_enough_memory(long pages) return cap_vm_enough_memory(current->mm, pages); } +static inline int security_vm_enough_memory_kern(long pages) +{ + return cap_vm_enough_memory(current->mm, pages); +} + static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { return cap_vm_enough_memory(mm, pages); diff --git a/include/linux/smp.h b/include/linux/smp.h index 2e4d58b26c0..3f9a60043a9 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -64,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus); * Call a function on all other processors */ int smp_call_function(void(*func)(void *info), void *info, int wait); +/* 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 2e4d58b26c0..3f9a60043a9 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -64,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus);
 * Call a function on all other processors
 */
int smp_call_function(void(*func)(void *info), void *info, int wait);
+/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
				int wait);
+
+static inline void smp_call_function_many(const struct cpumask *mask,
+					  void (*func)(void *info), void *info,
+					  int wait)
+{
+	smp_call_function_mask(*mask, func, info, wait);
+}
+
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data);
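At this point smp_call_function_many() is only a thin wrapper: it dereferences the cpumask pointer and forwards to the now-deprecated smp_call_function_mask(), so callers can already move to the pointer-based prototype. A usage sketch with a hypothetical callback (the callback and its purpose are assumptions, not part of this patch):

/* Hypothetical callback; it is run via IPI on each online CPU in the
 * mask other than the caller. */
static void example_poke_cpu(void *info)
{
        /* per-CPU work would go here */
}

static void example_poke_online_cpus(void)
{
        preempt_disable();
        /* wait == 1: do not return until every targeted CPU has run it */
        smp_call_function_many(&cpu_online_map, example_poke_cpu, NULL, 1);
        preempt_enable();
}

Because the wrapper still passes the whole cpumask_t by value into smp_call_function_mask(), it does not yet reduce stack usage on large NR_CPUS configurations; it only lets callers convert to the new prototype ahead of the real implementation.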
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b8db32cea1d..bf8de281b4e 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -18,6 +18,9 @@
 * duplex (MicroWire) controllers. Provide chipslect() and txrx_bufs(),
 * and custom setup()/cleanup() methods.
 */
+
+#include <linux/workqueue.h>
+
struct spi_bitbang {
	struct workqueue_struct	*workqueue;
	struct work_struct	work;
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index e530026eedf..17d9b58f637 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -427,12 +427,16 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
{
	switch (dev->bus->bustype) {
	case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
		return pci_dma_mapping_error(dev->bus->host_pci, addr);
+#endif
+		break;
	case SSB_BUSTYPE_SSB:
		return dma_mapping_error(dev->dev, addr);
	default:
-		__ssb_dma_not_implemented(dev);
+		break;
	}
+	__ssb_dma_not_implemented(dev);
	return -ENOSYS;
}
@@ -441,12 +445,16 @@ static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
{
	switch (dev->bus->bustype) {
	case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
		return pci_map_single(dev->bus->host_pci, p, size, dir);
+#endif
+		break;
	case SSB_BUSTYPE_SSB:
		return dma_map_single(dev->dev, p, size, dir);
	default:
-		__ssb_dma_not_implemented(dev);
+		break;
	}
+	__ssb_dma_not_implemented(dev);
	return 0;
}
@@ -455,14 +463,18 @@ static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_a
{
	switch (dev->bus->bustype) {
	case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
		pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
		return;
+#endif
+		break;
	case SSB_BUSTYPE_SSB:
		dma_unmap_single(dev->dev, dma_addr, size, dir);
		return;
	default:
-		__ssb_dma_not_implemented(dev);
+		break;
	}
+	__ssb_dma_not_implemented(dev);
}

static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
@@ -472,15 +484,19 @@ static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
{
	switch (dev->bus->bustype) {
	case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
		pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, size, dir);
		return;
+#endif
+		break;
	case SSB_BUSTYPE_SSB:
		dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
		return;
	default:
-		__ssb_dma_not_implemented(dev);
+		break;
	}
+	__ssb_dma_not_implemented(dev);
}

static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
@@ -490,15 +506,19 @@ static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
{
	switch (dev->bus->bustype) {
	case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
		pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, size, dir);
		return;
+#endif
+		break;
	case SSB_BUSTYPE_SSB:
		dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
		return;
	default:
-		__ssb_dma_not_implemented(dev);
+		break;
	}
+	__ssb_dma_not_implemented(dev);
}

static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
@@ -509,17 +529,21 @@ static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
{
	switch (dev->bus->bustype) {
	case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
		/* Just sync everything. That's all the PCI API can do. */
		pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, offset + size, dir);
		return;
+#endif
+		break;
	case SSB_BUSTYPE_SSB:
		dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset, size, dir);
		return;
	default:
-		__ssb_dma_not_implemented(dev);
+		break;
	}
+	__ssb_dma_not_implemented(dev);
}

static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
@@ -530,17 +554,21 @@ static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
{
	switch (dev->bus->bustype) {
	case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
		/* Just sync everything. That's all the PCI API can do. */
		pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, offset + size, dir);
		return;
+#endif
+		break;
	case SSB_BUSTYPE_SSB:
		dma_sync_single_range_for_device(dev->dev, dma_addr, offset, size, dir);
		return;
	default:
-		__ssb_dma_not_implemented(dev);
+		break;
	}
+	__ssb_dma_not_implemented(dev);
}
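Every ssb.h hunk above applies the same two-part transformation: the PCI case is wrapped in #ifdef CONFIG_SSB_PCIHOST so it compiles away on hosts without PCI support, and each unhandled case now breaks out of the switch so that one shared __ssb_dma_not_implemented() call after it covers every error path instead of being repeated in default:. A reduced sketch of that shape, using hypothetical names and a hypothetical config symbol rather than the real SSB API:

#include <linux/errno.h>

/* Illustrative only -- hypothetical device, backends and config symbol. */
enum example_bustype { EXAMPLE_BUS_PCI, EXAMPLE_BUS_NATIVE };

struct example_dev {
        enum example_bustype bustype;
};

static int example_pci_map(struct example_dev *dev) { return 0; }
static int example_native_map(struct example_dev *dev) { return 0; }
static void example_not_implemented(struct example_dev *dev) { }

static int example_dma_map(struct example_dev *dev)
{
        switch (dev->bustype) {
        case EXAMPLE_BUS_PCI:
#ifdef CONFIG_EXAMPLE_PCIHOST
                return example_pci_map(dev);    /* backend built in: do the real work */
#endif
                break;          /* backend compiled out: fall through to the shared stub */
        case EXAMPLE_BUS_NATIVE:
                return example_native_map(dev);
        default:
                break;          /* unknown bus type */
        }
        example_not_implemented(dev);           /* single "not implemented" exit */
        return -ENOSYS;
}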
diff --git a/include/linux/string.h b/include/linux/string.h
index 810d80df0a1..d18fc198aa2 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_

-/* We don't want strings.h stuff being user by user stuff by accident */
+/* We don't want strings.h stuff being used by user stuff by accident */

#ifndef __KERNEL__
#include <string.h>
diff --git a/include/linux/telephony.h b/include/linux/telephony.h
index 5b2b6261f19..f63afe330ad 100644
--- a/include/linux/telephony.h
+++ b/include/linux/telephony.h
@@ -14,7 +14,7 @@
 *    Authors:       Ed Okerson, <eokerson@quicknet.net>
 *                   Greg Herlein, <gherlein@quicknet.net>
 *
- *    Contributors: Alan Cox, <alan@redhat.com>
+ *    Contributors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *                   David W. Erhart, <derhart@quicknet.net>
 *
 * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
diff --git a/include/linux/timer.h b/include/linux/timer.h
index d4ba79248a2..daf9685b861 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -186,4 +186,9 @@ unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);

+unsigned long __round_jiffies_up(unsigned long j, int cpu);
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
+unsigned long round_jiffies_up(unsigned long j);
+unsigned long round_jiffies_up_relative(unsigned long j);
+
#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 2158fc0d5a5..117f1b7405c 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,7 +99,7 @@ void arch_update_cpu_topology(void);
				| SD_BALANCE_FORK	\
				| SD_BALANCE_EXEC	\
				| SD_WAKE_AFFINE	\
-				| SD_WAKE_IDLE		\
+				| SD_WAKE_BALANCE	\
				| SD_SHARE_CPUPOWER,	\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
@@ -120,10 +120,10 @@ void arch_update_cpu_topology(void);
	.wake_idx		= 1,			\
	.forkexec_idx		= 1,			\
	.flags			= SD_LOAD_BALANCE	\
-				| SD_BALANCE_NEWIDLE	\
				| SD_BALANCE_FORK	\
				| SD_BALANCE_EXEC	\
				| SD_WAKE_AFFINE	\
+				| SD_WAKE_BALANCE	\
				| SD_SHARE_PKG_RESOURCES\
				| BALANCE_FOR_MC_POWER,	\
	.last_balance		= jiffies,		\
@@ -146,10 +146,10 @@ void arch_update_cpu_topology(void);
	.wake_idx		= 1,			\
	.forkexec_idx		= 1,			\
	.flags			= SD_LOAD_BALANCE	\
-				| SD_BALANCE_NEWIDLE	\
-				| SD_BALANCE_FORK	\
				| SD_BALANCE_EXEC	\
+				| SD_BALANCE_FORK	\
				| SD_WAKE_AFFINE	\
+				| SD_WAKE_BALANCE	\
				| BALANCE_FOR_PKG_POWER,\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 89a5a1231ff..b36291130f2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,4 +240,12 @@ void cancel_rearming_delayed_work(struct delayed_work *work)
	cancel_delayed_work_sync(work);
}

+#ifndef CONFIG_SMP
+static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+	return fn(arg);
+}
+#else
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+#endif /* CONFIG_SMP */
#endif
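The new work_on_cpu() declaration above comes with a uniprocessor fallback: when CONFIG_SMP is not set there is only one CPU, so the inline version simply calls fn(arg) directly, while the SMP version (a real function provided by the workqueue code) arranges for fn(arg) to run on the requested CPU and returns its result. A usage sketch with a hypothetical callback (not part of this patch):

/* Hypothetical callback: report which CPU actually executed it.
 * With CONFIG_SMP=y it runs on the CPU passed to work_on_cpu();
 * on UP builds it is just a direct function call. */
static long example_which_cpu(void *unused)
{
        return raw_smp_processor_id();
}

static void example_run_on_cpu(unsigned int cpu)
{
        long ran_on = work_on_cpu(cpu, example_which_cpu, NULL);

        pr_info("example: asked for CPU %u, callback ran on CPU %ld\n",
                cpu, ran_on);
}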