Diffstat (limited to 'ipc')
-rw-r--r--   ipc/compat.c    1
-rw-r--r--   ipc/sem.c      19
-rw-r--r--   ipc/shm.c       2
-rw-r--r--   ipc/util.h      2
4 files changed, 9 insertions, 15 deletions
diff --git a/ipc/compat.c b/ipc/compat.c
index 70e4e4e10fd1..3881d564c668 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -572,6 +572,7 @@ static inline int put_compat_shminfo(struct shminfo64 *smi,
err |= __put_user(smi->shmmni, &up->shmmni);
err |= __put_user(smi->shmseg, &up->shmseg);
err |= __put_user(smi->shmall, &up->shmall);
+ return err;
}
static inline int put_compat_shm_info(struct shm_info __user *ip,
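
The compat.c hunk above adds the return that put_compat_shminfo() was missing: the function accumulated __put_user() failures in err and then fell off the end, which is undefined behaviour for a non-void function, so the caller could see an arbitrary error code. A minimal user-space sketch of the same accumulate-and-return pattern (the struct and helper names here are illustrative, not the kernel's):

#include <stdio.h>

struct shminfo_like { unsigned long shmmax, shmmin; };

/* Accumulate per-field failures with |= and hand the result back.
 * Dropping the final return -- the pre-patch bug -- leaves the function's
 * value undefined, so the caller cannot tell whether the copy worked. */
static int put_info(const struct shminfo_like *in, struct shminfo_like *out)
{
	int err = 0;

	err |= (out == NULL);		/* stand-in for a failed __put_user() */
	if (!err) {
		out->shmmax = in->shmmax;
		out->shmmin = in->shmmin;
	}
	return err;			/* the line the patch adds */
}

int main(void)
{
	struct shminfo_like src = { 4096, 1 }, dst;

	printf("ok=%d, fail=%d\n", put_info(&src, &dst), put_info(&src, NULL));
	return 0;
}
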
diff --git a/ipc/sem.c b/ipc/sem.c
index 5ad7ac0ed60d..70975ce0784a 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -895,7 +895,7 @@ static inline void lock_semundo(void)
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
- if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+ if (undo_list)
spin_lock(&undo_list->lock);
}
@@ -915,7 +915,7 @@ static inline void unlock_semundo(void)
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
- if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+ if (undo_list)
spin_unlock(&undo_list->lock);
}
@@ -943,9 +943,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
if (undo_list == NULL)
return -ENOMEM;
memset(undo_list, 0, size);
- /* don't initialize unodhd->lock here. It's done
- * in copy_semundo() instead.
- */
+ spin_lock_init(&undo_list->lock);
atomic_set(&undo_list->refcnt, 1);
current->sysvsem.undo_list = undo_list;
}
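
The two locking hunks and the get_undo_list() hunk above, together with the copy_semundo() hunk further down that drops the refcnt == 1 special case, move the lock initialization to the single place where the undo list is allocated and make lock_semundo()/unlock_semundo() take the lock whenever a list exists. A hedged user-space analogue with pthreads; the names and the mutex stand in for the kernel structures, so this is a sketch of the pattern rather than the kernel code:

#include <pthread.h>
#include <stdlib.h>

struct undo_list_like {
	pthread_mutex_t lock;	/* stands in for the kernel spinlock */
	int refcnt;
};

/* Allocate and initialize in one place: the lock is usable before anyone
 * else can reach the structure, which is what moving spin_lock_init()
 * into get_undo_list() achieves. */
static struct undo_list_like *alloc_undo_list(void)
{
	struct undo_list_like *ul = calloc(1, sizeof(*ul));

	if (!ul)
		return NULL;
	pthread_mutex_init(&ul->lock, NULL);
	ul->refcnt = 1;
	return ul;
}

/* Always lock when a list exists -- no "only one user, skip the lock"
 * shortcut that depends on a refcount read done without the lock. */
static void lock_undo_list(struct undo_list_like *ul)
{
	if (ul)
		pthread_mutex_lock(&ul->lock);
}

static void unlock_undo_list(struct undo_list_like *ul)
{
	if (ul)
		pthread_mutex_unlock(&ul->lock);
}

int main(void)
{
	struct undo_list_like *ul = alloc_undo_list();

	lock_undo_list(ul);
	unlock_undo_list(ul);
	free(ul);
	return 0;
}
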
@@ -1054,7 +1052,7 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, decrease = 0, alter = 0, max;
+ int undos = 0, alter = 0, max;
struct sem_queue queue;
unsigned long jiffies_left = 0;
@@ -1089,13 +1087,10 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
if (sop->sem_num >= max)
max = sop->sem_num;
if (sop->sem_flg & SEM_UNDO)
- undos++;
- if (sop->sem_op < 0)
- decrease = 1;
- if (sop->sem_op > 0)
+ undos = 1;
+ if (sop->sem_op != 0)
alter = 1;
}
- alter |= decrease;
retry_undos:
if (undos) {
@@ -1234,8 +1229,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
error = get_undo_list(&undo_list);
if (error)
return error;
- if (atomic_read(&undo_list->refcnt) == 1)
- spin_lock_init(&undo_list->lock);
atomic_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
} else
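
The sys_semtimedop() hunks above simplify the scan of the caller's operations: undos only needs to record whether any operation carries SEM_UNDO, and the separate decrease flag goes away because every non-zero sem_op, increment or decrement, alters the semaphore set, leaving alter clear only for pure wait-for-zero operations. A user-space sketch of that scan, using struct sembuf from <sys/sem.h> (an illustrative helper, not the kernel function):

#include <stdio.h>
#include <sys/sem.h>

/* Any non-zero sem_op alters the set; one SEM_UNDO anywhere sets undos. */
static void scan_sops(const struct sembuf *sops, size_t nsops,
		      int *undos, int *alter, unsigned short *max)
{
	size_t i;

	*undos = *alter = 0;
	*max = 0;
	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_num > *max)
			*max = sops[i].sem_num;
		if (sops[i].sem_flg & SEM_UNDO)
			*undos = 1;
		if (sops[i].sem_op != 0)
			*alter = 1;
	}
}

int main(void)
{
	struct sembuf ops[2] = {
		{ .sem_num = 0, .sem_op = 0,  .sem_flg = 0        },	/* wait-for-zero */
		{ .sem_num = 3, .sem_op = -1, .sem_flg = SEM_UNDO },	/* decrement */
	};
	int undos, alter;
	unsigned short max;

	scan_sops(ops, 2, &undos, &alter, &max);
	printf("undos=%d alter=%d max=%hu\n", undos, alter, max);
	return 0;
}
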
diff --git a/ipc/shm.c b/ipc/shm.c
index cce022435dbc..1d6cf08d950b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -170,7 +170,7 @@ static struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
.nopage = shmem_nopage,
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
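
The shm.c hunk above tightens the guard because shmem_set_policy()/shmem_get_policy() only exist when CONFIG_SHMEM is built, so a CONFIG_NUMA kernel without CONFIG_SHMEM would fail to link. A small stand-alone sketch of the pattern -- guarding optional callbacks on every option they depend on -- with made-up macros in place of the kernel config symbols:

#include <stdio.h>

#define HAVE_NUMA 1
/* #define HAVE_SHMEM 1 */	/* toggle to mimic CONFIG_SHMEM */

struct vm_ops_like {
	void (*open)(void);
#if defined(HAVE_NUMA) && defined(HAVE_SHMEM)
	void (*set_policy)(void);	/* only present when both options are on */
#endif
};

static void my_open(void) { puts("open"); }

#if defined(HAVE_NUMA) && defined(HAVE_SHMEM)
static void my_set_policy(void) { puts("set_policy"); }
#endif

static struct vm_ops_like ops = {
	.open = my_open,
#if defined(HAVE_NUMA) && defined(HAVE_SHMEM)
	/* references a symbol that exists only in the SHMEM-enabled build */
	.set_policy = my_set_policy,
#endif
};

int main(void)
{
	ops.open();
	return 0;
}
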
diff --git a/ipc/util.h b/ipc/util.h
index 07d689452363..44348ca5a707 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -67,7 +67,7 @@ int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid);
void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
-#if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__)
+#if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__)
/* On IA-64, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd) IPC_64
#else
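
The util.h hunk adds xtensa to the architectures whose IPC ABI only has the 64-bit structure layout, so ipc_parse_version(cmd) collapses to the constant IPC_64 there. On the remaining architectures the version is carried as a flag in the command word and stripped at run time, roughly like the sketch below (a stand-alone approximation with the flag values defined locally, not the kernel's own helper):

#include <stdio.h>

/* Values mirror linux/ipc.h; defined here so the sketch stands alone. */
#define IPC_64	0x0100	/* caller wants the 64-bit IPC structures */
#define IPC_OLD	0	/* legacy layout */

/* Approximation of the non-64-bit-only branch: peel the IPC_64 flag off
 * the command and report which structure layout the caller asked for. */
static int ipc_parse_version(int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;		/* strip the flag, keep the raw command */
		return IPC_64;
	}
	return IPC_OLD;
}

int main(void)
{
	int cmd = 24 | IPC_64;		/* 24 is just a placeholder command */
	int version = ipc_parse_version(&cmd);

	printf("version=%#x cmd=%d\n", version, cmd);
	return 0;
}
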