1 diff -Naur 2.6.12-5.0-org/include/asm-mips/futex.h 2.6.12-5.0-patched/include/asm-mips/futex.h
2 --- 2.6.12-5.0-org/include/asm-mips/futex.h 1970-01-01 01:00:00.000000000 +0100
3 +++ 2.6.12-5.0-patched/include/asm-mips/futex.h 2007-12-11 12:34:52.000000000 +0100
10 +#include <linux/config.h>
11 +#include <linux/futex.h>
12 +#include <asm/errno.h>
13 +#include <asm/uaccess.h>
17 +#define __FUTEX_SMP_SYNC " sync \n"
19 +#define __FUTEX_SMP_SYNC
22 +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
24 + if (cpu_has_llsc && R10000_LLSC_WAR) { \
25 + __asm__ __volatile__( \
29 + "1: ll %1, %4 # __futex_atomic_op \n" \
34 + " beqzl $1, 1b \n" \
39 + " .section .fixup,\"ax\" \n" \
43 + " .section __ex_table,\"a\" \n" \
44 + " "__UA_ADDR "\t1b, 4b \n" \
45 + " "__UA_ADDR "\t2b, 4b \n" \
47 + : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
48 + : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
50 + } else if (cpu_has_llsc) { \
51 + __asm__ __volatile__( \
55 + "1: ll %1, %4 # __futex_atomic_op \n" \
65 + " .section .fixup,\"ax\" \n" \
69 + " .section __ex_table,\"a\" \n" \
70 + " "__UA_ADDR "\t1b, 4b \n" \
71 + " "__UA_ADDR "\t2b, 4b \n" \
73 + : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
74 + : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
81 +futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
83 + int op = (encoded_op >> 28) & 7;
84 + int cmp = (encoded_op >> 24) & 15;
85 + int oparg = (encoded_op << 8) >> 20;
86 + int cmparg = (encoded_op << 20) >> 20;
87 + int oldval = 0, ret;
88 + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
91 + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
94 + inc_preempt_count();
98 + __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
102 + __futex_atomic_op("addu $1, %1, %z5",
103 + ret, oldval, uaddr, oparg);
106 + __futex_atomic_op("or $1, %1, %z5",
107 + ret, oldval, uaddr, oparg);
109 + case FUTEX_OP_ANDN:
110 + __futex_atomic_op("and $1, %1, %z5",
111 + ret, oldval, uaddr, ~oparg);
114 + __futex_atomic_op("xor $1, %1, %z5",
115 + ret, oldval, uaddr, oparg);
121 + dec_preempt_count();
125 + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
126 + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
127 + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
128 + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
129 + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
130 + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
131 + default: ret = -ENOSYS;
139 diff -Naur 2.6.12-5.0-org/include/linux/futex.h 2.6.12-5.0-patched/include/linux/futex.h
140 --- 2.6.12-5.0-org/include/linux/futex.h 2007-07-26 00:57:03.000000000 +0200
141 +++ 2.6.12-5.0-patched/include/linux/futex.h 2007-12-11 12:34:52.000000000 +0100
143 /* Second argument to futex syscall */
146 -#define FUTEX_WAIT (0)
147 -#define FUTEX_WAKE (1)
148 -#define FUTEX_FD (2)
149 -#define FUTEX_REQUEUE (3)
150 -#define FUTEX_CMP_REQUEUE (4)
151 +#define FUTEX_WAIT 0
152 +#define FUTEX_WAKE 1
154 +#define FUTEX_REQUEUE 3
155 +#define FUTEX_CMP_REQUEUE 4
156 +#define FUTEX_WAKE_OP 5
158 long do_futex(unsigned long uaddr, int op, int val,
159 unsigned long timeout, unsigned long uaddr2, int val2,
162 +#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
163 +#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */
164 +#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */
165 +#define FUTEX_OP_ANDN 3 /* *(int *)UADDR2 &= ~OPARG; */
166 +#define FUTEX_OP_XOR 4 /* *(int *)UADDR2 ^= OPARG; */
168 +#define FUTEX_OP_OPARG_SHIFT 8 /* Use (1 << OPARG) instead of OPARG. */
170 +#define FUTEX_OP_CMP_EQ 0 /* if (oldval == CMPARG) wake */
171 +#define FUTEX_OP_CMP_NE 1 /* if (oldval != CMPARG) wake */
172 +#define FUTEX_OP_CMP_LT 2 /* if (oldval < CMPARG) wake */
173 +#define FUTEX_OP_CMP_LE 3 /* if (oldval <= CMPARG) wake */
174 +#define FUTEX_OP_CMP_GT 4 /* if (oldval > CMPARG) wake */
175 +#define FUTEX_OP_CMP_GE 5 /* if (oldval >= CMPARG) wake */
177 +/* FUTEX_WAKE_OP will perform atomically
178 + int oldval = *(int *)UADDR2;
179 + *(int *)UADDR2 = oldval OP OPARG;
180 + if (oldval CMP CMPARG)
183 +#define FUTEX_OP(op, oparg, cmp, cmparg) \
184 + (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
185 + | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
188 diff -Naur 2.6.12-5.0-org/kernel/futex.c 2.6.12-5.0-patched/kernel/futex.c
189 --- 2.6.12-5.0-org/kernel/futex.c 2007-07-26 00:57:20.000000000 +0200
190 +++ 2.6.12-5.0-patched/kernel/futex.c 2007-12-11 12:34:52.000000000 +0100
192 #include <linux/pagemap.h>
193 #include <linux/syscalls.h>
194 #include <linux/signal.h>
195 +#include <asm/futex.h>
197 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
200 * for a rare case, so we simply fetch the page.
204 - * Do a quick atomic lookup first - this is the fastpath.
206 - spin_lock(&current->mm->page_table_lock);
207 - page = follow_page(mm, uaddr, 0);
208 - if (likely(page != NULL)) {
209 - key->shared.pgoff =
210 - page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
211 - spin_unlock(&current->mm->page_table_lock);
214 - spin_unlock(&current->mm->page_table_lock);
217 - * Do it the general way.
219 err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
222 @@ -327,6 +312,123 @@
226 + * Wake up all waiters hashed on the physical page that is mapped
227 + * to this virtual address:
229 +static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op)
231 + union futex_key key1, key2;
232 + struct futex_hash_bucket *bh1, *bh2;
233 + struct list_head *head;
234 + struct futex_q *this, *next;
235 + int ret, op_ret, attempt = 0;
238 + down_read(&current->mm->mmap_sem);
240 + ret = get_futex_key(uaddr1, &key1);
241 + if (unlikely(ret != 0))
243 + ret = get_futex_key(uaddr2, &key2);
244 + if (unlikely(ret != 0))
247 + bh1 = hash_futex(&key1);
248 + bh2 = hash_futex(&key2);
252 + spin_lock(&bh1->lock);
253 + spin_lock(&bh2->lock);
255 + spin_lock(&bh1->lock);
257 + op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2);
258 + if (unlikely(op_ret < 0)) {
261 + spin_unlock(&bh1->lock);
263 + spin_unlock(&bh2->lock);
265 + if (unlikely(op_ret != -EFAULT)) {
270 + /* futex_atomic_op_inuser needs to both read and write
271 + * *(int __user *)uaddr2, but we can't modify it
272 + * non-atomically. Therefore, if get_user below is not
273 + * enough, we need to handle the fault ourselves, while
274 + * still holding the mmap_sem. */
276 + struct vm_area_struct * vma;
277 + struct mm_struct *mm = current->mm;
280 + if (attempt >= 2 ||
281 + !(vma = find_vma(mm, uaddr2)) ||
282 + vma->vm_start > uaddr2 ||
283 + !(vma->vm_flags & VM_WRITE))
286 + switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
287 + case VM_FAULT_MINOR:
288 + current->min_flt++;
290 + case VM_FAULT_MAJOR:
291 + current->maj_flt++;
299 + /* If we would have faulted, release mmap_sem,
300 + * fault it in and start all over again. */
301 + up_read(&current->mm->mmap_sem);
303 + ret = get_user(dummy, (int __user *)uaddr2);
310 + head = &bh1->chain;
312 + list_for_each_entry_safe(this, next, head, list) {
313 + if (match_futex (&this->key, &key1)) {
315 + if (++ret >= nr_wake)
321 + head = &bh2->chain;
324 + list_for_each_entry_safe(this, next, head, list) {
325 + if (match_futex (&this->key, &key2)) {
327 + if (++op_ret >= nr_wake2)
334 + spin_unlock(&bh1->lock);
336 + spin_unlock(&bh2->lock);
338 + up_read(&current->mm->mmap_sem);
343 * Requeue all waiters hashed on one physical page to another
347 case FUTEX_CMP_REQUEUE:
348 ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
350 + case FUTEX_WAKE_OP:
351 + ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
357 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
360 - if ((op == FUTEX_WAIT) && utime) {
361 + if (utime && (op == FUTEX_WAIT)) {
362 if (copy_from_user(&t, utime, sizeof(t)) != 0)
364 + if (!timespec_valid(&t))
366 timeout = timespec_to_jiffies(&t) + 1;