packages/linux/linux-dm800/linuxmips-2.6.12-fix-futex.patch
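
Backport of FUTEX_WAKE_OP support to the 2.6.12 dm800 kernel: adds a MIPS
include/asm-mips/futex.h implementing futex_atomic_op_inuser() with ll/sc,
adds the FUTEX_WAKE_OP operation and the FUTEX_OP_* encoding macros to
include/linux/futex.h, and adds the futex_wake_op() handler to kernel/futex.c
(the follow_page() fastpath in get_futex_key() is dropped and sys_futex()
gains a timespec_valid() check, apparently following the upstream
introduction of FUTEX_WAKE_OP).

As a rough standalone illustration (not part of the patch; the macros are
duplicated here only so the snippet compiles on its own), the FUTEX_OP()
encoding added below packs the operation into one int exactly as
futex_atomic_op_inuser() unpacks it:

        /* illustrative sketch only -- mirrors the macros added by this patch */
        #include <stdio.h>

        #define FUTEX_OP_ADD    1
        #define FUTEX_OP_CMP_GT 4
        #define FUTEX_OP(op, oparg, cmp, cmparg) \
          (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
           | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))

        int main(void)
        {
                /* "add 1 to *uaddr2, wake waiters if the old value was > 0" */
                int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

                /* the same fields futex_atomic_op_inuser() recovers */
                int op     = (encoded_op >> 28) & 7;    /* 1 = FUTEX_OP_ADD    */
                int cmp    = (encoded_op >> 24) & 15;   /* 4 = FUTEX_OP_CMP_GT */
                int oparg  = (encoded_op << 8) >> 20;   /* 1 */
                int cmparg = (encoded_op << 20) >> 20;  /* 0 */

                printf("%#x -> op=%d cmp=%d oparg=%d cmparg=%d\n",
                       (unsigned) encoded_op, op, cmp, oparg, cmparg);
                return 0;
        }
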
diff -Naur 2.6.12-5.0-org/include/asm-mips/futex.h 2.6.12-5.0-patched/include/asm-mips/futex.h
--- 2.6.12-5.0-org/include/asm-mips/futex.h     1970-01-01 01:00:00.000000000 +0100
+++ 2.6.12-5.0-patched/include/asm-mips/futex.h 2007-12-11 12:34:52.000000000 +0100
@@ -0,0 +1,134 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+#include <asm/war.h>
+
+#ifdef CONFIG_SMP
+#define __FUTEX_SMP_SYNC "     sync                                    \n"
+#else
+#define __FUTEX_SMP_SYNC
+#endif
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
+{                                                                      \
+       if (cpu_has_llsc && R10000_LLSC_WAR) {                          \
+               __asm__ __volatile__(                                   \
+               "       .set    push                            \n"     \
+               "       .set    noat                            \n"     \
+               "       .set    mips3                           \n"     \
+               "1:     ll      %1, %4  # __futex_atomic_op     \n"     \
+               "       .set    mips0                           \n"     \
+               "       " insn  "                               \n"     \
+               "       .set    mips3                           \n"     \
+               "2:     sc      $1, %2                          \n"     \
+               "       beqzl   $1, 1b                          \n"     \
+               __FUTEX_SMP_SYNC                                        \
+               "3:                                             \n"     \
+               "       .set    pop                             \n"     \
+               "       .set    mips0                           \n"     \
+               "       .section .fixup,\"ax\"                  \n"     \
+               "4:     li      %0, %6                          \n"     \
+               "       j       2b                              \n"     \
+               "       .previous                               \n"     \
+               "       .section __ex_table,\"a\"               \n"     \
+               "       "__UA_ADDR "\t1b, 4b                    \n"     \
+               "       "__UA_ADDR "\t2b, 4b                    \n"     \
+               "       .previous                               \n"     \
+               : "=r" (ret), "=&r" (oldval), "=R" (*uaddr)             \
+               : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT)    \
+               : "memory");                                            \
+       } else if (cpu_has_llsc) {                                      \
+               __asm__ __volatile__(                                   \
+               "       .set    push                            \n"     \
+               "       .set    noat                            \n"     \
+               "       .set    mips3                           \n"     \
+               "1:     ll      %1, %4  # __futex_atomic_op     \n"     \
+               "       .set    mips0                           \n"     \
+               "       " insn  "                               \n"     \
+               "       .set    mips3                           \n"     \
+               "2:     sc      $1, %2                          \n"     \
+               "       beqz    $1, 1b                          \n"     \
+               __FUTEX_SMP_SYNC                                        \
+               "3:                                             \n"     \
+               "       .set    pop                             \n"     \
+               "       .set    mips0                           \n"     \
+               "       .section .fixup,\"ax\"                  \n"     \
+               "4:     li      %0, %6                          \n"     \
+               "       j       2b                              \n"     \
+               "       .previous                               \n"     \
+               "       .section __ex_table,\"a\"               \n"     \
+               "       "__UA_ADDR "\t1b, 4b                    \n"     \
+               "       "__UA_ADDR "\t2b, 4b                    \n"     \
+               "       .previous                               \n"     \
+               : "=r" (ret), "=&r" (oldval), "=R" (*uaddr)             \
+               : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT)    \
+               : "memory");                                            \
+       } else                                                          \
+               ret = -ENOSYS;                                          \
+}
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+       int op = (encoded_op >> 28) & 7;
+       int cmp = (encoded_op >> 24) & 15;
+       int oparg = (encoded_op << 8) >> 20;
+       int cmparg = (encoded_op << 20) >> 20;
+       int oldval = 0, ret;
+       if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+               oparg = 1 << oparg;
+
+       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+               return -EFAULT;
+
+       inc_preempt_count();
+
+       switch (op) {
+       case FUTEX_OP_SET:
+               __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
+               break;
+
+       case FUTEX_OP_ADD:
+               __futex_atomic_op("addu $1, %1, %z5",
+                                 ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+               __futex_atomic_op("or   $1, %1, %z5",
+                                 ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+               __futex_atomic_op("and  $1, %1, %z5",
+                                 ret, oldval, uaddr, ~oparg);
+               break;
+       case FUTEX_OP_XOR:
+               __futex_atomic_op("xor  $1, %1, %z5",
+                                 ret, oldval, uaddr, oparg);
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       dec_preempt_count();
+
+       if (!ret) {
+               switch (cmp) {
+               case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+               case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+               case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+               case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+               case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+               case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+               default: ret = -ENOSYS;
+               }
+       }
+       return ret;
+}
+
+#endif
+#endif
diff -Naur 2.6.12-5.0-org/include/linux/futex.h 2.6.12-5.0-patched/include/linux/futex.h
--- 2.6.12-5.0-org/include/linux/futex.h        2007-07-26 00:57:03.000000000 +0200
+++ 2.6.12-5.0-patched/include/linux/futex.h    2007-12-11 12:34:52.000000000 +0100
@@ -4,14 +4,40 @@
 /* Second argument to futex syscall */
 
 
-#define FUTEX_WAIT (0)
-#define FUTEX_WAKE (1)
-#define FUTEX_FD (2)
-#define FUTEX_REQUEUE (3)
-#define FUTEX_CMP_REQUEUE (4)
+#define FUTEX_WAIT             0
+#define FUTEX_WAKE             1
+#define FUTEX_FD               2
+#define FUTEX_REQUEUE          3
+#define FUTEX_CMP_REQUEUE      4
+#define FUTEX_WAKE_OP          5
 
 long do_futex(unsigned long uaddr, int op, int val,
                timeout, unsigned long uaddr2, int val2,
                int val3);
 
+#define FUTEX_OP_SET           0       /* *(int *)UADDR2 = OPARG; */
+#define FUTEX_OP_ADD           1       /* *(int *)UADDR2 += OPARG; */
+#define FUTEX_OP_OR            2       /* *(int *)UADDR2 |= OPARG; */
+#define FUTEX_OP_ANDN          3       /* *(int *)UADDR2 &= ~OPARG; */
+#define FUTEX_OP_XOR           4       /* *(int *)UADDR2 ^= OPARG; */
+
+#define FUTEX_OP_OPARG_SHIFT   8       /* Use (1 << OPARG) instead of OPARG.  */
+
+#define FUTEX_OP_CMP_EQ                0       /* if (oldval == CMPARG) wake */
+#define FUTEX_OP_CMP_NE                1       /* if (oldval != CMPARG) wake */
+#define FUTEX_OP_CMP_LT                2       /* if (oldval < CMPARG) wake */
+#define FUTEX_OP_CMP_LE                3       /* if (oldval <= CMPARG) wake */
+#define FUTEX_OP_CMP_GT                4       /* if (oldval > CMPARG) wake */
+#define FUTEX_OP_CMP_GE                5       /* if (oldval >= CMPARG) wake */
+
+/* FUTEX_WAKE_OP will perform atomically
+   int oldval = *(int *)UADDR2;
+   *(int *)UADDR2 = oldval OP OPARG;
+   if (oldval CMP CMPARG)
+     wake UADDR2;  */
+
+#define FUTEX_OP(op, oparg, cmp, cmparg) \
+  (((op & 0xf) << 28) | ((cmp & 0xf) << 24)            \
+   | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
+
 #endif
diff -Naur 2.6.12-5.0-org/kernel/futex.c 2.6.12-5.0-patched/kernel/futex.c
--- 2.6.12-5.0-org/kernel/futex.c       2007-07-26 00:57:20.000000000 +0200
+++ 2.6.12-5.0-patched/kernel/futex.c   2007-12-11 12:34:52.000000000 +0100
@@ -40,6 +40,7 @@
 #include <linux/pagemap.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <asm/futex.h>
 
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
 
@@ -201,22 +202,6 @@
         * for a rare case, so we simply fetch the page.
         */
 
-       /*
-        * Do a quick atomic lookup first - this is the fastpath.
-        */
-       spin_lock(&current->mm->page_table_lock);
-       page = follow_page(mm, uaddr, 0);
-       if (likely(page != NULL)) {
-               key->shared.pgoff =
-                       page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-               spin_unlock(&current->mm->page_table_lock);
-               return 0;
-       }
-       spin_unlock(&current->mm->page_table_lock);
-
-       /*
-        * Do it the general way.
-        */
        err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
       if (err >= 0) {
               key->shared.pgoff =
@@ -327,6 +312,123 @@
 }
 
 /*
+ * Wake up all waiters hashed on the physical page that is mapped
+ * to this virtual address:
+ */
+static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op)
+{
+       union futex_key key1, key2;
+       struct futex_hash_bucket *bh1, *bh2;
+       struct list_head *head;
+       struct futex_q *this, *next;
+       int ret, op_ret, attempt = 0;
+
+retryfull:
+       down_read(&current->mm->mmap_sem);
+
+       ret = get_futex_key(uaddr1, &key1);
+       if (unlikely(ret != 0))
+               goto out;
+       ret = get_futex_key(uaddr2, &key2);
+       if (unlikely(ret != 0))
+               goto out;
+
+       bh1 = hash_futex(&key1);
+       bh2 = hash_futex(&key2);
+
+retry:
+       if (bh1 < bh2)
+               spin_lock(&bh1->lock);
+       spin_lock(&bh2->lock);
+       if (bh1 > bh2)
+               spin_lock(&bh1->lock);
+
+       op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2);
+       if (unlikely(op_ret < 0)) {
+               int dummy;
+
+               spin_unlock(&bh1->lock);
+               if (bh1 != bh2)
+                       spin_unlock(&bh2->lock);
+
+               if (unlikely(op_ret != -EFAULT)) {
+                       ret = op_ret;
+                       goto out;
+               }
+
+               /* futex_atomic_op_inuser needs to both read and write
+                * *(int __user *)uaddr2, but we can't modify it
+                * non-atomically.  Therefore, if get_user below is not
+                * enough, we need to handle the fault ourselves, while
+                * still holding the mmap_sem.  */
+               if (attempt++) {
+                       struct vm_area_struct * vma;
+                       struct mm_struct *mm = current->mm;
+
+                       ret = -EFAULT;
+                       if (attempt >= 2 ||
+                           !(vma = find_vma(mm, uaddr2)) ||
+                           vma->vm_start > uaddr2 ||
+                           !(vma->vm_flags & VM_WRITE))
+                               goto out;
+
+                       switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
+                       case VM_FAULT_MINOR:
+                               current->min_flt++;
+                               break;
+                       case VM_FAULT_MAJOR:
+                               current->maj_flt++;
+                               break;
+                       default:
+                               goto out;
+                       }
+                       goto retry;
+               }
+
+               /* If we would have faulted, release mmap_sem,
+                * fault it in and start all over again.  */
+               up_read(&current->mm->mmap_sem);
+
+               ret = get_user(dummy, (int __user *)uaddr2);
+               if (ret)
+                       return ret;
+
+               goto retryfull;
+       }
+
+       head = &bh1->chain;
+
+       list_for_each_entry_safe(this, next, head, list) {
+               if (match_futex (&this->key, &key1)) {
+                       wake_futex(this);
+                       if (++ret >= nr_wake)
+                               break;
+               }
+       }
+
+       if (op_ret > 0) {
+               head = &bh2->chain;
+
+               op_ret = 0;
+               list_for_each_entry_safe(this, next, head, list) {
+                       if (match_futex (&this->key, &key2)) {
+                               wake_futex(this);
+                               if (++op_ret >= nr_wake2)
+                                       break;
+                       }
+               }
+               ret += op_ret;
+       }
+
+       spin_unlock(&bh1->lock);
+       if (bh1 != bh2)
+               spin_unlock(&bh2->lock);
+out:
+       up_read(&current->mm->mmap_sem);
+       return ret;
+}
+
+/*
  * Requeue all waiters hashed on one physical page to another
  * physical page.
  */
@@ -740,6 +842,9 @@
        case FUTEX_CMP_REQUEUE:
                ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
                break;
+       case FUTEX_WAKE_OP:
+               ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
+               break;
        default:
                ret = -ENOSYS;
        }
@@ -755,9 +860,11 @@
        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
        int val2 = 0;
 
-       if ((op == FUTEX_WAIT) && utime) {
+       if (utime && (op == FUTEX_WAIT)) {
                if (copy_from_user(&t, utime, sizeof(t)) != 0)
                        return -EFAULT;
+               if (!timespec_valid(&t))
+                       return -EINVAL;
                timeout = timespec_to_jiffies(&t) + 1;
        }
        /*
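
For reference, a hypothetical userspace caller (not part of this patch; a raw
syscall is assumed, since no libc wrapper for futex existed at the time) would
drive the new FUTEX_WAKE_OP path roughly as sketched below, with nr_wake2
carried in the timeout argument slot under the existing
"op >= FUTEX_REQUEUE" convention in sys_futex():

        /* illustrative sketch only -- names futex_a/futex_b are made up */
        #include <linux/futex.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static int futex_a, futex_b;    /* hypothetical futex words */

        static long wake_both(void)
        {
                /* Atomically set futex_b to 0; wake one waiter on futex_a,
                 * and, if futex_b's old value was non-zero, also wake one
                 * waiter on futex_b. */
                return syscall(SYS_futex,
                               &futex_a, FUTEX_WAKE_OP, 1,
                               (void *) 1UL,    /* nr_wake2 in the timeout slot */
                               &futex_b,
                               FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_NE, 0));
        }
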