Index: apps/fastboot/version.h
@@ -25,10 +25,10 @@
 #define __VERSION_H__
 
 
-#define VERSION "0.1.0"
+#define VERSION "0.1.1"
 #define VERSION_MAJOR 0
 #define VERSION_MINOR 1
-#define VERSION_PATCH 0
+#define VERSION_PATCH 1
 #define VERSION_SVN "$REVISION$"
 #define VERSION_SVN_INT $REVISIONINT$
 
Index: emcore/trunk/version.h
@@ -25,10 +25,10 @@
 #define __VERSION_H__
 
 
-#define VERSION "0.2.0"
+#define VERSION "0.2.1"
 #define VERSION_MAJOR 0
 #define VERSION_MINOR 2
-#define VERSION_PATCH 0
+#define VERSION_PATCH 1
 #define VERSION_SVN "$REVISION$"
 #define VERSION_SVN_INT $REVISIONINT$
 
Index: emcore/trunk/export/syscallapi.h
@@ -71,12 +71,12 @@
 #endif
 
 /* increase this every time the api struct changes */
-#define EMCORE_API_VERSION 2
+#define EMCORE_API_VERSION 3
 
 /* update this to latest version if a change to the api struct breaks
    backwards compatibility (and please take the opportunity to sort in any
    new function which are "waiting" at the end of the function table) */
-#define EMCORE_MIN_API_VERSION 2
+#define EMCORE_MIN_API_VERSION 3
 
 /* NOTE: To support backwards compatibility, only add new functions at
    the end of the structure.  Every time you add a new function,
Index: emcore/trunk/thread.c
@@ -93,6 +93,8 @@
     {
         obj->count = 1;
         obj->owner = current_thread;
+        obj->owned_next = current_thread->owned_mutexes;
+        current_thread->owned_mutexes = obj;
     }
     else if (obj->owner == current_thread) obj->count++;
     else
@@ -117,6 +119,35 @@
     return ret;
 }
 
+void mutex_unlock_internal(struct mutex* obj)
+{
+    struct mutex* o;
+    if (!obj->owner->owned_mutexes) return;
+    if (obj->owner->owned_mutexes == obj) obj->owner->owned_mutexes = obj->owned_next;
+    else
+    {
+        o = obj->owner->owned_mutexes;
+        while (o->owned_next)
+        {
+            if (o->owned_next == obj) o->owned_next = obj->owned_next;
+            o = o->owned_next;
+        }
+    }
+    if (obj->waiters)
+    {
+        obj->count = 1;
+        obj->owner = obj->waiters;
+        obj->waiters->state = THREAD_READY;
+        obj->waiters->block_type = THREAD_NOT_BLOCKED;
+        obj->waiters->blocked_by = NULL;
+        obj->waiters->timeout = 0;
+        obj->waiters = obj->waiters->queue_next;
+        obj->owned_next = obj->owner->owned_mutexes;
+        obj->owner->owned_mutexes = obj;
+    }
+    else obj->count = 0;
+}
+
 int mutex_unlock(struct mutex* obj)
 {
     int ret = THREAD_OK;
@@ -133,18 +164,8 @@
         leave_critical_section(mode);
         panicf(PANIC_KILLTHREAD, "Trying to unlock mutex owned by different thread! (%08X)", obj);
     }
-
     if (--(obj->count)) ret = obj->count;
-    else if (obj->waiters)
-    {
-        obj->count = 1;
-        obj->owner = obj->waiters;
-        obj->waiters->state = THREAD_READY;
-        obj->waiters->block_type = THREAD_NOT_BLOCKED;
-        obj->waiters->blocked_by = NULL;
-        obj->waiters->timeout = 0;
-        obj->waiters = obj->waiters->queue_next;
-    }
+    else mutex_unlock_internal(obj);
 
     leave_critical_section(mode);
     return ret;
@@ -485,6 +506,10 @@
         thread->state = THREAD_SUSPENDED;
     }
 
+    struct mutex* m;
+    for (m = thread->owned_mutexes; m; m = m->owned_next)
+        mutex_unlock_internal(m);
+
     leave_critical_section(mode);
 
     library_release_all_of_thread(thread);
Index: emcore/trunk/thread.h
@@ -68,8 +68,10 @@
 };
 
 
-#define SCHEDULER_THREAD_INFO_VERSION 2
+#define SCHEDULER_THREAD_INFO_VERSION 3
 
+struct mutex;
+
 struct scheduler_thread
 {
     uint32_t regs[16];
@@ -81,6 +83,7 @@
     uint32_t startusec;
     struct scheduler_thread* thread_next;
     struct scheduler_thread* queue_next;
+    struct mutex* owned_mutexes;
     uint32_t timeout;
     uint32_t blocked_since;
     void* blocked_by;
@@ -96,6 +99,7 @@
 {
     struct scheduler_thread* owner;
     struct scheduler_thread* waiters;
+    struct mutex* owned_next;
     int count;
 };
 
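For context on the thread.c/thread.h changes above: each thread now keeps an intrusive singly-linked list of the mutexes it currently owns (owned_mutexes in struct scheduler_thread, chained through owned_next in struct mutex), so that thread_kill() can hand every mutex the dying thread still holds to mutex_unlock_internal() instead of leaving waiters blocked forever. The standalone sketch below illustrates that bookkeeping pattern only; the thread and mutex structs and function names in it are simplified, hypothetical stand-ins, not the emCORE types, and it omits the scheduler, the waiter hand-off, and the critical sections.

/* Minimal sketch of the ownership-list bookkeeping introduced by this patch.
   All names below are simplified stand-ins, not the actual emCORE definitions. */
#include <stdio.h>
#include <stddef.h>

struct mutex;

struct thread
{
    const char* name;
    struct mutex* owned_mutexes;   /* head of intrusive singly-linked list */
};

struct mutex
{
    const char* name;
    struct thread* owner;
    struct mutex* owned_next;      /* next mutex owned by the same thread */
};

/* On an uncontended lock, push the mutex onto the owner's list,
   mirroring the two lines added to mutex_lock(). */
static void track_ownership(struct thread* t, struct mutex* m)
{
    m->owner = t;
    m->owned_next = t->owned_mutexes;
    t->owned_mutexes = m;
}

/* On release, unlink the mutex from its owner's list, mirroring the
   list surgery at the top of mutex_unlock_internal(). */
static void untrack_ownership(struct mutex* m)
{
    struct mutex** link = &m->owner->owned_mutexes;
    while (*link)
    {
        if (*link == m)
        {
            *link = m->owned_next;
            break;
        }
        link = &(*link)->owned_next;
    }
    m->owner = NULL;
}

/* What thread_kill() now does conceptually: release everything the dying
   thread still owns so other threads are not blocked indefinitely. */
static void release_all(struct thread* t)
{
    while (t->owned_mutexes)
        untrack_ownership(t->owned_mutexes);
}

int main(void)
{
    struct thread t = { "worker", NULL };
    struct mutex a = { "a", NULL, NULL }, b = { "b", NULL, NULL };

    track_ownership(&t, &a);
    track_ownership(&t, &b);
    release_all(&t);

    printf("owned after kill: %s\n", t.owned_mutexes ? "some" : "none");
    return 0;
}

The SCHEDULER_THREAD_INFO_VERSION and EMCORE_API_VERSION bumps elsewhere in the patch accompany these struct layout changes.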