Index: emcore/trunk/target/ipodnano2g/lcd.c
@@ -91,9 +91,7 @@
 void displaylcd_sync() ICODE_ATTR;
 void displaylcd_sync()
 {
-    mutex_lock(&lcd_mutex, TIMEOUT_BLOCK);
     while (displaylcd_busy()) sleep(100);
-    mutex_unlock(&lcd_mutex);
 }
 
 void displaylcd_setup(unsigned int startx, unsigned int endx,
@@ -101,8 +99,8 @@
 void displaylcd_setup(unsigned int startx, unsigned int endx,
                       unsigned int starty, unsigned int endy)
 {
+    displaylcd_sync();
     mutex_lock(&lcd_mutex, TIMEOUT_BLOCK);
-    displaylcd_sync();
     if (lcd_detect() == 2)
     {
         lcd_send_cmd(0x50);
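These two hunks reorder synchronization against locking: displaylcd_sync() no longer takes lcd_mutex itself and is reduced to a plain busy-poll, so displaylcd_setup() now drains any in-flight transfer before acquiring the mutex rather than while holding it. A minimal sketch of the resulting caller pattern (the function names are from this file; the comments are mine, not from the source):

    /* Wait out the previous transfer without holding the lock;
     * displaylcd_sync() now only polls displaylcd_busy(). */
    displaylcd_sync();
    /* Then serialize access to the LCD controller registers. */
    mutex_lock(&lcd_mutex, TIMEOUT_BLOCK);
    /* ... program the update window and push pixel data ... */
    mutex_unlock(&lcd_mutex);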
@@ -190,61 +188,121 @@
 
 void displaylcd_dither(unsigned int x, unsigned int y, unsigned int width,
                        unsigned int height, void* data, unsigned int datax,
-                       unsigned int datay, unsigned int stride, bool solid) ICODE_ATTR;
+                       unsigned int datay, unsigned int stride, bool solid)
+                      ICODE_ATTR __attribute__((naked,noinline));
 void displaylcd_dither(unsigned int x, unsigned int y, unsigned int width,
                        unsigned int height, void* data, unsigned int datax,
                        unsigned int datay, unsigned int stride, bool solid)
 {
-    int pixels = width * height;
-    if (pixels <= 0) return;
-    displaylcd_setup(x, x + width - 1, y, y + height - 1);
-    int corrsize = width * 3;
-    signed char* corr = (signed char*)malloc(corrsize);
-    if (!corr)
-    {
-        mutex_unlock(&lcd_mutex);
-        return;
-    }
-    memset(corr, 0, corrsize);
-    unsigned char* in = (unsigned char*)data + (stride * datay + datax) * 3;
-    for (y = 0; y < height; y++)
-    {
-        int i;
-        signed char* corrptr = corr;
-        signed char lastcorr[3] = {0};
-        for (x = 0; x < width; x++)
-        {
-            unsigned int pixel = 0;
-            signed char* lastcorrptr = lastcorr;
-            int orig = *in++ + *corrptr + *lastcorrptr;
-            orig = MAX(0, MIN(255, orig));
-            unsigned int real = orig >> 3;
-            pixel |= real << 11;
-            int err = orig - ((real << 3) | (real >> 2));
-            *corrptr++ = (*lastcorrptr >> 1) + err >> 2;
-            *lastcorrptr++ = err >> 1;
-            orig = *in++ + *corrptr + *lastcorrptr;
-            orig = MAX(0, MIN(255, orig));
-            real = orig >> 2;
-            pixel |= real << 5;
-            err = orig - ((real << 2) | (real >> 4));
-            *corrptr++ = (*lastcorrptr >> 1) + err >> 2;
-            *lastcorrptr++ = err >> 1;
-            orig = *in++ + *corrptr + *lastcorrptr;
-            orig = MAX(0, MIN(255, orig));
-            real = orig >> 3;
-            pixel |= real;
-            err = orig - ((real << 3) | (real >> 2));
-            *corrptr++ = (*lastcorrptr >> 1) + err >> 2;
-            *lastcorrptr++ = err >> 1;
-            LCDWDATA = pixel;
-            if (solid) in -= 3;
-        }
-        if (solid) in += stride * 3;
-        else in += (stride - width) * 3;
-    }
-    free(corr);
-    mutex_unlock(&lcd_mutex);
+    __asm__ volatile("    muls r12, r2, r3\n");
+    __asm__ volatile("    bxeq lr\n");
+    __asm__ volatile("    stmfd sp!, {r2-r11,lr}\n");
+    __asm__ volatile("    mov r12, r2\n");
+    __asm__ volatile("    add r8, r2, r2,lsl#1\n");
+    __asm__ volatile("    add r3, r1, r3\n");
+    __asm__ volatile("    sub r3, r3, #1\n");
+    __asm__ volatile("    mov r2, r1\n");
+    __asm__ volatile("    add r1, r0, r12\n");
+    __asm__ volatile("    sub r1, r1, #1\n");
+    __asm__ volatile("    bl displaylcd_setup\n");
+    __asm__ volatile("    mov r0, r8\n");
+    __asm__ volatile("    bl malloc\n");
+    __asm__ volatile("    cmp r0, #0\n");
+    __asm__ volatile("    beq displaylcd_dither_unlock\n");
+    __asm__ volatile("    mov r2, r8\n");
+    __asm__ volatile("    mov r1, #0\n");
+    __asm__ volatile("    mov r8, r0\n");
+    __asm__ volatile("    bl memset\n");
+    __asm__ volatile("    ldr r0, [sp,#0x30]\n");
+    __asm__ volatile("    ldr r1, [sp,#0x34]\n");
+    __asm__ volatile("    ldr r11, [sp,#0x38]\n");
+    __asm__ volatile("    ldr r3, [sp,#0x2c]\n");
+    __asm__ volatile("    mla r0, r1, r11, r0\n");
+    __asm__ volatile("    ldr r12, [sp,#0x04]\n");
+    __asm__ volatile("    ldr r2, [sp,#0x3c]\n");
+    __asm__ volatile("    add r3, r3, r0,lsl#1\n");
+    __asm__ volatile("    cmp r2, #0\n");
+    __asm__ volatile("    ldreq r1, [sp]\n");
+    __asm__ volatile("    add r3, r3, r0\n");
+    __asm__ volatile("    subeq r11, r11, r1\n");
+    __asm__ volatile("    add r11, r11, r11,lsl#1\n");
+    __asm__ volatile("    movne r10, #3\n");
+    __asm__ volatile("    moveq r10, #0\n");
+    __asm__ volatile("    ldr r9, =0x38600040\n");
+    __asm__ volatile("displaylcd_dither_y:\n");
+    __asm__ volatile("    ldr lr, [sp]\n");
+    __asm__ volatile("    mov r4, #0\n");
+    __asm__ volatile("    mov r5, #0\n");
+    __asm__ volatile("    mov r6, #0\n");
+    __asm__ volatile("    mov r7, r8\n");
+    __asm__ volatile("displaylcd_dither_x:\n");
+    __asm__ volatile("    mov r2, #0\n");
+    __asm__ volatile("    ldrb r1, [r3], #1\n");
+    __asm__ volatile("    ldrsb r0, [r7]\n");
+    __asm__ volatile("    add r1, r1, r4\n");
+    __asm__ volatile("    add r1, r1, r0\n");
+    __asm__ volatile("    cmp r1, #0\n");
+    __asm__ volatile("    movlt r1, #0\n");
+    __asm__ volatile("    cmp r1, #0xff\n");
+    __asm__ volatile("    movgt r1, #0xff\n");
+    __asm__ volatile("    mov r0, r1,lsr#3\n");
+    __asm__ volatile("    orr r2, r0,lsl#11\n");
+    __asm__ volatile("    sub r1, r1, r0,lsl#3\n");
+    __asm__ volatile("    sub r1, r1, r0,lsr#2\n");
+    __asm__ volatile("    mov r4, r4,lsr#1\n");
+    __asm__ volatile("    add r4, r4, r1,lsr#2\n");
+    __asm__ volatile("    strb r4, [r7], #1\n");
+    __asm__ volatile("    mov r4, r1,asr#1\n");
+    __asm__ volatile("    ldrb r1, [r3], #1\n");
+    __asm__ volatile("    ldrsb r0, [r7]\n");
+    __asm__ volatile("    add r1, r1, r5\n");
+    __asm__ volatile("    add r1, r1, r0\n");
+    __asm__ volatile("    cmp r1, #0\n");
+    __asm__ volatile("    movlt r1, #0\n");
+    __asm__ volatile("    cmp r1, #0xff\n");
+    __asm__ volatile("    movgt r1, #0xff\n");
+    __asm__ volatile("    mov r0, r1,lsr#2\n");
+    __asm__ volatile("    orr r2, r0,lsl#5\n");
+    __asm__ volatile("    sub r1, r1, r0,lsl#2\n");
+    __asm__ volatile("    sub r1, r1, r0,lsr#4\n");
+    __asm__ volatile("    mov r5, r5,lsr#1\n");
+    __asm__ volatile("    add r5, r5, r1,lsr#2\n");
+    __asm__ volatile("    strb r5, [r7], #1\n");
+    __asm__ volatile("    mov r5, r1,asr#1\n");
+    __asm__ volatile("    ldrb r1, [r3], #1\n");
+    __asm__ volatile("    ldrsb r0, [r7]\n");
+    __asm__ volatile("    add r1, r1, r6\n");
+    __asm__ volatile("    add r1, r1, r0\n");
+    __asm__ volatile("    cmp r1, #0\n");
+    __asm__ volatile("    movlt r1, #0\n");
+    __asm__ volatile("    cmp r1, #0xff\n");
+    __asm__ volatile("    movgt r1, #0xff\n");
+    __asm__ volatile("    mov r0, r1,lsr#3\n");
+    __asm__ volatile("    orr r2, r0\n");
+    __asm__ volatile("    sub r1, r1, r0,lsl#3\n");
+    __asm__ volatile("    sub r1, r1, r0,lsr#2\n");
+    __asm__ volatile("    mov r6, r6,lsr#1\n");
+    __asm__ volatile("    add r6, r6, r1,lsr#2\n");
+    __asm__ volatile("    strb r6, [r7], #1\n");
+    __asm__ volatile("displaylcd_dither_waitlcd:\n");
+    __asm__ volatile("    ldr r0, [r9,#-0x24]\n");
+    __asm__ volatile("    mov r6, r1,asr#1\n");
+    __asm__ volatile("    tst r0, #0x10\n");
+    __asm__ volatile("    bne displaylcd_dither_waitlcd\n");
+    __asm__ volatile("    str r2, [r9]\n");
+    __asm__ volatile("    sub r3, r3, r10\n");
+    __asm__ volatile("    subs lr, lr, #1\n");
+    __asm__ volatile("    bne displaylcd_dither_x\n");
+    __asm__ volatile("    add r3, r3, r11\n");
+    __asm__ volatile("    subs r12, r12, #1\n");
+    __asm__ volatile("    bne displaylcd_dither_y\n");
+    __asm__ volatile("displaylcd_dither_free:\n");
+    __asm__ volatile("    mov r0, r8\n");
+    __asm__ volatile("    bl free\n");
+    __asm__ volatile("displaylcd_dither_unlock:\n");
+    __asm__ volatile("    ldr r0, =lcd_mutex\n");
+    __asm__ volatile("    bl mutex_unlock\n");
+    __asm__ volatile("    ldmfd sp!, {r2-r11,pc}\n");
 }
 
 void displaylcd(unsigned int x, unsigned int y, unsigned int width, unsigned int height,
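The assembly block above replaces the removed C body one for one: per pixel it clamps each channel, quantizes to RGB565, diffuses roughly half of the quantization error to the next pixel and a quarter of it (plus half of the incoming carry) to the row below, then spins on the LCD status register (0x38600040 - 0x24, bit 0x10) before storing the packed pixel to LCDWDATA. A C sketch of one channel step as the assembly computes it, modulo exact shift semantics for negative carries; dither_channel is a hypothetical helper for illustration, with bits being 5 for red and blue and 6 for green:

    static inline unsigned int dither_channel(int in, signed char* corr,
                                              signed char* lastcorr, int bits)
    {
        int orig = in + *corr + *lastcorr;       /* add diffused error */
        if (orig < 0) orig = 0;                  /* clamp to 8 bits */
        if (orig > 255) orig = 255;
        unsigned int real = orig >> (8 - bits);  /* quantize the channel */
        /* error against the value the panel will actually reconstruct */
        int err = orig - ((real << (8 - bits)) | (real >> (2 * bits - 8)));
        *corr = (*lastcorr >> 1) + (err >> 2);   /* carry to the row below */
        *lastcorr = err >> 1;                    /* carry to the next pixel */
        return real;                             /* caller packs r<<11|g<<5|b */
    }

One detail the rewrite changes in passing: the assembly computes the row-below carry as (*lastcorr >> 1) + (err >> 2), while the removed C wrote "(*lastcorr >> 1) + err >> 2", which C's precedence rules parse as ((*lastcorr >> 1) + err) >> 2; the assembly's reading is presumably the intended weighting.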
@@ -256,6 +314,7 @@
 void filllcd(unsigned int x, unsigned int y, unsigned int width, unsigned int height, int color)
 {
     if (width * height <= 0) return;
+    displaylcd_sync();
     mutex_lock(&lcd_mutex, TIMEOUT_BLOCK);
     displaylcd_dither(x, y, width, height, &color, 0, 0, 0, true);
     mutex_unlock(&lcd_mutex);
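The last hunk gives filllcd() the same sync-before-lock treatment. Note how the solid path works: filllcd() reuses displaylcd_dither() as a fill by pointing data at a single RGB888 color with stride 0 and solid set, and the inner loop rewinds the source pointer after every pixel (the r10 = 3 step in the assembly), so the same three bytes feed the whole rectangle while still being dithered per pixel. A hypothetical call, assuming the Nano 2G's 176x132 panel:

    /* Fill the whole screen with an arbitrary example color (mid grey);
     * the dither path spreads the 24-bit value over RGB565. */
    filllcd(0, 0, 176, 132, 0x7F7F7F);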