Index: umsboot/target/ipodnano4g/lcd.c
— | — | @@ -86,6 +86,8 @@
87 | 87 | unsigned int starty, unsigned int endy, void* data, int color)
88 | 88 | {
89 | 89 | displaylcd_sync();
| 90 | + while (!(LCDSTATUS & 0x2));
| 91 | + LCDCON = 0x41100db8;
90 | 92 | lcd_send_cmd(0x2a);
91 | 93 | lcd_send_data(startx);
92 | 94 | lcd_send_data(endx);
Index: umsboot/target/ipodnano4g/s5l8720.h |
— | — | @@ -528,6 +528,7 @@ |
529 | 529 |
530 | 530 |
531 | 531 | /////LCD/////
| 532 | +#define LCDCON (*((uint32_t volatile*)(0x38300000)))
532 | 533 | #define LCDWCMD (*((uint32_t volatile*)(0x38300004)))
533 | 534 | #define LCDSTATUS (*((uint32_t volatile*)(0x3830001c)))
534 | 535 | #define LCDWDATA (*((uint32_t volatile*)(0x38300040)))
Index: emcore/trunk/target/ipodnano2g/lcd.c
— | — | @@ -219,6 +219,7 @@
220 | 220 | __asm__ volatile(" str r12, [sp] \n");
221 | 221 | __asm__ volatile(" mov r12, r2 \n");
222 | 222 | __asm__ volatile(" add r8, r2, r2,lsl#1 \n");
| 223 | + __asm__ volatile(" add r8, r8, #3 \n");
223 | 224 | __asm__ volatile(" add r3, r1, r3 \n");
224 | 225 | __asm__ volatile(" sub r3, r3, #1 \n");
225 | 226 | __asm__ volatile(" mov r2, r1 \n");
— | — | @@ -258,7 +259,7 @@
259 | 260 | __asm__ volatile(" mov r7, r8 \n");
260 | 261 | __asm__ volatile("displaylcd_dither_x: \n");
261 | 262 | __asm__ volatile(" ldrb r1, [r3], #1 \n");
262 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
| 263 | + __asm__ volatile(" ldrsb r0, [r7,#3] \n");
263 | 264 | __asm__ volatile(" add r1, r1, r4 \n");
264 | 265 | __asm__ volatile(" add r1, r1, r0 \n");
265 | 266 | __asm__ volatile(" cmp r1, #0xff \n");
— | — | @@ -273,7 +274,7 @@
274 | 275 | __asm__ volatile(" strb r4, [r7], #1 \n");
275 | 276 | __asm__ volatile(" mov r4, r1,asr#1 \n");
276 | 277 | __asm__ volatile(" ldrb r1, [r3], #1 \n");
277 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
| 278 | + __asm__ volatile(" ldrsb r0, [r7,#3] \n");
278 | 279 | __asm__ volatile(" add r1, r1, r5 \n");
279 | 280 | __asm__ volatile(" add r1, r1, r0 \n");
280 | 281 | __asm__ volatile(" cmp r1, #0xff \n");
— | — | @@ -280,7 +281,7 @@
281 | 282 | __asm__ volatile(" mvnhi r1, r1,asr#31 \n");
282 | 283 | __asm__ volatile(" andhi r1, r1, #0xff \n");
283 | 284 | __asm__ volatile(" mov r0, r1,lsr#2 \n");
284 | | - __asm__ volatile(" orr r2, r2, r0,lsl#5 \n");
| 285 | + __asm__ volatile(" orr r2, r2, r0,lsl#5 \n");
285 | 286 | __asm__ volatile(" sub r1, r1, r0,lsl#2 \n");
286 | 287 | __asm__ volatile(" sub r1, r1, r0,lsr#4 \n");
287 | 288 | __asm__ volatile(" mov r5, r5,lsr#1 \n");
— | — | @@ -288,7 +289,7 @@
289 | 290 | __asm__ volatile(" strb r5, [r7], #1 \n");
290 | 291 | __asm__ volatile(" mov r5, r1,asr#1 \n");
291 | 292 | __asm__ volatile(" ldrb r1, [r3], #1 \n");
292 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
| 293 | + __asm__ volatile(" ldrsb r0, [r7,#3] \n");
293 | 294 | __asm__ volatile(" add r1, r1, r6 \n");
294 | 295 | __asm__ volatile(" add r1, r1, r0 \n");
295 | 296 | __asm__ volatile(" cmp r1, #0xff \n");
Index: emcore/trunk/target/ipodnano3g/lcd.c
— | — | @@ -236,6 +236,7 @@
237 | 237 | __asm__ volatile(" str r12, [sp] \n");
238 | 238 | __asm__ volatile(" mov r12, r2 \n");
239 | 239 | __asm__ volatile(" add r8, r2, r2,lsl#1 \n");
| 240 | + __asm__ volatile(" add r8, r8, #3 \n");
240 | 241 | __asm__ volatile(" add r3, r1, r3 \n");
241 | 242 | __asm__ volatile(" sub r3, r3, #1 \n");
242 | 243 | __asm__ volatile(" mov r2, r1 \n");
— | — | @@ -281,7 +282,7 @@
282 | 283 | __asm__ volatile(" mov r7, r8 \n");
283 | 284 | __asm__ volatile("displaylcd_dither_x: \n");
284 | 285 | __asm__ volatile(" ldrb r1, [r3], #1 \n");
285 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
| 286 | + __asm__ volatile(" ldrsb r0, [r7,#3] \n");
286 | 287 | __asm__ volatile(" add r1, r1, r4 \n");
287 | 288 | __asm__ volatile(" add r1, r1, r0 \n");
288 | 289 | __asm__ volatile(" cmp r1, #0xff \n");
— | — | @@ -296,7 +297,7 @@
297 | 298 | __asm__ volatile(" strb r4, [r7], #1 \n");
298 | 299 | __asm__ volatile(" mov r4, r1,asr#1 \n");
299 | 300 | __asm__ volatile(" ldrb r1, [r3], #1 \n");
300 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
| 301 | + __asm__ volatile(" ldrsb r0, [r7,#3] \n");
301 | 302 | __asm__ volatile(" add r1, r1, r5 \n");
302 | 303 | __asm__ volatile(" add r1, r1, r0 \n");
303 | 304 | __asm__ volatile(" cmp r1, #0xff \n");
— | — | @@ -311,7 +312,7 @@
312 | 313 | __asm__ volatile(" strb r5, [r7], #1 \n");
313 | 314 | __asm__ volatile(" mov r5, r1,asr#1 \n");
314 | 315 | __asm__ volatile(" ldrb r1, [r3], #1 \n");
315 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
| 316 | + __asm__ volatile(" ldrsb r0, [r7,#3] \n");
316 | 317 | __asm__ volatile(" add r1, r1, r6 \n");
317 | 318 | __asm__ volatile(" add r1, r1, r0 \n");
318 | 319 | __asm__ volatile(" cmp r1, #0xff \n");
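The nano2g and nano3g hunks above touch the same error-diffusion dither loops. Below is an illustrative single-channel C model of one iteration (names are mine, not from the source, and the assembly mixes logical and arithmetic shifts, so this is only an approximation). It shows where the changed read lands: the row-error buffer is now fetched one pixel (3 bytes) ahead, and the matching "add r8, r8, #3" grows the per-row offset so the read on the last pixel still stays inside the buffer.

    #include <stdint.h>

    /* Hypothetical model of one per-component step of displaylcd_dither_x.
     * "down" is the error the previous row stored for this position (after
     * the patch it is fetched at [r7,#3], one pixel further ahead);
     * "carry" is the error carried from the previous pixel of this row. */
    static uint8_t dither_step(uint8_t in, int8_t down,
                               int8_t *carry, int8_t *store_for_next_row)
    {
        int v = in + *carry + down;
        if (v < 0) v = 0;                   /* cmp/mvnhi/andhi: clamp to 0..255 */
        if (v > 255) v = 255;
        int q = v >> 2;                     /* 6-bit value packed for the LCD   */
        int err = v - (q << 2) - (q >> 4);  /* v minus q expanded back to 8 bit */
        *store_for_next_row = (int8_t)((*carry >> 1) + (err >> 2));
        *carry = (int8_t)(err >> 1);
        return (uint8_t)q;
    }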
Index: emcore/trunk/target/ipodnano4g/lcd.c
— | — | @@ -92,7 +92,7 @@
93 | 93 | mutex_lock(&lcd_mutex, TIMEOUT_BLOCK);
94 | 94 | displaylcd_sync();
95 | 95 | while (!(LCDSTATUS & 0x2));
96 | | - LCDCON = 0x41100db8;
| 96 | + LCDCON = 0x81100db8;
97 | 97 | }
98 | 98 |
99 | 99 | bool displaylcd_busy() ICODE_ATTR;
— | — | @@ -121,7 +121,7 @@
122 | 122 | }
123 | 123 | else while (DMAC0C4CONFIG & 1);
124 | 124 | while (!(LCDSTATUS & 0x2));
125 | | - LCDCON = 0x41100db8;
| 125 | + LCDCON = 0x81100db8;
126 | 126 | lcd_send_cmd(0x2a);
127 | 127 | lcd_send_data(startx);
128 | 128 | lcd_send_data(endx);
— | — | @@ -190,7 +190,6 @@
191 | 191 | unsigned int height, void* data, unsigned int datax,
192 | 192 | unsigned int datay, unsigned int stride, bool solid)
193 | 193 | {
194 | | -//TODO: This is ARMv5E optimized assembly, should be converted to ARMv6
195 | 194 | __asm__ volatile(" muls r12, r2, r3 \n");
196 | 195 | __asm__ volatile(" bxeq lr \n");
197 | 196 | __asm__ volatile(" stmfd sp!, {r1-r11,lr} \n");
— | — | @@ -197,8 +196,9 @@
198 | 197 | __asm__ volatile(" mov r12, #0 \n");
199 | 198 | __asm__ volatile(" str r12, [sp] \n");
200 | 199 | __asm__ volatile(" mov r12, r2 \n");
201 | | - __asm__ volatile(" add r8, r2, r2,lsl#1 \n");
| 200 | + __asm__ volatile(" mov r8, r2,lsl#2 \n");
202 | 201 | __asm__ volatile(" add r3, r1, r3 \n");
| 202 | + __asm__ volatile(" add r8, r8, #4 \n");
203 | 203 | __asm__ volatile(" sub r3, r3, #1 \n");
204 | 204 | __asm__ volatile(" mov r2, r1 \n");
205 | 205 | __asm__ volatile(" add r1, r0, r12 \n");
— | — | @@ -226,74 +226,43 @@
227 | 227 | __asm__ volatile(" add r3, r3, r0 \n");
228 | 228 | __asm__ volatile(" subeq r11, r11, r1 \n");
229 | 229 | __asm__ volatile(" add r11, r11, r11,lsl#1 \n");
230 | | - __asm__ volatile(" movne r10, #3 \n");
231 | | - __asm__ volatile(" moveq r10, #0 \n");
| 230 | + __asm__ volatile(" movne r10, #0 \n");
| 231 | + __asm__ volatile(" moveq r10, #3 \n");
232 | 232 | __asm__ volatile(" ldr r9, =0x38300040 \n");
233 | 233 | __asm__ volatile("displaylcd_dither_wait : \n");
234 | 234 | __asm__ volatile(" ldr r4, [r9,#-0x24] \n");
235 | 235 | __asm__ volatile(" tst r4, #2 \n");
236 | 236 | __asm__ volatile(" beq displaylcd_dither_wait \n");
237 | | - __asm__ volatile(" ldr r4, =0x41104eb8 \n");
| 237 | + __asm__ volatile(" ldr r4, =0x81104eb8 \n");
238 | 238 | __asm__ volatile(" str r4, [r9,#-0x40] \n");
| 239 | + __asm__ volatile(" ldr r6, =0x30303 \n");
| 240 | + __asm__ volatile(" ldr r4, =0x808080 \n");
239 | 241 | __asm__ volatile("displaylcd_dither_y: \n");
240 | 242 | __asm__ volatile(" ldr lr, [sp] \n");
241 | | - __asm__ volatile(" mov r4, #0 \n");
242 | 243 | __asm__ volatile(" mov r5, #0 \n");
243 | | - __asm__ volatile(" mov r6, #0 \n");
244 | 244 | __asm__ volatile(" mov r7, r8 \n");
245 | | - __asm__ volatile("displaylcd_dither_x: \n");
246 | | - __asm__ volatile(" ldrb r1, [r3], #1 \n");
247 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
248 | | - __asm__ volatile(" add r1, r1, r4 \n");
249 | | - __asm__ volatile(" add r1, r1, r0 \n");
250 | | - __asm__ volatile(" cmp r1, #0xff \n");
251 | | - __asm__ volatile(" mvnhi r1, r1,asr#31 \n");
252 | | - __asm__ volatile(" andhi r1, r1, #0xff \n");
253 | | - __asm__ volatile(" mov r0, r1,lsr#2 \n");
254 | | - __asm__ volatile(" mov r2, r0,lsl#18 \n");
255 | | - __asm__ volatile(" sub r1, r1, r0,lsl#2 \n");
256 | | - __asm__ volatile(" sub r1, r1, r0,lsr#4 \n");
257 | | - __asm__ volatile(" mov r4, r4,lsr#1 \n");
258 | | - __asm__ volatile(" add r4, r4, r1,lsr#2 \n");
259 | | - __asm__ volatile(" strb r4, [r7], #1 \n");
260 | | - __asm__ volatile(" mov r4, r1,asr#1 \n");
261 | | - __asm__ volatile(" ldrb r1, [r3], #1 \n");
262 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
263 | | - __asm__ volatile(" add r1, r1, r5 \n");
264 | | - __asm__ volatile(" add r1, r1, r0 \n");
265 | | - __asm__ volatile(" cmp r1, #0xff \n");
266 | | - __asm__ volatile(" mvnhi r1, r1,asr#31 \n");
267 | | - __asm__ volatile(" andhi r1, r1, #0xff \n");
268 | | - __asm__ volatile(" mov r0, r1,lsr#2 \n");
269 | | - __asm__ volatile(" orr r2, r2, r0,lsl#10 \n");
270 | | - __asm__ volatile(" sub r1, r1, r0,lsl#2 \n");
271 | | - __asm__ volatile(" sub r1, r1, r0,lsr#4 \n");
272 | | - __asm__ volatile(" mov r5, r5,lsr#1 \n");
273 | | - __asm__ volatile(" add r5, r5, r1,lsr#2 \n");
274 | | - __asm__ volatile(" strb r5, [r7], #1 \n");
275 | | - __asm__ volatile(" mov r5, r1,asr#1 \n");
276 | | - __asm__ volatile(" ldrb r1, [r3], #1 \n");
277 | | - __asm__ volatile(" ldrsb r0, [r7] \n");
278 | | - __asm__ volatile(" add r1, r1, r6 \n");
279 | | - __asm__ volatile(" add r1, r1, r0 \n");
280 | | - __asm__ volatile(" cmp r1, #0xff \n");
281 | | - __asm__ volatile(" mvnhi r1, r1,asr#31 \n");
282 | | - __asm__ volatile(" andhi r1, r1, #0xff \n");
283 | | - __asm__ volatile(" mov r0, r1,lsr#2 \n");
284 | | - __asm__ volatile(" orr r2, r2, r0,lsl#2 \n");
285 | | - __asm__ volatile(" sub r1, r1, r0,lsl#2 \n");
286 | | - __asm__ volatile(" sub r1, r1, r0,lsr#4 \n");
287 | | - __asm__ volatile(" mov r6, r6,lsr#1 \n");
288 | | - __asm__ volatile(" add r6, r6, r1,lsr#2 \n");
289 | | - __asm__ volatile(" strb r6, [r7], #1 \n");
290 | | - __asm__ volatile("displaylcd_dither_wait2: \n");
291 | | - __asm__ volatile(" ldr r0, [r9,#-0x24] \n");
292 | | - __asm__ volatile(" mov r6, r1,asr#1 \n");
293 | | - __asm__ volatile(" tst r0, #0x10 \n");
294 | | - __asm__ volatile(" bne displaylcd_dither_wait2 \n");
295 | | - __asm__ volatile(" str r2, [r9] \n");
296 | | - __asm__ volatile(" sub r3, r3, r10 \n");
297 | | - __asm__ volatile(" subs lr, lr, #1 \n");
| 245 | + __asm__ volatile("displaylcd_dither_x: \n"); // the lcd can accept one pixel every 25 clocks
| 246 | + __asm__ volatile(" ldr r0, [r3] \n"); // 1 cycle, 2 mem, r0 latency 4, r3 early
| 247 | + __asm__ volatile(" add r3, r3, r10 \n"); // 1 cycle
| 248 | + __asm__ volatile(" ldr r1, [r7,#4] \n"); // 1 cycle, 1 mem, r1 latency 3, r7 early
| 249 | + __asm__ volatile(" subs lr, lr, #1 \n"); // 1 cycle
| 250 | + __asm__ volatile(" ssub8 r0, r0, r4 \n"); // 1 cycle
| 251 | + __asm__ volatile(" sadd8 r1, r1, r5 \n"); // 1 cycle
| 252 | + __asm__ volatile(" qadd8 r0, r0, r1 \n"); // 1 cycle, r0 latency 2
| 253 | + // bubble (due to r0 latency)
| 254 | + __asm__ volatile(" sadd8 r0, r0, r4 \n"); // 1 cycle
| 255 | + __asm__ volatile(" str r0, [r9] \n"); // 1 cycle, 1 mem, r9 early
| 256 | + __asm__ volatile(" bic r2, r0, r6 \n"); // 1 cycle
| 257 | + __asm__ volatile(" and r1, r6, r0,lsr#6 \n"); // 1 cycle, r0 early
| 258 | + __asm__ volatile(" orr r2, r2, r1 \n"); // 1 cycle
| 259 | + __asm__ volatile(" mov r1, r5 \n"); // 1 cycle
| 260 | + __asm__ volatile(" shsub8 r5, r0, r2 \n"); // 1 cycle
| 261 | + __asm__ volatile(" shadd8 r1, r1, r5 \n"); // 1 cycle
| 262 | + __asm__ volatile(" str r1, [r7], #4 \n"); // 1 cycle, 1 mem, r7 early
| 263 | + __asm__ volatile(" nop \n"); // 2 cycles
| 264 | + __asm__ volatile(" nop \n"); // 2 cycles
| 265 | + __asm__ volatile(" nop \n"); // 2 cycles
| 266 | + __asm__ volatile(" nop \n"); // 2 cycles
298 | 267 | __asm__ volatile(" bne displaylcd_dither_x \n");
299 | 268 | __asm__ volatile(" add r3, r3, r11 \n");
300 | 269 | __asm__ volatile(" subs r12, r12, #1 \n");
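The rewritten nano4g loop appears to do the same per-component arithmetic as the older loops, but on three packed 8-bit components per 32-bit word using the ARMv6 SIMD byte instructions: ssub8/sadd8 bias the bytes around 0x80, qadd8 adds the accumulated error with saturation, and shsub8/shadd8 produce the halved errors with the same weights as before. The quantisation used for the error feedback is the bic/and/orr triple; a minimal sketch of that step, assuming r6 holds 0x00030303 and "out" is the word written to LCDWDATA (the helper name is illustrative):

    #include <stdint.h>

    /* One 32-bit word carries three 8-bit colour components; the fourth
     * byte is don't-care.  Per byte: drop the two LSBs and replicate the
     * two MSBs into them, expanding the 6-bit value back to 8 bits. */
    static uint32_t quantise(uint32_t out)
    {
        uint32_t q = out & ~0x00030303u;        /* bic r2, r0, r6        */
        return q | ((out >> 6) & 0x00030303u);  /* and r1, r6, r0,lsr#6  */
    }                                           /* orr r2, r2, r1        */

shsub8 then halves out minus quantise(out) per byte, and shadd8 averages that with the error carried in from the previous pixel before storing it back into the row buffer, which now uses four bytes per pixel (hence the earlier "mov r8, r2,lsl#2" and "add r8, r8, #4").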
Index: emcore/trunk/target/ipodnano4g/crt0.S |
— | — | @@ -85,6 +85,7 @@ |
86 | 86 | bne .mmuloop5
87 | 87 | mrc p15, 0, r0,c1,c0
88 | 88 | orr r0, r0, #5
| 89 | + orr r0, r0, #0x400000
89 | 90 | mcr p15, 0, r0,c1,c0
90 | 91 | ldr r0, =_sramsource
91 | 92 | ldr r1, =_sramstart
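The crt0.S change sets one more bit in the CP15 c1 control register next to the existing MMU/cache enable. 0x400000 is bit 22, which on ARMv6 cores is the U bit (unaligned data access enable); the word loads on packed 3-byte pixels in the reworked dither loop would depend on it, though the patch itself does not say so. For reference, a sketch of the bits involved (the bit names are standard ARMv6 control-register bits, the mapping to this patch is an assumption):

    /* CP15 c1 control register bits touched by crt0.S (ARMv6 naming). */
    #define CP15_CTRL_M  (1u << 0)   /* MMU enable        - part of "orr r0, r0, #5"        */
    #define CP15_CTRL_C  (1u << 2)   /* data cache enable - part of "orr r0, r0, #5"        */
    #define CP15_CTRL_U  (1u << 22)  /* unaligned access  - the new "orr r0, r0, #0x400000" */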
Index: emcore/trunk/target/ipodnano4g/s5l8720.h |
— | — | @@ -530,6 +530,7 @@ |
531 | 531 | /////LCD/////
532 | 532 | #define LCDCON (*((uint32_t volatile*)(0x38300000)))
533 | 533 | #define LCDWCMD (*((uint32_t volatile*)(0x38300004)))
| 534 | +#define LCDPHTIME (*((uint32_t volatile*)(0x38300010)))
534 | 535 | #define LCDSTATUS (*((uint32_t volatile*)(0x3830001c)))
535 | 536 | #define LCDWDATA (*((uint32_t volatile*)(0x38300040)))
536 | 537 |