| Index: emcore/trunk/target/ipodnano2g/lcd.c |
| — | — | @@ -257,10 +257,9 @@ |
| 258 | 258 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 259 | 259 | __asm__ volatile(" add r1, r1, r4 \n"); |
| 260 | 260 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 261 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 262 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 263 | | - __asm__ volatile(" cmp r1, #0xff \n"); |
| 264 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 261 | + __asm__ volatile(" cmp r1, #0xff \n"); |
| | 262 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 263 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 265 | 264 | __asm__ volatile(" mov r0, r1,lsr#3 \n"); |
| 266 | 265 | __asm__ volatile(" orr r2, r0,lsl#11 \n"); |
| 267 | 266 | __asm__ volatile(" sub r1, r1, r0,lsl#3 \n"); |
| — | — | @@ -273,10 +272,9 @@ |
| 274 | 273 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 275 | 274 | __asm__ volatile(" add r1, r1, r5 \n"); |
| 276 | 275 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 277 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 278 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 279 | | - __asm__ volatile(" cmp r1, #0xff \n"); |
| 280 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 276 | + __asm__ volatile(" cmp r1, #0xff \n"); |
| | 277 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 278 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 281 | 279 | __asm__ volatile(" mov r0, r1,lsr#2 \n"); |
| 282 | 280 | __asm__ volatile(" orr r2, r0,lsl#5 \n"); |
| 283 | 281 | __asm__ volatile(" sub r1, r1, r0,lsl#2 \n"); |
| — | — | @@ -289,10 +287,9 @@ |
| 290 | 288 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 291 | 289 | __asm__ volatile(" add r1, r1, r6 \n"); |
| 292 | 290 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 293 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 294 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 295 | | - __asm__ volatile(" cmp r1, #0xff \n"); |
| 296 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 291 | + __asm__ volatile(" cmp r1, #0xff \n"); |
| | 292 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 293 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 297 | 294 | __asm__ volatile(" mov r0, r1,lsr#3 \n"); |
| 298 | 295 | __asm__ volatile(" orr r2, r0 \n"); |
| 299 | 296 | __asm__ volatile(" sub r1, r1, r0,lsl#3 \n"); |
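The four-instruction clamp (compare against 0 and 0xff with conditional moves) is replaced by a three-instruction saturate in each colour channel: a single unsigned compare against 0xff is true both for negative values (which look huge when reinterpreted as unsigned) and for overflows, `mvnhi r1, r1, asr #31` then yields 0x00000000 for a negative input and 0xffffffff for a positive overflow, and `andhi r1, r1, #0xff` masks that to 0 or 0xff. A minimal C sketch of the same computation (names are illustrative, not from the source; it assumes arithmetic right shift of negative values, as GCC produces on ARM):

```c
#include <stdint.h>

/* Sketch of the new 3-instruction clamp to [0, 255].
   Out-of-range values are either negative or > 0xff; both compare
   greater than 0xff when viewed as unsigned, so one test covers both. */
static int32_t clamp_u8(int32_t v)
{
    if ((uint32_t)v > 0xffu)    /* cmp r1, #0xff          (HI condition)    */
        v = ~(v >> 31) & 0xff;  /* mvnhi r1, r1, asr #31; andhi r1, r1, #0xff */
    return v;                   /* negative -> 0, overflow -> 255           */
}
```

The same substitution is applied to the nano3g and nano4g drivers in the hunks below.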
| Index: emcore/trunk/target/ipodnano3g/lcd.c |
| — | — | @@ -95,15 +95,21 @@ |
| 96 | 96 | } |
| 97 | 97 | switch (lcd_detect()) |
| 98 | 98 | { |
| | 99 | +// case 0: |
| | 100 | +// pmu_write(0x31, 0x0b); // Vlcd @ 2.000V |
| | 101 | +// break; |
| 99 | 102 | case 1: |
| 100 | | - pmu_write(0x31, 0x0e); // Vlcd @ 2.400V |
| 101 | | - break; |
| | 103 | + pmu_write(0x31, 0x0e); // Vlcd @ 2.300V |
| | 104 | + break; |
| 102 | 105 | case 2: |
| 103 | | - pmu_write(0x31, 0x12); // Vlcd @ 2.700V |
| 104 | | - break; |
| | 106 | + pmu_write(0x31, 0x12); // Vlcd @ 2.700V |
| | 107 | + break; |
| | 108 | +// case 3: |
| | 109 | +// pmu_write(0x31, 0x0b); // Vlcd @ 2.000V |
| | 110 | +// break; |
| 105 | 111 | default: |
| 106 | | - pmu_write(0x31, 0x0b); // Vlcd @ 2.000V |
| 107 | | - } |
| | 112 | + pmu_write(0x31, 0x0b); // Vlcd @ 2.000V |
| | 113 | + } |
| 108 | 114 | } |
| 109 | 115 | |
| 110 | 116 | bool displaylcd_busy() ICODE_ATTR; |
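Besides adding the commented-out entries for panel types 0 and 3, this hunk corrects the comment for code 0x0e from 2.400V to 2.300V. The three commented voltages fit a linear 100mV-per-step scale with a 0.9V base; the helper below only illustrates that inferred pattern and is not taken from the driver or a PMU datasheet:

```c
#include <stdint.h>

/* Hypothetical mapping inferred from the comments in this hunk:
   0x0b -> 2.000V, 0x0e -> 2.300V, 0x12 -> 2.700V, i.e. 0.9V + 100mV/step.
   Not from the driver source or a PMU datasheet. */
static unsigned vlcd_code_to_mv(uint8_t code)
{
    return 900u + 100u * (unsigned)code;
}
```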
| — | — | @@ -184,7 +190,7 @@ |
| 185 | 191 | lli->nextlli = last ? NULL : &lcd_lli[i + 1]; |
| 186 | 192 | lli->control = 0x70240000 | (last ? pixels : 0xfff) |
| 187 | 193 | | (last ? 0x80000000 : 0) | (solid ? 0 : 0x4000000); |
| 188 | | - if (!solid) data = (void*)(((uint32_t)data) + 0x1ffe); |
| | 194 | + if (!solid) data += 0x1ffe; |
| 189 | 195 | } |
| 190 | 196 | clean_dcache(); |
| 191 | 197 | DMAC0C4CONFIG = 0x88c1; |
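The cast-and-add pointer bump is shortened to plain `data += 0x1ffe;`. The two forms reach the same byte only when `data`'s pointer arithmetic advances one byte per increment, i.e. a `char`/`uint8_t` pointer or `void*` under GCC's extension; the declaration of `data` lies outside this hunk, so the sketch below is illustrative only:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t buf[0x2000];
    uint8_t *data = buf;                                      /* hypothetical byte pointer */

    uint8_t *via_cast = (uint8_t*)((uintptr_t)data + 0x1ffe); /* old style (uintptr_t used
                                                                 here for portability; the
                                                                 driver casts to uint32_t) */
    uint8_t *via_ptr  = data + 0x1ffe;                        /* new style */

    assert(via_cast == via_ptr);                              /* same address either way */
    return 0;
}
```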
| — | — | @@ -266,10 +272,9 @@ |
| 267 | 273 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 268 | 274 | __asm__ volatile(" add r1, r1, r4 \n"); |
| 269 | 275 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 270 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 271 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 272 | 276 | __asm__ volatile(" cmp r1, #0xff \n"); |
| 273 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 277 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 278 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 274 | 279 | __asm__ volatile(" mov r0, r1,lsr#3 \n"); |
| 275 | 280 | __asm__ volatile(" orr r2, r0,lsl#11 \n"); |
| 276 | 281 | __asm__ volatile(" sub r1, r1, r0,lsl#3 \n"); |
| — | — | @@ -282,10 +287,9 @@ |
| 283 | 288 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 284 | 289 | __asm__ volatile(" add r1, r1, r5 \n"); |
| 285 | 290 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 286 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 287 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 288 | 291 | __asm__ volatile(" cmp r1, #0xff \n"); |
| 289 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 292 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 293 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 290 | 294 | __asm__ volatile(" mov r0, r1,lsr#2 \n"); |
| 291 | 295 | __asm__ volatile(" orr r2, r0,lsl#5 \n"); |
| 292 | 296 | __asm__ volatile(" sub r1, r1, r0,lsl#2 \n"); |
| — | — | @@ -298,10 +302,9 @@ |
| 299 | 303 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 300 | 304 | __asm__ volatile(" add r1, r1, r6 \n"); |
| 301 | 305 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 302 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 303 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 304 | 306 | __asm__ volatile(" cmp r1, #0xff \n"); |
| 305 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 307 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 308 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 306 | 309 | __asm__ volatile(" mov r0, r1,lsr#3 \n"); |
| 307 | 310 | __asm__ volatile(" orr r2, r0 \n"); |
| 308 | 311 | __asm__ volatile(" sub r1, r1, r0,lsl#3 \n"); |
| Index: emcore/trunk/target/ipodnano4g/lcd.c |
| — | — | @@ -230,10 +230,9 @@ |
| 231 | 231 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 232 | 232 | __asm__ volatile(" add r1, r1, r4 \n"); |
| 233 | 233 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 234 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 235 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 236 | | - __asm__ volatile(" cmp r1, #0xff \n"); |
| 237 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 234 | + __asm__ volatile(" cmp r1, #0xff \n"); |
| | 235 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 236 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 238 | 237 | __asm__ volatile(" mov r0, r1,lsr#3 \n"); |
| 239 | 238 | __asm__ volatile(" orr r2, r0,lsl#11 \n"); |
| 240 | 239 | __asm__ volatile(" sub r1, r1, r0,lsl#3 \n"); |
| — | — | @@ -246,10 +245,9 @@ |
| 247 | 246 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 248 | 247 | __asm__ volatile(" add r1, r1, r5 \n"); |
| 249 | 248 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 250 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 251 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 252 | | - __asm__ volatile(" cmp r1, #0xff \n"); |
| 253 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 249 | + __asm__ volatile(" cmp r1, #0xff \n"); |
| | 250 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 251 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 254 | 252 | __asm__ volatile(" mov r0, r1,lsr#2 \n"); |
| 255 | 253 | __asm__ volatile(" orr r2, r0,lsl#5 \n"); |
| 256 | 254 | __asm__ volatile(" sub r1, r1, r0,lsl#2 \n"); |
| — | — | @@ -262,10 +260,9 @@ |
| 263 | 261 | __asm__ volatile(" ldrsb r0, [r7] \n"); |
| 264 | 262 | __asm__ volatile(" add r1, r1, r6 \n"); |
| 265 | 263 | __asm__ volatile(" add r1, r1, r0 \n"); |
| 266 | | - __asm__ volatile(" cmp r1, #0 \n"); |
| 267 | | - __asm__ volatile(" movlt r1, #0 \n"); |
| 268 | | - __asm__ volatile(" cmp r1, #0xff \n"); |
| 269 | | - __asm__ volatile(" movgt r1, #0xff \n"); |
| | 264 | + __asm__ volatile(" cmp r1, #0xff \n"); |
| | 265 | + __asm__ volatile(" mvnhi r1, r1,asr#31 \n"); |
| | 266 | + __asm__ volatile(" andhi r1, r1, #0xff \n"); |
| 270 | 267 | __asm__ volatile(" mov r0, r1,lsr#3 \n"); |
| 271 | 268 | __asm__ volatile(" orr r2, r0 \n"); |
| 272 | 269 | __asm__ volatile(" sub r1, r1, r0,lsl#3 \n"); |