root/arch/mips/kernel/r4xx0.S

   1 /*
   2  * arch/mips/kernel/r4xx0.S
   3  *
   4  * Copyright (C) 1994, 1995 Waldorf Electronics
   5  * Written by Ralf Baechle and Andreas Busse
   6  *
   7  * This file contains most of the R4xx0-specific routines.  Due to the
   8  * similarities it should hopefully also be fine for the R10000.  For
   9  * now the main concession to the R10000 is that we do not invalidate
  10  * TLB entries before calling the C handlers.
  11  *
  12  * This code is evil magic. Read appendix F (coprocessor 0 hazards) of
  13  * all R4xx0 manuals and remember that MIPS stands for "Microprocessor without
  14  * Interlocked Pipeline Stages" before you even think about changing this code!
  15  */
  16 #include <linux/config.h>
  17 
  18 #include <asm/asm.h>
  19 #include <asm/bootinfo.h>
  20 #include <asm/cachectl.h>
  21 #include <asm/mipsconfig.h>
  22 #include <asm/mipsregs.h>
  23 #include <asm/page.h>
  24 #include <asm/pgtable.h>
  25 #include <asm/processor.h>
  26 #include <asm/mipsregs.h>
  27 #include <asm/segment.h>
  28 #include <asm/stackframe.h>
  29 
  30 #ifdef __SMP__
  31 #error "Fix this for SMP"
  32 #else
  33 #define current current_set
  34 #endif
  35 
  36 MODE_ALIAS      =       0x0016                  # uncacheable
  37 
  38                 .text
  39                 .set    mips3
  40                 .set    noreorder
  41 
  42                 .align  5
  43                 NESTED(handle_tlbl, FR_SIZE, sp)
  44                 .set    noat
  45                 /*
  46                  * Check whether this is a refill or an invalid exception
  47                  *
  48                  * NOTE: Some MIPS manuals say that the R4x00 sets the
  49                  * BadVAddr only when EXL == 0. This is wrong - BadVAddr
  50                  * is set for all Refill, Invalid and Modified
  51                  * exceptions.
  52                  */
  53                 mfc0    k0,CP0_BADVADDR
  54                 mfc0    k1,CP0_ENTRYHI
  55                 ori     k0,0x1fff
  56                 xori    k0,0x1fff
  57                 andi    k1,0xff
  58                 or      k0,k1
  59                 mfc0    k1,CP0_ENTRYHI
  60                 mtc0    k0,CP0_ENTRYHI
  61                 nop                                     # for R4[04]00 pipeline
  62                 nop
  63                 nop
  64                 tlbp
  65                 nop                                     # for R4[04]00 pipeline
  66                 nop
  67                 mfc0    k0,CP0_INDEX
  68                 bgez    k0,invalid_tlbl                 # bad addr in c0_badvaddr
  69                 mtc0    k1,CP0_ENTRYHI                  # delay slot
  70                 /*
  71                  * Damn... The next nop is required on my R4400PC V5.0, but
  72                  * I don't know why - unlike the others, there is no
  73                  * documented reason for it :-(
  74                  */
  75                 nop
  76 
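/*
 * A note on the sequence above: the ori/xori pair rounds c0_badvaddr down
 * to the 8kb boundary that covers an even/odd page pair, and the low eight
 * bits of c0_entryhi (the current ASID) are merged in to form the key for
 * the tlbp probe.  A rough C sketch of the same computation follows; the
 * function and constant names are made up for illustration and are not
 * taken from the kernel headers.
 *
 *      #include <stdint.h>
 *
 *      #define VPN2_MASK  0x1fffUL    // one TLB entry maps two 4kb pages
 *      #define ASID_MASK  0x00ffUL
 *
 *      static uint32_t tlb_probe_key(uint32_t badvaddr, uint32_t entryhi)
 *      {
 *              // (x | m) ^ m clears exactly the bits in m, i.e. x & ~m,
 *              // using immediate operands only - no extra mask register.
 *              uint32_t vpn2 = (badvaddr | VPN2_MASK) ^ VPN2_MASK;
 *              uint32_t asid = entryhi & ASID_MASK;
 *
 *              return vpn2 | asid;    // value loaded into c0_entryhi
 *      }
 *
 * The original c0_entryhi is restored in the branch delay slot once the
 * probe result has been read back from c0_index.
 */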
  77 #ifdef CONF_DEBUG_TLB
  78                 /*
  79                  * OK, this is a double fault. Let's see whether this is
  80                  * due to an invalid entry in the page_table.
  81                  */
  82                 dmfc0   k0,CP0_BADVADDR
  83                 srl     k0,12
  84                 sll     k0,2
  85                 lui     k1,%HI(TLBMAP)
  86                 addu    k0,k1
  87                 lw      k1,(k0)
  88                 andi    k1,(_PAGE_PRESENT|_PAGE_ACCESSED)
  89                 bnez    k1,reload_pgd_entries
  90                 nop                                     # delay slot
  91 
  92                 .set    noat
  93                 SAVE_ALL
  94                 .set    at
  95                 PRINT("Double fault caused by invalid entries in pgd:\n")
  96                 dmfc0   a1,CP0_BADVADDR
  97                 PRINT("Double fault address     : %08lx\n")
  98                 dmfc0   a1,CP0_EPC
  99                 PRINT("c0_epc                   : %08lx\n")
 100                 jal     show_regs
 101                 move    a0,sp
 102                 jal     dump_tlb_all
 103                 nop
 104                 dmfc0   a0,CP0_BADVADDR
 105                 jal     dump_list_current
 106                 nop
 107                 .set    noat
 108                 STI
 109                 .set    at
 110                 PANIC("Corrupted pagedir")
 111                 .set    noat
 112 
 113 reload_pgd_entries:
 114 #endif /* CONF_DEBUG_TLB */
 115 
 116                 /*
 117                  * Load missing pair of entries from the pgd and return.
 118                  */
 119                 dmfc0   k1,CP0_CONTEXT
 120                 dsra    k1,1
 121                 lwu     k0,(k1)                 # Never causes nested exception
 122                 lwu     k1,4(k1)
 123                 dsrl    k0,6                    # Convert to EntryLo format
 124                 dsrl    k1,6                    # Convert to EntryLo format
 125                 dmtc0   k0,CP0_ENTRYLO0
 126                 dmtc0   k1,CP0_ENTRYLO1
 127                 nop                             # for R4[04]00 pipeline
 128                 tlbwr
 129                 nop                             # for R4[04]00 pipeline
 130                 nop
 131                 nop
 132                 /*
 133                  * We don't know whether the original access was read or
 134                  * write, so return and see what happens...
 135                  */
 136                 eret
 137 
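/*
 * For reference, the refill path above in C-like form: c0_context has been
 * set up so that, arithmetically shifted right by one, it points at the
 * first pte of the even/odd pair for the faulting address, and each 32-bit
 * pte only needs to be shifted right by six to drop the software-only flag
 * bits and line the frame number and hardware attribute bits up with the
 * EntryLo format.  This is an illustrative sketch only; the type and
 * function names are invented for the example.
 *
 *      #include <stdint.h>
 *
 *      typedef uint32_t pte_t;
 *
 *      static uint32_t pte_to_entrylo(pte_t pte)
 *      {
 *              return pte >> 6;       // same as the "dsrl k0,6" above
 *      }
 *
 *      struct entrylo_pair { uint32_t lo0, lo1; };
 *
 *      // Load both members of the pair; the handler then writes them to
 *      // c0_entrylo0/c0_entrylo1 and issues a single tlbwr.
 *      static struct entrylo_pair load_pair(const pte_t *pair)
 *      {
 *              struct entrylo_pair e = {
 *                      pte_to_entrylo(pair[0]),
 *                      pte_to_entrylo(pair[1]),
 *              };
 *              return e;
 *      }
 */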
 138                 /*
 139                  * Handle invalid exception
 140                  *
 141                  * There are two possible causes for an invalid (tlbl)
 142                  * exception:
 143                  * 1) pages with present bit set but the valid bit clear
 144                  * 2) nonexistent pages
 145                  * Case one needs fast handling, therefore don't save
 146                  * registers yet.
 147                  *
 148                  * k0 contains c0_index.
 149                  */
 150 invalid_tlbl:
 151 #ifdef CONFIG_TLB_SHUTDOWN
 152                 /*
 153                  * Remove entry so we don't need to care later
 154                  * For sake of the R4000 V2.2 pipeline the tlbwi insn
 155                  * has been moved down. Moving it around is juggling with
 156                  * explosives...
 157                  */
 158                 lui     k1,0x0008
 159                 or      k0,k1
 160                 dsll    k0,13
 161                 dmtc0   k0,CP0_ENTRYHI
 162                 dmtc0   zero,CP0_ENTRYLO0
 163                 dmtc0   zero,CP0_ENTRYLO1
 164 #endif
 165                 /*
 166                  * Test present bit in entry
 167                  */
 168                 dmfc0   k0,CP0_BADVADDR
 169                 srl     k0,12
 170                 sll     k0,2
 171 #ifdef CONFIG_TLB_SHUTDOWN
 172                 tlbwi                                           # do not move!
 173 #endif
 174                 lui     k1,%HI(TLBMAP)
 175                 addu    k0,k1
 176                 lw      k1,(k0)
 177                 andi    k1,(_PAGE_PRESENT|_PAGE_READ)
 178                 xori    k1,(_PAGE_PRESENT|_PAGE_READ)
 179                 bnez    k1,nopage_tlbl
 180                 /*
 181                  * Present and read bits are set -> set valid and accessed bits
 182                  */
 183                 lw      k1,(k0)                                 # delay slot
 184                 ori     k1,(_PAGE_VALID|_PAGE_ACCESSED)
 185                 sw      k1,(k0)
 186                 eret
 187 
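/*
 * The fast path above indexes a flat page-table area at TLBMAP: shifting
 * the faulting address right by 12 and then left by 2 turns it into a word
 * offset (one 32-bit pte per 4kb page), and the andi/xori pair tests that
 * both _PAGE_PRESENT and _PAGE_READ are set in a single comparison.  A
 * hedged C equivalent, assuming the _PAGE_* bits from <asm/pgtable.h> and
 * an invented tlbmap_base variable standing in for TLBMAP:
 *
 *      #include <stdint.h>
 *      #include <asm/pgtable.h>               // _PAGE_* flag bits
 *
 *      extern uint32_t *tlbmap_base;          // stands in for TLBMAP
 *
 *      static int fixup_read_fault(uint32_t badvaddr)
 *      {
 *              uint32_t *pte = tlbmap_base + (badvaddr >> 12);
 *              uint32_t need = _PAGE_PRESENT | _PAGE_READ;
 *
 *              // ((x & m) ^ m) == 0 iff every bit of m is set in x:
 *              // the same two-instruction andi/xori test used above.
 *              if (((*pte & need) ^ need) != 0)
 *                      return 0;              // slow path: do_page_fault()
 *
 *              *pte |= _PAGE_VALID | _PAGE_ACCESSED;
 *              return 1;                      // eret and retry the access
 *      }
 */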
 188                 /*
 189                  * Page doesn't exist.  The remaining work is less
 190                  * speed-critical, so hand it all over to the kernel
 191                  * memory management routines.
 192                  */
 193 nopage_tlbl:    SAVE_ALL
 194                 dmfc0   a2,CP0_BADVADDR
 195                 STI
 196                 .set    at
 197                 /*
 198                  * a0 (struct pt_regs *) regs
 199                  * a1 (unsigned long)    0 for read access
 200                  * a2 (unsigned long)    faulting virtual address
 201                  */
 202                 move    a0,sp
 203                 jal     do_page_fault
 204                 li      a1,0                            # delay slot
 205                 j       ret_from_sys_call
 206                 nop                                     # delay slot
 207                 END(handle_tlbl)
 208 
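/*
 * Both slow paths in this file end up in the C page fault handler with the
 * saved register frame, a read/write flag and the faulting address in
 * a0/a1/a2.  The prototype implied by that register setup is sketched
 * below; the real declaration lives on the C side and may differ in
 * detail.
 *
 *      struct pt_regs;
 *
 *      void do_page_fault(struct pt_regs *regs,
 *                         unsigned long writeaccess,  // 0 = read, 1 = write
 *                         unsigned long address);     // faulting virtual address
 *
 * handle_tlbl passes 0 in a1; handle_tlbs and handle_mod pass 1.
 */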
 209                 .text
 210                 .align  5
 211                 NESTED(handle_tlbs, FR_SIZE, sp)
 212                 .set    noat
 213                 /*
 214                  * This cannot be a nested refill exception, so it must
 215                  * be an invalid exception.
 216                  * Two possible cases:
 217                  * 1) Page exists but not dirty.
 218                  * 2) Page doesn't exist yet. Hand over to the kernel.
 219                  *
 220                  * Test whether present bit in entry is set
 221                  */
 222                 dmfc0   k0,CP0_BADVADDR
 223                 srl     k0,12
 224                 sll     k0,2
 225                 lui     k1,%HI(TLBMAP)
 226                 addu    k0,k1
 227                 lw      k1,(k0)
 228                 tlbp                                    # find faulting entry
 229                 andi    k1,(_PAGE_PRESENT|_PAGE_WRITE)
 230                 xori    k1,(_PAGE_PRESENT|_PAGE_WRITE)
 231                 bnez    k1,nopage_tlbs
 232                 /*
 233                  * Present and writable bits set: set accessed and dirty bits.
 234                  */
 235                 lw      k1,(k0)                         # delay slot
 236                 ori     k1,k1,(_PAGE_ACCESSED|_PAGE_MODIFIED| \
 237                                _PAGE_VALID|_PAGE_DIRTY)
 238                 sw      k1,(k0)
 239                 /*
 240                  * Now reload the entry into the TLB
 241                  */
 242                 ori     k0,0x0004
 243                 xori    k0,0x0004
 244                 lw      k1,4(k0)
 245                 lw      k0,(k0)
 246                 srl     k1,6
 247                 srl     k0,6
 248                 dmtc0   k1,CP0_ENTRYLO1
 249                 dmtc0   k0,CP0_ENTRYLO0
 250                 nop                             # for R4[04]00 pipeline
 251                 tlbwi
 252                 nop                             # for R4[04]00 pipeline
 253                 nop
 254                 nop
 255                 eret
 256 
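/*
 * After updating the pte the handler reloads the whole even/odd pair:
 * clearing bit 2 of the pte address (the ori/xori on 0x0004) yields the
 * even member, 4(k0) is the odd one, and both are converted with the same
 * shift-by-six before the indexed write.  A small C sketch of the address
 * arithmetic, with invented names:
 *
 *      #include <stdint.h>
 *
 *      static void reload_pair(uintptr_t pte_addr,
 *                              uint32_t *entrylo0, uint32_t *entrylo1)
 *      {
 *              // force the address down to the even pte of the 8-byte pair
 *              uint32_t *even = (uint32_t *)((pte_addr | 0x4) ^ 0x4);
 *
 *              *entrylo0 = even[0] >> 6;      // even page of the pair
 *              *entrylo1 = even[1] >> 6;      // odd page of the pair
 *              // ... followed by tlbwi at the index found by the earlier tlbp
 *      }
 */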
 257                 /*
 258                  * Page doesn't exist.  The remaining work is less
 259                  * speed-critical, so hand it all over to the kernel
 260                  * memory management routines.
 261                  */
 262 nopage_tlbs:
 263 nowrite_mod:
 264 #ifdef CONFIG_TLB_SHUTDOWN
 265                 /*
 266                  * Remove entry so we don't need to care later
 267                  */
 268                 mfc0    k0,CP0_INDEX
 269 #ifdef CONF_DEBUG_TLB
 270                 bgez    k0,2f
 271                 nop
 272                 /*
 273                  * We got a tlbs exception but found no matching entry in
 274                  * the tlb.  This should never happen.  Paranoia makes us
 275                  * check it, though.
 276                  */
 277                 SAVE_ALL
 278                 jal     show_regs
 279                 move    a0,sp
 280                 .set    at
 281                 mfc0    a1,CP0_BADVADDR
 282                 PRINT("c0_badvaddr == %08lx\n")
 283                 mfc0    a1,CP0_INDEX
 284                 PRINT("c0_index    == %08x\n")
 285                 mfc0    a1,CP0_ENTRYHI
 286                 PRINT("c0_entryhi  == %08x\n")
 287                 .set    noat
 288                 STI
 289                 .set    at
 290                 PANIC("Tlbs or tlbm exception with no matching entry in tlb")
 291 1:              j       1b
 292                 nop
 293 2:
 294 #endif /* CONF_DEBUG_TLB */
 295                 lui     k1,0x0008
 296                 or      k0,k1
 297                 dsll    k0,13
 298                 dmtc0   k0,CP0_ENTRYHI
 299                 dmtc0   zero,CP0_ENTRYLO0
 300                 dmtc0   zero,CP0_ENTRYLO1
 301                 nop                             # for R4[04]00 pipeline
 302                 nop                             # R4000 V2.2 requires 4 NOPs
 303                 nop
 304                 nop
 305                 tlbwi
 306 #endif
 307                 .set    noat
 308                 SAVE_ALL
 309                 dmfc0   a2,CP0_BADVADDR
 310                 STI
 311                 .set    at
 312                 /*
 313                  * a0 (struct pt_regs *) regs
 314                  * a1 (unsigned long)    1 for write access
 315                  * a2 (unsigned long)    faulting virtual address
 316                  */
 317                 move    a0,sp
 318                 jal     do_page_fault
 319                 li      a1,1                            # delay slot
 320                 j       ret_from_sys_call
 321                 nop                                     # delay slot
 322                 END(handle_tlbs)
 323 
 324                 .align  5
 325                 NESTED(handle_mod, FR_SIZE, sp)
 326                 .set    noat
 327                 /*
 328                  * Two possible cases:
 329                  * 1) Page is writable but not dirty -> set dirty and return
 330                  * 2) Page is not writable -> call C handler
 331                  */
 332                 dmfc0   k0,CP0_BADVADDR
 333                 srl     k0,12
 334                 sll     k0,2
 335                 lui     k1,%HI(TLBMAP)
 336                 addu    k0,k1
 337                 lw      k1,(k0)
 338                 tlbp                                    # find faulting entry
 339                 andi    k1,_PAGE_WRITE
 340                 beqz    k1,nowrite_mod
 341                 /*
 342                  * Present and writable bits set: set accessed and dirty bits.
 343                  */
 344                 lw      k1,(k0)                         # delay slot
 345                 ori     k1,(_PAGE_ACCESSED|_PAGE_DIRTY)
 346                 sw      k1,(k0)
 347                 /*
 348                  * Now reload the entry into the tlb
 349                  */
 350                 ori     k0,0x0004
 351                 xori    k0,0x0004
 352                 lw      k1,4(k0)
 353                 lw      k0,(k0)
 354                 srl     k1,6
 355                 srl     k0,6
 356                 dmtc0   k1,CP0_ENTRYLO1
 357                 dmtc0   k0,CP0_ENTRYLO0
 358                 nop                             # for R4[04]00 pipeline
 359                 nop
 360                 nop
 361                 tlbwi
 362                 nop                             # for R4[04]00 pipeline
 363                 nop
 364                 nop
 365                 eret
 366                 END(handle_mod)
 367                 .set    at
 368 
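/*
 * handle_mod above is the software dirty-bit emulation: the R4xx0 raises a
 * TLB modified exception on the first store through a clean mapping, and
 * if the page is writable the handler just marks the pte accessed and
 * dirty and rewrites the TLB entry, so generic Linux dirty tracking works
 * without a hardware dirty bit.  Roughly, assuming the _PAGE_* bits from
 * <asm/pgtable.h>:
 *
 *      #include <asm/pgtable.h>       // _PAGE_* flag bits
 *
 *      // Returns 1 when the fast path applies (pte updated, TLB entry
 *      // rewritten, faulting store retried) and 0 when do_page_fault()
 *      // has to sort out a write to a non-writable mapping.
 *      static int emulate_dirty_bit(unsigned long *pte)
 *      {
 *              if (!(*pte & _PAGE_WRITE))
 *                      return 0;
 *
 *              *pte |= _PAGE_ACCESSED | _PAGE_DIRTY;
 *              return 1;
 *      }
 */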
 369 /*
 370  * Until SAVE_ALL/RESTORE_ALL handle registers 64-bit wide we have to
 371  * disable interrupts here.
 372  */
 373                 .set    noreorder
 374                 LEAF(tlbflush)
 375                 mfc0    t3,CP0_STATUS
 376                 ori     t4,t3,1
 377                 xori    t4,1
 378                 mtc0    t4,CP0_STATUS
 379                 li      t0,PM_4K
 380                 mtc0    t0,CP0_PAGEMASK
 381                 la      t0,boot_info
 382                 lw      t0,OFFSET_BOOTINFO_TLB_ENTRIES(t0)
 383                 dmtc0   zero,CP0_ENTRYLO0
 384                 dmtc0   zero,CP0_ENTRYLO1
 385                 mfc0    t2,CP0_WIRED
 386 1:              subu    t0,1
 387                 mtc0    t0,CP0_INDEX
 388                 lui     t1,0x0008
 389                 or      t1,t0,t1
 390                 dsll    t1,13
 391                 dmtc0   t1,CP0_ENTRYHI
 392                 bne     t2,t0,1b
 393                 tlbwi                                   # delay slot
 394                 jr      ra
 395                 mtc0    t3,CP0_STATUS                   # delay slot
 396                 END(tlbflush)
 397 
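/*
 * tlbflush above walks every entry from the top of the TLB down to the
 * first non-wired index, writing a zero EntryLo pair and a unique,
 * never-matched EntryHi (0x0008 | index, shifted left by 13) into each
 * slot so that no two invalidated entries can ever conflict.  A C-level
 * sketch of the same loop; the cp0 accessors are placeholders, not real
 * kernel functions:
 *
 *      #include <stdint.h>
 *
 *      extern void write_index(int);
 *      extern void write_entryhi(uint64_t);
 *      extern void write_entrylo0(uint64_t);
 *      extern void write_entrylo1(uint64_t);
 *      extern void tlb_write_indexed(void);
 *
 *      static void tlbflush_sketch(int tlb_entries, int wired)
 *      {
 *              write_entrylo0(0);
 *              write_entrylo1(0);
 *              for (int i = tlb_entries - 1; i >= wired; i--) {
 *                      write_index(i);
 *                      // unique out-of-the-way address per slot
 *                      write_entryhi((uint64_t)(0x00080000u | i) << 13);
 *                      tlb_write_indexed();
 *              }
 *      }
 */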
 398                 /*
 399                  * Code necessary to switch tasks on a Linux/MIPS machine.
 400                  */
 401                 .align  5
 402                 LEAF(resume)
 403                 /*
 404                  * Current task's task_struct
 405                  */
 406                 lui     t5,%hi(current)
 407                 lw      t0,%lo(current)(t5)
 408 
 409                 /*
 410                  * Save status register
 411                  */
 412                 mfc0    t1,CP0_STATUS
 413                 addu    t0,a1                           # Add tss offset
 414                 sw      t1,TOFF_CP0_STATUS(t0)
 415 
 416                 /*
 417                  * Disable interrupts
 418                  */
 419                 ori     t2,t1,0x1f
 420                 xori    t2,0x1e
 421                 mtc0    t2,CP0_STATUS
 422 
 423                 /*
 424                  * Save non-scratch registers
 425                  * All other registers have been saved on the kernel stack
 426                  */
 427                 sw      s0,TOFF_REG16(t0)
 428                 sw      s1,TOFF_REG17(t0)
 429                 sw      s2,TOFF_REG18(t0)
 430                 sw      s3,TOFF_REG19(t0)
 431                 sw      s4,TOFF_REG20(t0)
 432                 sw      s5,TOFF_REG21(t0)
 433                 sw      s6,TOFF_REG22(t0)
 434                 sw      s7,TOFF_REG23(t0)
 435                 sw      gp,TOFF_REG28(t0)
 436                 sw      sp,TOFF_REG29(t0)
 437                 sw      fp,TOFF_REG30(t0)
 438 
 439                 /*
 440                  * Save floating point state
 441                  */
 442                 sll     t2,t1,2
 443                 bgez    t2,2f
 444                 sw      ra,TOFF_REG31(t0)               # delay slot
 445                 sll     t2,t1,5
 446                 bgez    t2,1f
 447                 sdc1    $f0,(TOFF_FPU+0)(t0)            # delay slot
 448                 /*
 449                  * Store the 16 odd double precision registers
 450                  */
 451                 sdc1    $f1,(TOFF_FPU+8)(t0)
 452                 sdc1    $f3,(TOFF_FPU+24)(t0)
 453                 sdc1    $f5,(TOFF_FPU+40)(t0)
 454                 sdc1    $f7,(TOFF_FPU+56)(t0)
 455                 sdc1    $f9,(TOFF_FPU+72)(t0)
 456                 sdc1    $f11,(TOFF_FPU+88)(t0)
 457                 sdc1    $f13,(TOFF_FPU+104)(t0)
 458                 sdc1    $f15,(TOFF_FPU+120)(t0)
 459                 sdc1    $f17,(TOFF_FPU+136)(t0)
 460                 sdc1    $f19,(TOFF_FPU+152)(t0)
 461                 sdc1    $f21,(TOFF_FPU+168)(t0)
 462                 sdc1    $f23,(TOFF_FPU+184)(t0)
 463                 sdc1    $f25,(TOFF_FPU+200)(t0)
 464                 sdc1    $f27,(TOFF_FPU+216)(t0)
 465                 sdc1    $f29,(TOFF_FPU+232)(t0)
 466                 sdc1    $f31,(TOFF_FPU+248)(t0)
 467 
 468                 /*
 469                  * Store the 16 even double precision registers
 470                  */
 471 1:              cfc1    t1,fcr31
 472                 sdc1    $f2,(TOFF_FPU+16)(t0)
 473                 sdc1    $f4,(TOFF_FPU+32)(t0)
 474                 sdc1    $f6,(TOFF_FPU+48)(t0)
 475                 sdc1    $f8,(TOFF_FPU+64)(t0)
 476                 sdc1    $f10,(TOFF_FPU+80)(t0)
 477                 sdc1    $f12,(TOFF_FPU+96)(t0)
 478                 sdc1    $f14,(TOFF_FPU+112)(t0)
 479                 sdc1    $f16,(TOFF_FPU+128)(t0)
 480                 sdc1    $f18,(TOFF_FPU+144)(t0)
 481                 sdc1    $f20,(TOFF_FPU+160)(t0)
 482                 sdc1    $f22,(TOFF_FPU+176)(t0)
 483                 sdc1    $f24,(TOFF_FPU+192)(t0)
 484                 sdc1    $f26,(TOFF_FPU+208)(t0)
 485                 sdc1    $f28,(TOFF_FPU+224)(t0)
 486                 sdc1    $f30,(TOFF_FPU+240)(t0)
 487                 sw      t1,(TOFF_FPU+256)(t0)
 488 
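/*
 * The two shift/bgez pairs above test single status register bits without
 * needing a mask: shifting left by two moves CU1 (coprocessor 1 usable)
 * into the sign bit, and shifting left by five does the same for FR (the
 * 32 x 64-bit FP register mode), so bgez skips the whole FPU save or just
 * the odd-numbered half.  In C, assuming the standard R4xx0 status layout
 * (CU1 = bit 29, FR = bit 26) and invented helper names:
 *
 *      #include <stdint.h>
 *
 *      #define ST_CU1  (1u << 29)     // coprocessor 1 usable
 *      #define ST_FR   (1u << 26)     // 32 double-precision FP registers
 *
 *      // sll t,status,2 ; bgez t,skip   <=>  if (!(status & ST_CU1)) skip;
 *      // sll t,status,5 ; bgez t,even   <=>  if (!(status & ST_FR)) even_only;
 *      static int fpu_in_use(uint32_t status)  { return (status & ST_CU1) != 0; }
 *      static int fpu_has_odd(uint32_t status) { return (status & ST_FR) != 0; }
 */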
 489                 /*
 490                  * Switch current task
 491                  */
 492 2:              sw      a0,%lo(current)(t5)
 493                 addu    a0,a1                   # Add tss offset
 494 
 495                 /*
 496                  * Switch address space
 497                  */
 498 
 499                 /*
 500                  * (Choose new ASID for process)
 501                  * This isn't really required, but would speed up
 502                  * context switching.
 503                  */
 504 
 505                 /*
 506                  * Switch the root pointer
 507                  */
 508                 lw      t0,TOFF_PG_DIR(a0)
 509                 li      t1,TLB_ROOT
 510                 mtc0    t1,CP0_ENTRYHI
 511                 mtc0    zero,CP0_INDEX
 512                 srl     t0,6
 513                 ori     t0,MODE_ALIAS
 514                 mtc0    t0,CP0_ENTRYLO0
 515                 mtc0    zero,CP0_ENTRYLO1
 516                 lw      a2,TOFF_CP0_STATUS(a0)
 517 
 518                 /*
 519                  * Flush tlb
 520                  * (probably not needed, doesn't clobber a0-a3)
 521                  */
 522                 jal     tlbflush
 523                 tlbwi                                   # delay slot
 524 
 525                 /*
 526                  * Restore fpu state:
 527                  *  - cp0 status register bits
 528                  *  - fp gp registers
 529                  *  - cp1 status/control register
 530                  */
 531                 ori     t1,a2,1                         # pipeline magic
 532                 xori    t1,1
 533                 mtc0    t1,CP0_STATUS
 534                 sll     t0,a2,2
 535                 bgez    t0,2f
 536                 sll     t0,a2,5                         # delay slot
 537                 bgez    t0,1f
 538                 ldc1    $f0,(TOFF_FPU+0)(a0)            # delay slot
 539                 /*
 540                  * Restore the 16 odd double precision registers only
 541                  * when enabled in the cp0 status register.
 542                  */
 543                 ldc1    $f1,(TOFF_FPU+8)(a0)
 544                 ldc1    $f3,(TOFF_FPU+24)(a0)
 545                 ldc1    $f5,(TOFF_FPU+40)(a0)
 546                 ldc1    $f7,(TOFF_FPU+56)(a0)
 547                 ldc1    $f9,(TOFF_FPU+72)(a0)
 548                 ldc1    $f11,(TOFF_FPU+88)(a0)
 549                 ldc1    $f13,(TOFF_FPU+104)(a0)
 550                 ldc1    $f15,(TOFF_FPU+120)(a0)
 551                 ldc1    $f17,(TOFF_FPU+136)(a0)
 552                 ldc1    $f19,(TOFF_FPU+152)(a0)
 553                 ldc1    $f21,(TOFF_FPU+168)(a0)
 554                 ldc1    $f23,(TOFF_FPU+184)(a0)
 555                 ldc1    $f25,(TOFF_FPU+200)(a0)
 556                 ldc1    $f27,(TOFF_FPU+216)(a0)
 557                 ldc1    $f29,(TOFF_FPU+232)(a0)
 558                 ldc1    $f31,(TOFF_FPU+248)(a0)
 559 
 560                 /*
 561                  * Restore the 16 even double precision registers
 562                  * when cp1 was enabled in the cp0 status register.
 563                  */
 564 1:              lw      t0,(TOFF_FPU+256)(a0)
 565                 ldc1    $f2,(TOFF_FPU+16)(a0)
 566                 ldc1    $f4,(TOFF_FPU+32)(a0)
 567                 ldc1    $f6,(TOFF_FPU+48)(a0)
 568                 ldc1    $f8,(TOFF_FPU+64)(a0)
 569                 ldc1    $f10,(TOFF_FPU+80)(a0)
 570                 ldc1    $f12,(TOFF_FPU+96)(a0)
 571                 ldc1    $f14,(TOFF_FPU+112)(a0)
 572                 ldc1    $f16,(TOFF_FPU+128)(a0)
 573                 ldc1    $f18,(TOFF_FPU+144)(a0)
 574                 ldc1    $f20,(TOFF_FPU+160)(a0)
 575                 ldc1    $f22,(TOFF_FPU+176)(a0)
 576                 ldc1    $f24,(TOFF_FPU+192)(a0)
 577                 ldc1    $f26,(TOFF_FPU+208)(a0)
 578                 ldc1    $f28,(TOFF_FPU+224)(a0)
 579                 ldc1    $f30,(TOFF_FPU+240)(a0)
 580                 ctc1    t0,fcr31
 581 
 582                 /*
 583                  * Restore non-scratch registers
 584                  */
 585 2:              lw      s0,TOFF_REG16(a0)
 586                 lw      s1,TOFF_REG17(a0)
 587                 lw      s2,TOFF_REG18(a0)
 588                 lw      s3,TOFF_REG19(a0)
 589                 lw      s4,TOFF_REG20(a0)
 590                 lw      s5,TOFF_REG21(a0)
 591                 lw      s6,TOFF_REG22(a0)
 592                 lw      s7,TOFF_REG23(a0)
 593                 lw      gp,TOFF_REG28(a0)
 594                 lw      sp,TOFF_REG29(a0)
 595                 lw      fp,TOFF_REG30(a0)
 596                 lw      ra,TOFF_REG31(a0)
 597 
 598                 /*
 599                  * Restore status register
 600                  */
 601                 lw      t0,TOFF_KSP(a0)
 602                 sw      t0,kernelsp
 603 
 604                 jr      ra
 605                 mtc0    a2,CP0_STATUS                   # delay slot
 606                 END(resume)
 607 
 608                 /*
 609                  * Load a new root pointer into the tlb
 610                  */
 611                 .set    noreorder
 612                 LEAF(load_pgd)
 613                 /*
 614                  * Switch the root pointer
 615                  */
 616                 mfc0    t0,CP0_STATUS
 617                 ori     t1,t0,1
 618                 xori    t1,1
 619                 mtc0    t1,CP0_STATUS
 620                 srl     a0,6
 621                 ori     a0,MODE_ALIAS
 622                 li      t1,TLB_ROOT
 623                 mtc0    t1,CP0_ENTRYHI
 624                 mtc0    zero,CP0_INDEX
 625                 mtc0    a0,CP0_ENTRYLO0
 626                 mtc0    zero,CP0_ENTRYLO1
 627                 mtc0    t0,CP0_STATUS
 628                 j       tlbflush
 629                 tlbwi                                   # delay slot
 630                 END(load_pgd)
 631 
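/*
 * Both resume and load_pgd above map the page directory at the fixed
 * virtual address TLB_ROOT through TLB entry 0: the address of the pgd is
 * shifted right by six so the frame number lands in the EntryLo PFN field,
 * and MODE_ALIAS (0x0016) supplies the attribute bits.  Assuming the
 * standard R4xx0 EntryLo layout (G = bit 0, V = bit 1, D = bit 2, cache
 * attribute = bits 3..5), 0x0016 decodes as valid, writable (dirty) and
 * uncached, matching the uncacheable note next to MODE_ALIAS at the top of
 * the file.  Illustrative C, with invented macro names:
 *
 *      #include <stdint.h>
 *
 *      #define EL_GLOBAL    (1u << 0)
 *      #define EL_VALID     (1u << 1)
 *      #define EL_DIRTY     (1u << 2)
 *      #define EL_CACHE(x)  ((uint32_t)(x) << 3)
 *      #define EL_UNCACHED  EL_CACHE(2)
 *
 *      // EL_VALID | EL_DIRTY | EL_UNCACHED == 0x16 == MODE_ALIAS
 *      static uint32_t pgd_entrylo(uint32_t pgd_addr)
 *      {
 *              return (pgd_addr >> 6) | EL_VALID | EL_DIRTY | EL_UNCACHED;
 *      }
 */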
 632 /*
 633  * Some bits in the config register
 634  */
 635 #define CONFIG_DB       (1<<4)
 636 #define CONFIG_IB       (1<<5)
 637 
 638 /*
 639  * Flush instruction/data caches
 640  *
 641  * Parameters: a0 - starting address to flush
 642  *             a1 - size of area to be flushed
 643  *             a2 - which caches to be flushed
 644  *
 645  * FIXME:      - ignores parameters in a0/a1
 646  *             - doesn't know about second level caches
 647  */
 648                 .set    noreorder
 649                 LEAF(sys_cacheflush)
 650                 andi    t1,a2,DCACHE
 651                 beqz    t1,do_icache
 652                 li      t0,KSEG0                        # delay slot
 653 
 654                 /*
 655                  * Writeback data cache, even lines
 656                  */
 657                 li      t1,CACHELINES-1
 658 1:              cache   Index_Writeback_Inv_D,0(t0)
 659                 cache   Index_Writeback_Inv_D,32(t0)
 660                 cache   Index_Writeback_Inv_D,64(t0)
 661                 cache   Index_Writeback_Inv_D,96(t0)
 662                 cache   Index_Writeback_Inv_D,128(t0)
 663                 cache   Index_Writeback_Inv_D,160(t0)
 664                 cache   Index_Writeback_Inv_D,192(t0)
 665                 cache   Index_Writeback_Inv_D,224(t0)
 666                 cache   Index_Writeback_Inv_D,256(t0)
 667                 cache   Index_Writeback_Inv_D,288(t0)
 668                 cache   Index_Writeback_Inv_D,320(t0)
 669                 cache   Index_Writeback_Inv_D,352(t0)
 670                 cache   Index_Writeback_Inv_D,384(t0)
 671                 cache   Index_Writeback_Inv_D,416(t0)
 672                 cache   Index_Writeback_Inv_D,448(t0)
 673                 cache   Index_Writeback_Inv_D,480(t0)
 674                 addiu   t0,512
 675                 bnez    t1,1b
 676                 subu    t1,1
 677 
 678                 /*
 679                  * Writeback data cache, odd lines
 680                  * Only needed for 16 byte line size
 681                  */
 682                 mfc0    t1,CP0_CONFIG
 683                 andi    t1,CONFIG_DB
 684                 bnez    t1,do_icache
 685                 li      t1,CACHELINES-1
 686 1:              cache   Index_Writeback_Inv_D,16(t0)
 687                 cache   Index_Writeback_Inv_D,48(t0)
 688                 cache   Index_Writeback_Inv_D,80(t0)
 689                 cache   Index_Writeback_Inv_D,112(t0)
 690                 cache   Index_Writeback_Inv_D,144(t0)
 691                 cache   Index_Writeback_Inv_D,176(t0)
 692                 cache   Index_Writeback_Inv_D,208(t0)
 693                 cache   Index_Writeback_Inv_D,240(t0)
 694                 cache   Index_Writeback_Inv_D,272(t0)
 695                 cache   Index_Writeback_Inv_D,304(t0)
 696                 cache   Index_Writeback_Inv_D,336(t0)
 697                 cache   Index_Writeback_Inv_D,368(t0)
 698                 cache   Index_Writeback_Inv_D,400(t0)
 699                 cache   Index_Writeback_Inv_D,432(t0)
 700                 cache   Index_Writeback_Inv_D,464(t0)
 701                 cache   Index_Writeback_Inv_D,496(t0)
 702                 addiu   t0,512
 703                 bnez    t1,1b
 704                 subu    t1,1
 705 
 706 do_icache:      andi    t1,a2,ICACHE
 707                 beqz    t1,done
 708 
 709                 /*
 710                  * Flush instruction cache, even lines
 711                  */
 712                 lui     t0,0x8000
 713                 li      t1,CACHELINES-1
 714 1:              cache   Index_Invalidate_I,0(t0)        
 715                 cache   Index_Invalidate_I,32(t0)
 716                 cache   Index_Invalidate_I,64(t0)
 717                 cache   Index_Invalidate_I,96(t0)
 718                 cache   Index_Invalidate_I,128(t0)
 719                 cache   Index_Invalidate_I,160(t0)
 720                 cache   Index_Invalidate_I,192(t0)
 721                 cache   Index_Invalidate_I,224(t0)
 722                 cache   Index_Invalidate_I,256(t0)
 723                 cache   Index_Invalidate_I,288(t0)
 724                 cache   Index_Invalidate_I,320(t0)
 725                 cache   Index_Invalidate_I,352(t0)
 726                 cache   Index_Invalidate_I,384(t0)
 727                 cache   Index_Invalidate_I,416(t0)
 728                 cache   Index_Invalidate_I,448(t0)
 729                 cache   Index_Invalidate_I,480(t0)
 730                 addiu   t0,512
 731                 bnez    t1,1b
 732                 subu    t1,1
 733 
 734                 /*
 735                  * Flush instruction cache, odd lines
 736                  * Only needed for 16 byte line size
 737                  */
 738                 mfc0    t1,CP0_CONFIG
 739                 andi    t1,CONFIG_IB
 740                 bnez    t1,done
 741                 li      t1,CACHELINES-1
 742 1:              cache   Index_Invalidate_I,16(t0)
 743                 cache   Index_Invalidate_I,48(t0)
 744                 cache   Index_Invalidate_I,80(t0)
 745                 cache   Index_Invalidate_I,112(t0)
 746                 cache   Index_Invalidate_I,144(t0)
 747                 cache   Index_Invalidate_I,176(t0)
 748                 cache   Index_Invalidate_I,208(t0)
 749                 cache   Index_Invalidate_I,240(t0)
 750                 cache   Index_Invalidate_I,272(t0)
 751                 cache   Index_Invalidate_I,304(t0)
 752                 cache   Index_Invalidate_I,336(t0)
 753                 cache   Index_Invalidate_I,368(t0)
 754                 cache   Index_Invalidate_I,400(t0)
 755                 cache   Index_Invalidate_I,432(t0)
 756                 cache   Index_Invalidate_I,464(t0)
 757                 cache   Index_Invalidate_I,496(t0)
 758                 addiu   t0,512
 759                 bnez    t1,1b
 760                 subu    t1,1
 761 
 762 done:           j       ra
 763                 nop
 764                 END(sys_cacheflush)
 765 
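/*
 * Each unrolled loop in sys_cacheflush issues sixteen index operations
 * spaced 32 bytes apart (512 bytes per iteration) and repeats CACHELINES
 * times, walking the whole primary cache by index; the second pass at
 * offsets 16, 48, ... is, as the comments above note, only needed for a
 * 16-byte line size (the CONFIG_DB/CONFIG_IB tests).  A compact,
 * non-unrolled sketch of the same walk; cache_index_op() is a placeholder
 * for the cache instruction:
 *
 *      extern void cache_index_op(unsigned long addr);
 *
 *      static void index_walk(unsigned long base, int cachelines, int line_size)
 *      {
 *              // even pass: offsets 0, 32, 64, ... in each 512-byte chunk
 *              for (int i = 0; i < cachelines; i++)
 *                      for (int off = 0; off < 512; off += 32)
 *                              cache_index_op(base + 512ul * i + off);
 *
 *              // extra pass at +16 only for 16-byte cache lines
 *              if (line_size != 16)
 *                      return;
 *              for (int i = 0; i < cachelines; i++)
 *                      for (int off = 16; off < 512; off += 32)
 *                              cache_index_op(base + 512ul * i + off);
 *      }
 */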
 766 /*
 767  * Update the TLB - or how instruction scheduling makes code unreadable ...
 768  *
 769  * MIPS doesn't need any external MMU info: the kernel page tables contain
 770  * all the necessary information.  We use this hook though to load the
 771  * TLB as early as possible with up-to-date information, avoiding unnecessary
 772  * exceptions.
 773  *
 774  * Parameters: a0 - struct vm_area_struct *vma  (ignored)
 775  *             a1 - unsigned long address
 776  *             a2 - pte_t pte
 777  */
 778                 .set    noreorder
 779                 LEAF(update_mmu_cache)
 780                 /*
 781                  * Step 1: Wipe out old TLB information.  Not sure whether
 782                  * we really need this step; call it paranoia ...
 783                  * To do that we need to disable interrupts.
 784                  */
 785                 mfc0    t0,CP0_STATUS           # interrupts off
 786                 ori     t1,t0,1
 787                 xori    t1,1
 788                 mtc0    t1,CP0_STATUS
 789                 li      t3,TLBMAP               # then wait 3 cycles
 790                 ori     t1,a1,0xfff             # mask off low 12 bits
 791                 xori    t1,0xfff
 792                 mfc0    t2,CP0_ENTRYHI          # copy ASID into address
 793                 andi    t2,0xff
 794                 or      t2,t1
 795                 mtc0    t2,CP0_ENTRYHI
 796                 srl     t4,a1,12                # wait again three cycles
 797                 sll     t4,t4,PTRLOG
 798                 dmtc0   zero,CP0_ENTRYLO0
 799                 tlbp                            # now query the TLB
 800                 addu    t3,t4                   # wait another three cycles
 801                 ori     t3,0xffff
 802                 xori    t3,0xffff
 803                 mfc0    t1,CP0_INDEX
 804                 bltz    t1,1f                   # No old entry?
 805                 dmtc0   zero,CP0_ENTRYLO1
 806                 or      t3,t1                   # wait one cycle
 807                 tlbwi
 808                 /*
 809                  * But there still might be an entry for the pgd ...
 810                  */
 811 1:              mtc0    t3,CP0_ENTRYHI
 812                 nop                             # wait 3 cycles
 813                 nop
 814                 nop
 815                 tlbp                            # TLB lookup
 816                 nop
 817                 nop
 818                 mfc0    t1,CP0_INDEX            # wait 3 cycles
 819                 bltz    t1,1f                   # No old entry?
 820                 nop             
 821                 tlbwi                           # gotcha ...
 822                 /*
 823                  * Step 2: Reload the TLB with new information.  We can skip
 824                  * this but this should speed the mess a bit by avoiding
 825                  * tlbl/tlbs exceptions. (To be done)
 826                  */
 827 1:              jr      ra
 828                 mtc0    t0,CP0_STATUS           # delay slot
 829                 END(update_mmu_cache)
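/*
 * update_mmu_cache above pre-invalidates two possibly stale translations:
 * first the entry for the faulting address itself (keyed on the page
 * address plus the current ASID), then, as the comment above notes, a
 * possible entry for the pgd/TLBMAP mapping of that address.  In both
 * cases it probes with tlbp and only rewrites the slot, with a zero
 * EntryLo pair, when a matching index was found.  Roughly, with
 * placeholder cp0 helpers standing in for mtc0/tlbp/tlbwi:
 *
 *      extern void write_entryhi(unsigned long key);
 *      extern void write_entrylo0(unsigned long val);
 *      extern void write_entrylo1(unsigned long val);
 *      extern void tlb_probe(void);
 *      extern long read_index(void);          // negative means "no match"
 *      extern void tlb_write_indexed(void);
 *
 *      static void wipe_translation(unsigned long entryhi_key)
 *      {
 *              write_entryhi(entryhi_key);
 *              write_entrylo0(0);
 *              write_entrylo1(0);
 *              tlb_probe();
 *              if (read_index() >= 0)          // only if something matched
 *                      tlb_write_indexed();
 *      }
 *
 * The interleaving in the assembly exists purely to hide the mandatory
 * cp0 hazard cycles documented in the comments above.
 */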
