/* qc.h */
/*
 * Copyright (C) 1996-1998 by the Board of Trustees
 *    of Leland Stanford Junior University.
 * 
 * This file is part of the SimOS distribution. 
 * See LICENSE file for terms of the license. 
 *
 */

#ifndef QC_H
#define QC_H


/* From mshade_cache_init */
extern void qc_cache_init( int cpuNum );
/* Gets called on switch from page to cache mode (either direction) */
/* It clears the qc and gets k0 ready */
extern void qc_renew( int cpuNum );
/* Called on an intervention from cache.c */
extern void qc_clobber( PA pAddr, int cpuNum, EmVQCMemState state );
/* Called from pc_tc.c and mem_control.c -- used to downgrade a line */
/* or page when we are executing off something we own exclusive */
/* NOTE(review): parameter names added for documentation only; presumed */
/* to be the virtual address and the new (downgraded) state -- confirm */
/* against the definition in qc.c */
extern void qc_downgrade( int cpuNum, VA vAddr, int newState );
extern void qc_downgrade_ifpresent( VA vAddr );
/* Called through CPU vector since qc caches write protection info */
extern void EmFirewallChange(PA pAddr, uint grant, uint64 cpumask);

/* Entry points from mem_control */
/* Entry point from cache_tags module */
/* Presumably updates the qc entry mapping vline<->pline to new_state */
/* (naming suggests so; confirm against the definition) */
extern void set_qc_state( int cpuNum, VLN vline, PLN pline, int new_state );

/* Entry points from the cp0 module */
/* This function accesses true TLB state */
extern void qc_mmu_switch( int cpuNum, unsigned old_asid, unsigned new_asid,
                           unsigned forceSelfSwitch);

/* These functions are called by cp0.c */
/* NOTE(review): idx appears to be a TLB entry index (see the tlb_ */
/* function names); confirm against cp0.c */
extern void qc_tlb_inval_page( int cpuNum,int idx);
extern void qc_tlb_replace_page( int cpuNum, int idx);
extern void qc_cache_inval_page( int cpuNum, int idx);
extern void qc_flush_etlb( int cpuNum );
extern void qc_erase_etlb( int cpuNum, EntryHi hi );
extern void qc_map_page( int cpuNum, int idx);


/* debugging : These functions check the inclusion property
   between MMU/TLB and PhysArray/Cache */
extern void MMUCheck(int procNum);
extern void PACheck(int procNum);
extern void qc_CheckForDuplicates(CPUState *);
extern void qc_CheckForDuplicate(CPUState *, int index);
#ifdef EMBRA_USE_QC64
/* 64-bit quick-check variants, compiled in only with EMBRA_USE_QC64 */
extern void qc64_remove_addr(int cpu, VA va);
extern MA Em_QC64Reload(VA vAddr, int flags);
#endif

/* Called from pc_tc.c and translator.c and Icohere */
extern K0A non_excepting_tv( int cpuNum, VA va );

/* Called from simfirewall.c for invalidating the relocation array */
extern void qc_invalidate_reloc(int cpuNum, PA start, unsigned size);

/* Called from phys_mem_ref_wrapper */
/* ("extern" added for consistency with every other declaration here; */
/* it is the default for function declarations, so behavior is unchanged) */
extern MA phys_mem_ref(VA vAddr, EmVQCMemState new_state, MA mAddr, int cpuNum);
extern unsigned pa_mem_ref(VA vAddr, EmVQCMemState new_state, MA mAddr, int cpuNum);

/* We need this so we can special case the context switch to zero */
/* and back, since we don't want to flush the state we keep in the */
/* virtual quick check. With this variable set, we can fool the code */
/* into believing the previous ASID is running. */
#define CURRENT_ASID(_cpu) (quick_ASID[_cpu])

/* Former definition, kept for reference: read the ASID directly from */
/* the CP0 TLBHI register rather than the quick_ASID shadow array. */
/* #define CURRENT_ASID (((S->CP0[C0_TLBHI]) & TLBHI_PIDMASK) >> TLBHI_PIDSHIFT) */

/* PhysArray: one pa_info_t per cache line of machine _m's memory */
#define PA_NUM(_m)       ((unsigned) ADDR2SLINE(MEM_SIZE(_m)))
#define PA_SIZE(_m)      ( PA_NUM(_m) * sizeof(pa_info_t))
/* virtual quick check: one byte per cache line of the 32-bit VA space */
#define QC_VIRT_NUM      ((unsigned) (1<<(32 - log2SCACHE_LINE_SIZE)))
#define QC_VIRT_SIZE     ( QC_VIRT_NUM * sizeof(char))
/* physical quick check: one phys_info_t per cache line of _m's memory */
#define QC_PHYS_NUM(_m)  ((unsigned) ADDR2SLINE(MEM_SIZE(_m)))
#define QC_PHYS_SIZE(_m) ( QC_PHYS_NUM(_m) * sizeof(phys_info_t) )


/* Per-CPU layout starting at QC_START: the virtual QC (byte array)     */
/* immediately followed by the physical QC (phys_info_t array); each    */
/* macro below indexes by the cache-line number of _addr.               */
/* XXX - we do our own aligning, but this is a byte array */
#define QC_V(_cpu, _addr) ((char*) \
   (QC_START + ((_cpu) * (QC_VIRT_SIZE + QC_PHYS_SIZE(M_FROM_CPU(_cpu)))) + \
         ((unsigned)ADDR2SLINE(_addr)) ))
/* XXX - we do our own aligning.  Note the sizeof( phys_info_t) */
#define QC_P(_cpu, _addr) ((phys_info_t*) \
   (QC_START + ((_cpu) * (QC_VIRT_SIZE + QC_PHYS_SIZE(M_FROM_CPU(_cpu)))) + \
     QC_VIRT_SIZE  + (((unsigned)ADDR2SLINE(_addr))*sizeof(phys_info_t)) ))

/* PhysArray entry for _addr: per-CPU pa_info_t arrays based at PA_START */
#define PA_P(_cpu, _addr) ((pa_info_t*) \
     (PA_START + ((_cpu) * (PA_SIZE(M_FROM_CPU(_cpu)))) + \
     (((unsigned)ADDR2SLINE(_addr))*sizeof(pa_info_t)) ))


/* Same layout as QC_V/QC_P/PA_P above, but indexed directly by a       */
/* cache-line number instead of an address.                             */
/* XXX - we do our own aligning, but this is a byte array */
#define QC_VL(_cpu, _lineno) ((char*) \
  (QC_START + ((_cpu) * (QC_VIRT_SIZE + QC_PHYS_SIZE(M_FROM_CPU(_cpu)))) + \
         ((unsigned)(_lineno)) ))
/* XXX - we do our own aligning.  Note the sizeof( phys_info_t) */
#define QC_PL(_cpu, _lineno) ((phys_info_t*) \
  (QC_START + ((_cpu) * (QC_VIRT_SIZE + QC_PHYS_SIZE(M_FROM_CPU(_cpu)))) + \
     QC_VIRT_SIZE  + (((unsigned)(_lineno))*sizeof(phys_info_t)) ))

#define PA_PL(_cpu, _lineno) ((pa_info_t*) \
     (PA_START + ((_cpu) * (PA_SIZE(M_FROM_CPU(_cpu)))) + \
     (((unsigned)(_lineno))*sizeof(pa_info_t)) ))

/* One unsigned relocation entry per page of the 32-bit VA space */
#define MMU_RELOC_SIZE ((1<<(32-NUM_OFFSET_BITS)) * sizeof( unsigned))


/* This assumes we can steal the upper bit of our memAddr, which we */
/* are guaranteed because in MIPS the kernel steals the upper bit */
#define MMU_PROT_WRITE(_mmuEnt)   (MA)((uint)(_mmuEnt) | 0x80000000)
/* NOTE(review): yields an MA that is nonzero iff bit 31 is set -- only */
/* meaningful as a truth value, despite the pointer-typed cast */
#define IS_MMU_PROT_WRITE(_mmuEnt)(MA)((uint)(_mmuEnt) & 0x80000000)
#define MMU_PROT_READ(_mmuEnt)    (MA)((uint)(_mmuEnt) & 0x7FFFFFFF)
#define MMU2ADDR(_mmuEnt)         (MA)((uint)(_mmuEnt) & 0x7FFFFFFF)

/* Look up the mmu entry for the page containing _addr */
#define QC_MMU_LOOKUP(_E,_addr) ((_E)->mmu[PAGE_NUMBER(_addr)])

/* We do our own alignment */
/*#define MMU_RELOC(_cpunum, _vAddr) ((unsigned*)(MMU_RELOC_START + ((_cpunum) * MMU_RELOC_SIZE ) + (((unsigned)(_vAddr)>>NUM_OFFSET_BITS)<<2) ) )*/

/* This is a combination of a physically addressed quick check with a */
/* backmap from physical to virtual addresses.  They are combined for */
/* cache locality. The cache state entries will get aligned to take up */
/* a word, but this makes sign extension easier */
/* Allow access via KSEG0 virtual addresses, and by a mapped virtual */
/* address */ 

/* Note: Line size must be at least 8 */
/* physical quick-check entry: state bits 29-31 + virtual line number */
typedef uint phys_info_t;
/* PhysArray entry: one byte of state per line */
typedef char pa_info_t;

/* actions on the physical quick check */
/* Entry layout: bit 31 = dirty, bit 30 = shared, bit 29 = inv,  */
/* bits 0-28 = backmap virtual line number.                      */
#define PQC_VLINE(_pqc)      ((phys_info_t)(_pqc) & 0x1FFFFFFF)
#define PQC_VLINEADDR(_pqc)  (SLINE2ADDR(PQC_VLINE(_pqc)))
#define PQC_STATUS(_pqc)     ((phys_info_t)(_pqc) & 0xe0000000)
#define PQC_SHARED(_pqc)     ((phys_info_t)(_pqc) & 0x40000000)
#define PQC_SET_SHARED(_pqc) ((phys_info_t)(_pqc) | 0x40000000)
#define PQC_DIRTY(_pqc)      ((phys_info_t)(_pqc) & 0x80000000)
#define PQC_SET_DIRTY(_pqc)  ((phys_info_t)(_pqc) | 0x80000000)
/* valid = dirty or shared (either of the two high state bits set) */
#define PQC_VALID(_pqc)      ((phys_info_t)(_pqc) & 0xc0000000)

/*#define PQC_INV(_pqc)      ((uint)(_pqc) & 0x20000000)*/
#define PQC_SET_INV              ((phys_info_t)  0x20000000)
/* INV is set on an invalidate to count misses due to invalidation */
/* by another proc. or by DMA (see qc_clobber)                     */

/* actions on the physarray */
/* Entry layout (one byte): bit 7 = dirty, bit 0 = shared;  */
/* note this differs from the phys_info_t bit positions.    */
/* #define PA_SET_STATUS(_pa)   ((pa_info_t)(_pa) & 0xe0) */
#define PA_SHARED(_pa)       ((pa_info_t)(_pa) & 0x1)
#define PA_DIRTY(_pa)        ((pa_info_t)(_pa) & 0x80)

/* #define PA_SET_VALID         ((pa_info_t) 0xc0) */
#define PA_SET_DIRTY         ((pa_info_t) 0x80)
#define PA_SET_SHARED        ((pa_info_t) 0x1)
#define PA_SET_INV           ((pa_info_t) 0x0) 
/* see PQC_SET_INV */

#endif