mem_control.c
13.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
/*
* Copyright (C) 1996-1998 by the Board of Trustees
* of Leland Stanford Junior University.
*
* This file is part of the SimOS distribution.
* See LICENSE file for terms of the license.
*
*/
/****************************************************************
* mem_control.c
*
* Author: $Author: blythe $
* Date: $Date: 2002/05/29 01:09:10 $
*****************************************************************/
/* include the sim stuff first so we don't expand our macros in their code */
#include <stdio.h>
#include <unistd.h>
#include "simmisc.h"
#include "embra.h"
#include "mem_control.h"
#include "qc.h"
#include "cache.h"
#include "debug.h"
#include "driver.h"
#include "cp0.h"
#include "clock.h"
#include "main_run.h"
#include "tc_coherence.h"
#include "stats.h"
#include "callout.h"
#include "main_run.h"
#include "tc.h"
#include "annotations.h"
/*
 * mem_translate - translate a virtual instruction-fetch address to a
 * physical address at the start of a basic block.
 *
 * Returns the physical address on a normal translation.  Two special
 * translation results are handled in-line and do NOT return normally:
 *   - BACKDOOR_CODE: the "code" lives in simulator (host) memory; the
 *     backdoor function is invoked directly on the host CPU, then control
 *     re-enters the translation cache (ReenterTC* never return).
 *   - EXCEPTION_CODE: the MIPS exception state has been set up by
 *     Em_TranslateVirtual; re-enter the TC to dispatch it (never returns).
 */
PA mem_translate( int cpuNum, VA vAddr )
{
    PA pAddr;
    int status;
#ifdef wrongl_place
    /*
     * This is definitively not the right place to do this.
     * As a matter of fact, we should get rid of sim_misc.first....
     * altogether.
     * (bugnion)
     */
    /* Should I enter the debugger? -- Since this is done in Periodic_Callout,*/
    /* This is really just to handle cpus in the prom slave loop */
    if( sim_misc.first_cpu_into_debugger != -1 ) {
        Embra_Collect_Processes_For_Debug(cpuNum);
    }
#endif
    /* Since this is the start of a basic block, we are not in the delay slot */
    status = Em_TranslateVirtual( cpuNum, vAddr, &pAddr, ACT_IREAD);
    if( status == BACKDOOR_CODE ) {
        /* Assume this is a call. Execute it on the real CPU. This
         * assumes all backdoor calls take less than 4 arguments.
         */
        /* If we are counting time for the translator, stop */
        STAT_TIMER_STOP( trans_timer );
        /* Set this global ONLY for backdoor calls where we don't want to */
        /* force a cpuNum parameter */
        /* curr_cpu = cpuNum; */
        curEmp = &EMP[cpuNum];
        STAT_INC( backdoor_calls );
        if( !EMP[cpuNum].outOfSlaveLoop ) {
            /* This CPU has not been kicked yet -- it's in the PROM
             * slave loop, waiting for the master to post a launch
             * address and arguments in sim_misc. */
            uint launchAddr = (uint)sim_misc.launchAddr[cpuNum];
            if( !embra.MPinUP )
                /* NOTE(review): sginap looks like an IRIX yield while
                 * waiting for a launch address -- confirm intent (the
                 * original author marked it XXX too). */
                if(!launchAddr) sginap(20); /* XXX ??? */
            /* Get master's cycle count */
            EMP[cpuNum].cycleCount = EMP[0].cycleCount;
            /* Make time go forward */
            EMP[cpuNum].cycleCountdown = 0;
            /* XXX - non-backdoor address is launch, backdoor address is call */
            if( IS_BACKDOOR( launchAddr ) ) {
                int64 result;
                /* Do call: invoke the simulator-side function directly */
                result = ((int64 (*)(int,int,int,int))launchAddr)
                    ( sim_misc.launchArg[cpuNum][0],
                      sim_misc.launchArg[cpuNum][1],
                      sim_misc.launchArg[cpuNum][2],
                      sim_misc.launchArg[cpuNum][3] );
                if( result == SLAVELOOP_CONTINUE ) {
                    /* signal init function done */
                    sim_misc.launchAddr[cpuNum] = 0;
                    /* clear so will be 0 if not set up on next call */
                    sim_misc.launchArg[cpuNum][0] = 0;
                    sim_misc.launchArg[cpuNum][1] = 0;
                    sim_misc.launchArg[cpuNum][2] = 0;
                    sim_misc.launchArg[cpuNum][3] = 0;
                    CPUPut("Slave %d returning to launch wait\n",cpuNum );
                }
            } else {
                if( launchAddr != 0 ) {
                    /* Start emulation: hand the slave a guest PC (via RA,
                     * see below) and the four launch args in a0-a3. */
                    CPUWarning("Launch slave PC 0x%x\n", launchAddr);
                    /* Note we set PC to RA before returning from here */
                    EMP[cpuNum].R[31] = launchAddr;
                    EMP[cpuNum].cpuStatus = cpu_running;
                    EMP[cpuNum].R[REG_A0] = sim_misc.launchArg[cpuNum][0];
                    EMP[cpuNum].R[REG_A1] = sim_misc.launchArg[cpuNum][1];
                    EMP[cpuNum].R[REG_A2] = sim_misc.launchArg[cpuNum][2];
                    EMP[cpuNum].R[REG_A3] = sim_misc.launchArg[cpuNum][3];
                    EMP[cpuNum].outOfSlaveLoop = 1;
                }
            }
        } else {
            /* Not a Prom call, just do it: pAddr is actually a host
             * function pointer for backdoor code. */
            int64 bdoorRetval;
            ASSERT(pAddr < 0x80000000);
            bdoorRetval = ((int64 (*)(int,int,int,int))pAddr)
                (EMP[cpuNum].R[REG_A0],
                 EMP[cpuNum].R[REG_A1],
                 EMP[cpuNum].R[REG_A2],
                 EMP[cpuNum].R[REG_A3]);
            /* 64-bit return value split across v0/v1 (R2/R3) */
            EMP[cpuNum].R[2] = bdoorRetval >> 32;
            EMP[cpuNum].R[3] = bdoorRetval & 0xffffffff;
        }
        ASSERT( embra.emode == EMBRA_PAGE ||embra.sequential || EMP[cpuNum].outTC );
        /* Resume guest execution at the return address */
        EMP[cpuNum].PC = EMP[cpuNum].R[31];
        ASSERT( (EMP[cpuNum].PC & 0x3) == 0 );
        if (embra.MPinUP) {
            /* NOTE: to get things to boot we need to do a CX after the */
            /* slave loop. */
            if( !EMP[cpuNum].outOfSlaveLoop ) {
                EMP[cpuNum].jumpPC = (uint)continue_run_without_chaining;
                ReenterTC_CX( &EMP[cpuNum] );
                ASSERT(0);
                /* NOT REACHED */
            }
        }
        ReenterTC( &EMP[cpuNum] );
        /* NOT REACHED */
    }
    if( status == EXCEPTION_CODE ) {
        /* Exception state was set by Em_TranslateVirtual; dispatch it. */
        ReenterTC(&EMP[cpuNum]);
        ASSERT(0);
        /* NOT REACHED */
        return 0;
    }
    return pAddr;
#ifdef gone
    return PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr);
#endif
}
/* Per-CPU flag array; not referenced anywhere in this file's visible code --
 * presumably used by watchpoint/debug callouts elsewhere (TODO confirm). */
static int debugOnWatchpoint[SIM_MAXCPUS];
/* called on miss in Quickcheck and Physarray
* In virtual quickcheck mode we first have to check
* the physical quickcheck in phys_mem_ref */
/*
 * mem_ref - slow-path memory reference handler, called on a miss in the
 * quickcheck (QC) and physarray structures.  In virtual-quickcheck mode
 * the physical quickcheck has already been consulted (phys_mem_ref).
 *
 * Translates vAddr for the access implied by new_state, performs cache
 * simulation (cache mode), fires watchpoint annotations, enforces
 * translation-cache coherence for stores, and downgrades executable
 * pages so future writes to code are detected.
 *
 * Returns the host memory address (MA) to use for the access, or 0 to
 * tell the caller (callout.s) to rewind/retry through the QC.  On
 * exceptions and TC flushes this re-enters the TC and never returns.
 *
 * Fixes vs. original: corrected the garbled SIGSEGV diagnostic
 * ("user-levle"/"stats=") and removed the unused local `retval`.
 */
MA mem_ref( VA vAddr, EmVQCMemState new_state, int cpuNum )
{
    /* Snapshot the TC generation so we can tell if a backdoor data
     * reference flushed the translation cache underneath us. */
    int prevTCGenNumber = tcGenNumber;
    PA pAddr;
    int status;
    Em_accesstype act;

    exceptionDuringBackdoor = FALSE;
    if( VQC_INST(new_state) ) {
        /* Instruction reference: fetch from the current PC, adjusted
         * past the branch if we are marked as in a delay slot. */
        vAddr = IN_BD(EMP[cpuNum].PC)?CLEAR_BD(EMP[cpuNum].PC)+INST_SIZE:
            EMP[cpuNum].PC;
        act = ACT_IREAD;
    } else {
        act = VQC_EXCL( new_state ) ? ACT_DWRITE : ACT_DREAD;
    }
    /* In cache mode, PQC missed, in page mode, mmu reloc missed */
    STAT_PQC(new_state);
    status = Em_TranslateVirtual( cpuNum, vAddr, &pAddr, act );
    /*
     * I guess that we could check even if the translation fails, but
     * it's overall faster to do it this way
     */
    if (status==NORMAL_CODE && annWatchpoints == TRUE) {
        /* Fire load/store watchpoint annotations on the virtual address */
        if (VQC_SHARED(new_state)) {
            EmbraAnnExec(cpuNum,AnnFMLookup(vAddr, ANNFM_LD_TYPE),ANNFM_LD_TYPE);
        }
        if (VQC_EXCL(new_state)) {
            EmbraAnnExec(cpuNum,AnnFMLookup(vAddr, ANNFM_ST_TYPE),ANNFM_ST_TYPE);
        }
    }
    if( status == NORMAL_CODE ) {
        /* NOTE: this code detects the case where we write code and then */
        /* jump to it. In that case we need to downgrade the code so we */
        /* can detect future writes. This detection occurs here and in */
        /* pc_tc_lookup depending on whether we detect the condition when */
        /* we jump to the code, or if we are executing inside a */
        /* translation */
        if( embra.emode == EMBRA_PAGE ) {
            if( VQC_EXCL( new_state ) ) {
                /* Store: flush the TC if it holds translations for this line */
                if( EmbraTCCoherenceCheck( cpuNum, vAddr,pAddr, pAddr+8 ) ) {
                    CPUWarning("Flushing the TC in mem_ref:1 (TC coherence) PC=0x%llx vAddr=0x%llx \n",
                               (Reg64)EMP[cpuNum].PC, (Reg64)vAddr);
                    ReenterTC( &EMP[cpuNum] );
                    /* NOT REACHED */
                }
            }
#ifdef EMBRA_USE_QC64
            if( VQC_INST( new_state )) {
                /* Downgrade page to read/execute so we can detect */
                /* writes to it */
                qc_downgrade( cpuNum, vAddr, new_state );
            }
#else
            if( VQC_INST( new_state ) &&
                (IS_MMU_PROT_WRITE(EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)]) ) ) {
                /* Downgrade page to read/execute so we can detect */
                /* writes to it */
                qc_downgrade( cpuNum, vAddr, new_state );
            }
#endif
            /* This returns to callout.s */
            return PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr);
        } else {
            /* ASSERT(embra.emode == EMBRA_CACHE); */
            /* If we are doing cache simulation, make the reference */
            if( NUM_CPUS(M_FROM_CPU(cpuNum)) == 1 ) {
                UPCache_Ref( cpuNum, pAddr, vAddr, new_state );
            } else {
                if( embra.MPinUP ) {
                    MPinUPCache_Ref( cpuNum, pAddr, vAddr, new_state );
                } else {
                    MPCache_Ref( cpuNum, pAddr, vAddr, new_state );
                }
            }
            if( VQC_INST( new_state )){
                if (embra.useVQC){
                    if (VQC_EXCL(EMP[cpuNum].qc_v[ADDR2SLINE(vAddr)])) {
                        /* Downgrade page to read/execute so we can detect */
                        /* writes to it */
                        qc_downgrade( cpuNum, vAddr, new_state );
                    }
                } else { /* !useVQC */
#ifdef EMBRA_USE_QC64
                    if (1) { /* always safe to do in QC64 */
#else
                    if (IS_MMU_PROT_WRITE(EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)]) ) {
#endif
                        /* Downgrade page to read/execute so we can detect */
                        /* writes to it */
                        qc_downgrade( cpuNum, vAddr, new_state );
                    }
                }
            }
            /* This maintains a data structure which allows us to determine */
            /* if we are writing to a page which has code in it which we are */
            /* executing. If such a conflict occurs, we flush the TC and */
            /* ReenterTC with the pc value */
            /* This is conservative because most stores are not doubles */
            if( VQC_EXCL( new_state ) ) {
                if( EmbraTCCoherenceCheck( cpuNum, vAddr,pAddr, pAddr+8 ) ) {
                    CPUWarning("Flushing the TC in mem_ref:2 (TC coherence) PC=0x%llx vAddr=0x%llx \n",
                               (Reg64)EMP[cpuNum].PC, (Reg64)vAddr);
                    ReenterTC( &EMP[cpuNum] );
                    /* NOT REACHED */
                }
            }
            /* And return Zero indicating rewind the QC */
            if( embra.sequential ) {
                /* No need to rewind in
                   MPinUP, if line is stolen
                   during stall, access
                   still succeeds, but sc fails */
                return PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr);
            } else {
                return 0;
            }
        }
    }
    else if (status == BACKDOOR_CODE) {
        if (tcGenNumber != prevTCGenNumber) {
            /*
             * the TC was flushed during the backdoor data addressed function
             * at this point, the RT register has already been set,
             * so we simply reenter the TC.
             * Caveat: we better NOT be in a BD.
             */
            if (!exceptionDuringBackdoor) {
                CPUPrint("EMBRA: %10lld cpu=%d TC flushed on backdoor ref (ok) \n",
                         (uint64) EmbraCpuCycleCount(cpuNum),cpuNum);
                ASSERT( !IN_BD(EMP[cpuNum].PC));
                EMP[cpuNum].PC += INST_SIZE;
                EMP[cpuNum].cycleCountdown--;
            } else {
                ASSERT( !(EMP[cpuNum].CP0[C0_CAUSE] & CAUSE_BD));
                EMP[cpuNum].CP0[C0_EPC] += INST_SIZE;
                /* Don't reexecute on return */
            }
            ReenterTC(&EMP[cpuNum]);
        }
        exceptionDuringBackdoor = FALSE;
        /*
         * XXX this cast is very important and makes sense:
         * for backdoor references pAddr is already a host address.
         */
        return (MA)pAddr;
    }
    else {
        if (status != EXCEPTION_CODE) {
            /* Unexpected translation status: report it and raise a bus error */
            CPUWarning("PROBABLY a user-level SIGSEGV! status=0x%x when 0x%x (EXCEPTION_CODE) expected.cpu=%d vAddr=0x%x \n",
                       status,EXCEPTION_CODE,cpuNum,vAddr);
            Em_EXCEPTION(cpuNum, (act==ACT_IREAD) ? EXC_IBE : EXC_DBE, 0);
            ReenterTC( &EMP[cpuNum] );
            /* NOT REACHED */
            return 0;
        }
        ASSERT( status == EXCEPTION_CODE );
        ReenterTC( &EMP[cpuNum] );
        /* NOT REACHED */
        return 0;
    }
}
/* *****************************************************
 * EmbraTCCoherenceCheck
 * *****************************************************/
/* State for detecting a "self-writing line" livelock: if the very next
 * coherence hit after a flush comes from the same PC and physical
 * address, flushing again would loop forever, so we skip the flush. */
static int just_flushed;  /* nonzero iff the previous check flushed the TC */
static VA last_pc;        /* guest PC at the time of the last flush */
static PA last_pAddr;     /* physical address that caused the last flush */
/*
 * EmbraTCCoherenceCheck - check whether a write to physical range
 * [pAddr, end) overlaps code held in the translation cache, and if so
 * flush the TC.
 *
 * Returns 1 when the TC was flushed (caller must re-enter the TC) and
 * 0 otherwise.  A flush immediately following a previous flush from the
 * same PC/physical address is suppressed to avoid an infinite loop when
 * the kernel writes code (e.g. the UTLB miss handler) and then flushes
 * the cache; vAddr == 0 is treated as that cache-flush special case.
 */
int EmbraTCCoherenceCheck(int cpuNum, VA vAddr, PA pAddr, PA end)
{
    if (!TCcoherence_check(PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr),
                           PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), end))) {
        /* No translated code in this range: clear the back-to-back
         * flush marker and let the write proceed. */
        just_flushed = 0;
        return 0;
    }

    if (just_flushed) {
        /* We flushed on the immediately preceding check.  Flushing
         * again from the same spot would livelock, so only diagnose
         * the genuine self-writing-line case and skip the flush. */
        if (vAddr && EMP[cpuNum].PC == last_pc && pAddr == last_pAddr) {
            CPUWarning("SELF-WRITING LINE code PA 0x%x VA 0x%x vPC 0x%x pPC 0x%x\n",
                       pAddr, vAddr, EMP[cpuNum].PC,
                       K0_TO_PHYS_REMAP(non_excepting_tv(cpuNum,
                                                         EMP[cpuNum].PC),
                                        cpuNum));
        }
        return 0;
    }

    /* This doesn't add appreciable cost, so always maintain it */
    em_stats.icache_coherence++;

    /* Kernel text is overwritten (ex. UTLB miss handler) */
    Clear_Translation_State(TCFLUSH_ALL );
    last_pc = EMP[cpuNum].PC;
    last_pAddr = pAddr;
    just_flushed = 1;

    /* Re-enter at the branch, not the delay slot, if we were in one. */
    if (IN_BD(EMP[cpuNum].PC)) {
        EMP[cpuNum].PC = CLEAR_BD(EMP[cpuNum].PC);
        EMP[cpuNum].PC -= INST_SIZE;
    }
    ASSERT((EMP[cpuNum].PC & 0x3) == 0);
    return 1;
}