#ifndef CPUDYNTRANSCOMPONENT_H
#define CPUDYNTRANSCOMPONENT_H

#include <assert.h>
#include <stdint.h>
#include <exception>
#include <iostream>
#include <vector>

// The base class is assumed to come from the surrounding component framework:
#include "CPUComponent.h"

using std::vector;

class GXemul;
class CPUDyntransComponent;


/*
 * A dyntrans instruction call.
 */
#define N_DYNTRANS_IC_ARGS	3

struct DyntransIC
{
	void (*f)(CPUDyntransComponent*, DyntransIC*);

	union {
		void*		p;
		uint32_t	u32;
	} arg[N_DYNTRANS_IC_ARGS];
};


#define DYNTRANS_PAGE_NSPECIALENTRIES	2

#define DECLARE_DYNTRANS_INSTR(name) \
	static void instr_##name(CPUDyntransComponent* cpubase, DyntransIC* ic);
#define DYNTRANS_INSTR(class,name) \
	void class::instr_##name(CPUDyntransComponent* cpubase, DyntransIC* ic)
#define DYNTRANS_INSTR_HEAD(class) \
	class* cpu = (class*) cpubase;

#define REG32(arg)	(*((uint32_t*)((arg).p)))
#define REG64(arg)	(*((uint64_t*)((arg).p)))

#define DYNTRANS_SYNCH_PC	cpu->m_nextIC = ic; cpu->DyntransResyncPC()


/*
 * A base class for processor Component implementations that use dynamic
 * translation.
 */
class CPUDyntransComponent
	: public CPUComponent
{
public:
	virtual int Execute(GXemul* gxemul, int nrOfCycles);

	static void RunUnitTests(int& nSucceeded, int& nFailures);

protected:
	virtual int GetDyntransICshift() const = 0;

	void DyntransToBeTranslatedBegin(struct DyntransIC*);
	bool DyntransReadInstruction(uint16_t& iword);
	bool DyntransReadInstruction(uint32_t& iword, int offset = 0);
	void DyntransToBeTranslatedDone(struct DyntransIC*);

	void DyntransResyncPC();
	void DyntransPCtoPointers();
	void DyntransClearICPage(struct DyntransIC* icpage);
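
	/*
	 * Illustrative sketch (not part of the original header; MyCPU is a
	 * hypothetical subclass name): a concrete CPU component declares its
	 * instruction implementations with DECLARE_DYNTRANS_INSTR in its class
	 * body, and defines them in its .cc file with DYNTRANS_INSTR and
	 * DYNTRANS_INSTR_HEAD:
	 *
	 *	// In the class declaration:
	 *	DECLARE_DYNTRANS_INSTR(add_u32);
	 *
	 *	// In the implementation file:
	 *	DYNTRANS_INSTR(MyCPU, add_u32)
	 *	{
	 *		DYNTRANS_INSTR_HEAD(MyCPU)
	 *
	 *		// arg[0..2].p are assumed to point at 32-bit registers,
	 *		// so REG32 can be used to access them:
	 *		REG32(ic->arg[0]) = REG32(ic->arg[1]) + REG32(ic->arg[2]);
	 *	}
	 */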

	class DyntransTranslationPage
	{
	public:
		DyntransTranslationPage(int nICentriesPerpage)
			: m_addr(0)
			, m_nextCacheEntryForAddr(-1)
			, m_prev(-1)
			, m_next(-1)
			, m_showFunctionTraceCall(false)
		{
			m_ic.resize(nICentriesPerpage);
		}

	public:
		// Emulated (physical) address that this page covers, and a link
		// to the next page in the same quick lookup table chain:
		uint64_t	m_addr;
		int		m_nextCacheEntryForAddr;

		// Links in the doubly-linked MRU list or free-list:
		int		m_prev;
		int		m_next;

		// The translated instruction calls:
		bool		m_showFunctionTraceCall;
		vector< struct DyntransIC >	m_ic;
	};
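
	/*
	 * Summary of the bookkeeping used by the translation cache below
	 * (derived from the code itself): every page is linked via m_prev and
	 * m_next into exactly one of two doubly-linked lists -- the free-list
	 * (unused pages) or the MRU list (pages holding translations, most
	 * recently used first). Pages in use are additionally reachable from
	 * m_addrToFirstPageIndex, a power-of-two sized table indexed by the low
	 * bits of the physical page number, with collisions chained through
	 * m_nextCacheEntryForAddr.
	 */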

	class DyntransTranslationCache
	{
	public:
		DyntransTranslationCache()
			: m_nICentriesPerpage(0)
			, m_pageShift(0)
			, m_firstFree(-1)
			, m_lastFree(-1)
			, m_firstMRU(-1)
			, m_lastMRU(-1)
		{
		}

		void Reinit(size_t approximateSize, int nICentriesPerpage, int pageShift)
		{
			size_t approximateSizePerPage =
			    sizeof(struct DyntransIC) * nICentriesPerpage + 64;
			size_t nrOfPages = approximateSize / approximateSizePerPage;

			// At least two translation pages are required:
			if (nrOfPages < 2) {
				std::cerr << "Too small translation cache!\n";
				throw std::exception();
			}

			// Nothing to do if the requested geometry is unchanged:
			if (nICentriesPerpage == m_nICentriesPerpage &&
			    nrOfPages == m_pageCache.size() &&
			    pageShift == m_pageShift)
				return;

			m_nICentriesPerpage = nICentriesPerpage;
			m_pageShift = pageShift;

			// Allocate the pages...
			m_pageCache.clear();
			m_pageCache.resize(nrOfPages, DyntransTranslationPage(nICentriesPerpage));

			// ... and place all of them on the free-list:
			m_firstFree = 0;
			m_lastFree = nrOfPages - 1;
			for (int i=m_firstFree; i<=m_lastFree; i++) {
				m_pageCache[i].m_prev = i-1;

				if (i == m_lastFree)
					m_pageCache[i].m_next = -1;
				else
					m_pageCache[i].m_next = i+1;
			}

			// The MRU list starts out empty:
			m_firstMRU = m_lastMRU = -1;

			// Quick lookup table from physical page number to page
			// index (sized for 1 GB of address space; the size must
			// be a power of two, since lookups mask with size-1):
			m_addrToFirstPageIndex.resize(1024 * 1048576 >> m_pageShift);
			for (size_t i=0; i<m_addrToFirstPageIndex.size(); ++i)
				m_addrToFirstPageIndex[i] = -1;

			ValidateConsistency();
		}
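
		/*
		 * Rough sizing example (illustrative only; the actual value of
		 * sizeof(DyntransIC) is platform-dependent): with 64-bit
		 * pointers a DyntransIC is typically 8 + 3*8 = 32 bytes, so
		 * 1024 IC entries per page give approximateSizePerPage =
		 * 32*1024 + 64 = 32832 bytes, and a 32 MB cache yields
		 * 33554432 / 32832 = 1022 translation pages.
		 */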

		void ValidateConsistency()
		{
			// Walk the free-list and the MRU list, both forwards
			// and in reverse:
			vector<bool> pageIsInMRUList;
			vector<bool> pageIsInFreeList;
			vector<bool> pageIsInMRUListReverse;
			vector<bool> pageIsInFreeListReverse;

			pageIsInMRUList.resize(m_pageCache.size(), false);
			pageIsInFreeList.resize(m_pageCache.size(), false);
			pageIsInMRUListReverse.resize(m_pageCache.size(), false);
			pageIsInFreeListReverse.resize(m_pageCache.size(), false);

			int i = m_firstFree;
			while (i >= 0) {
				pageIsInFreeList[i] = true;
				i = m_pageCache[i].m_next;
			}

			i = m_lastFree;
			while (i >= 0) {
				pageIsInFreeListReverse[i] = true;
				i = m_pageCache[i].m_prev;
			}

			i = m_firstMRU;
			while (i >= 0) {
				pageIsInMRUList[i] = true;
				i = m_pageCache[i].m_next;
			}

			i = m_lastMRU;
			while (i >= 0) {
				pageIsInMRUListReverse[i] = true;
				i = m_pageCache[i].m_prev;
			}

			for (size_t j=0; j<m_pageCache.size(); ++j) {
				if (pageIsInFreeList[j] != pageIsInFreeListReverse[j]) {
					std::cerr << "Forward and reverse Free-list iteration mismatch, position " << j << "!\n";
					throw std::exception();
				}

				if (pageIsInMRUList[j] != pageIsInMRUListReverse[j]) {
					std::cerr << "Forward and reverse MRU-list iteration mismatch, position " << j << "!\n";
					throw std::exception();
				}

				if ((pageIsInMRUList[j] ^ pageIsInFreeList[j]) == false) {
					std::cerr << "Each page should be in exactly ONE of the two lists, position " << j << "!\n";
					throw std::exception();
				}
			}

			// Check the quick lookup table and its chains:
			vector<bool> pageIsPointedToByQuickLookupTable;
			vector<bool> pageIsPointedToByQLTChain;
			pageIsPointedToByQuickLookupTable.resize(m_pageCache.size(), false);
			pageIsPointedToByQLTChain.resize(m_pageCache.size(), false);

			for (size_t k=0; k<m_addrToFirstPageIndex.size(); ++k)
				if (m_addrToFirstPageIndex[k] >= 0)
					pageIsPointedToByQuickLookupTable[m_addrToFirstPageIndex[k]] = true;

			for (size_t k=0; k<m_pageCache.size(); ++k) {
				int index = m_pageCache[k].m_nextCacheEntryForAddr;
				if (index >= 0)
					pageIsPointedToByQLTChain[index] = true;
			}

			// Pages on the free-list should not be referenced from
			// the quick lookup table at all:
			for (size_t k=0; k<pageIsInFreeList.size(); ++k) {
				if (!pageIsInFreeList[k])
					continue;

				if (m_pageCache[k].m_nextCacheEntryForAddr >= 0) {
					std::cerr << "Pages on the free-list should not have m_nextCacheEntryForAddr set!\n";
					throw std::exception();
				}

				if (pageIsPointedToByQuickLookupTable[k]) {
					std::cerr << "Pages on the free-list should not be pointed to by the quick lookup table!\n";
					throw std::exception();
				}

				if (pageIsPointedToByQLTChain[k]) {
					std::cerr << "Pages on the free-list should not be in the quick lookup table chain!\n";
					throw std::exception();
				}
			}

			// Pages on the MRU list must be findable via the quick
			// lookup table:
			for (size_t k=0; k<pageIsInMRUList.size(); ++k) {
				if (!pageIsInMRUList[k])
					continue;

				uint64_t addr = m_pageCache[k].m_addr;
				uint64_t physPageNumber = addr >> m_pageShift;
				int quickLookupIndex = physPageNumber & (m_addrToFirstPageIndex.size() - 1);
				int pageIndex = m_addrToFirstPageIndex[quickLookupIndex];

				while (pageIndex >= 0) {
					if (m_pageCache[pageIndex].m_addr == addr)
						break;

					pageIndex = m_pageCache[pageIndex].m_nextCacheEntryForAddr;
				}

				if (pageIndex < 0) {
					std::cerr << "Pages in the MRU list must be reachable from the quick lookup table!\n";
					throw std::exception();
				}
			}
		}
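
		/*
		 * In short, the invariants checked above are: the free-list and
		 * the MRU list read the same forwards and backwards; every page
		 * is on exactly one of the two lists; free pages are neither
		 * pointed to by the quick lookup table nor chained from it; and
		 * every page on the MRU list can be found again via the quick
		 * lookup table.
		 */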

		void FreeLeastRecentlyUsedPage()
		{
			// This should only be needed when there are no free
			// pages left:
			assert(m_firstFree < 0);

			if (m_firstMRU == m_lastMRU) {
				std::cerr << "Attempt to free a page, but there's only one page in the MRU list. Too small!\n";
				throw std::exception();
			}

			// Unlink the last (least recently used) page from the
			// MRU list...
			int index = m_lastMRU;
			assert(m_pageCache[index].m_prev >= 0);
			assert(m_pageCache[index].m_next < 0);

			m_lastMRU = m_pageCache[index].m_prev;
			m_pageCache[m_lastMRU].m_next = -1;

			// ... and put it first on the free-list:
			if (m_firstFree < 0) {
				// The free-list was empty:
				m_firstFree = m_lastFree = index;
				m_pageCache[index].m_prev = -1;
				m_pageCache[index].m_next = -1;
			} else {
				m_pageCache[index].m_prev = -1;
				m_pageCache[index].m_next = m_firstFree;
				m_pageCache[m_firstFree].m_prev = index;
				m_firstFree = index;
			}

			// Unlink the page from its quick lookup table chain:
			uint64_t physPageNumber = m_pageCache[index].m_addr >> m_pageShift;
			int quickLookupIndex = physPageNumber & (m_addrToFirstPageIndex.size() - 1);
			int pageIndex = m_addrToFirstPageIndex[quickLookupIndex];
			if (pageIndex == index) {
				// The page was first in the chain:
				m_addrToFirstPageIndex[quickLookupIndex] = m_pageCache[index].m_nextCacheEntryForAddr;
			} else {
				while (pageIndex >= 0) {
					if (m_pageCache[pageIndex].m_nextCacheEntryForAddr == index) {
						m_pageCache[pageIndex].m_nextCacheEntryForAddr = m_pageCache[index].m_nextCacheEntryForAddr;
						break;
					}

					pageIndex = m_pageCache[pageIndex].m_nextCacheEntryForAddr;
				}
			}

			m_pageCache[index].m_nextCacheEntryForAddr = -1;

			ValidateConsistency();
		}

		struct DyntransIC *AllocateNewPage(uint64_t addr, bool showFunctionTraceCall)
		{
			// Take the first page from the free-list...
			int index = m_firstFree;
			assert(index >= 0);

			if (index == m_lastFree) {
				// It was the only free page:
				m_firstFree = m_lastFree = -1;
			} else {
				m_firstFree = m_pageCache[index].m_next;
				m_pageCache[m_firstFree].m_prev = -1;
			}

			// ... and insert it first in the MRU list:
			if (m_firstMRU == -1) {
				// The MRU list was empty:
				m_firstMRU = m_lastMRU = index;
				m_pageCache[index].m_next = m_pageCache[index].m_prev = -1;
			} else {
				m_pageCache[m_firstMRU].m_prev = index;
				m_pageCache[index].m_next = m_firstMRU;
				m_pageCache[index].m_prev = -1;
				m_firstMRU = index;
			}

			m_pageCache[index].m_addr = addr;
			m_pageCache[index].m_showFunctionTraceCall = showFunctionTraceCall;

			// Insert the page first in its quick lookup table chain:
			uint64_t physPageNumber = addr >> m_pageShift;
			int quickLookupIndex = physPageNumber & (m_addrToFirstPageIndex.size() - 1);

			if (m_addrToFirstPageIndex[quickLookupIndex] < 0) {
				m_addrToFirstPageIndex[quickLookupIndex] = index;
				m_pageCache[index].m_nextCacheEntryForAddr = -1;
			} else {
				m_pageCache[index].m_nextCacheEntryForAddr = m_addrToFirstPageIndex[quickLookupIndex];
				m_addrToFirstPageIndex[quickLookupIndex] = index;
			}

			ValidateConsistency();

			return &(m_pageCache[index].m_ic[0]);
		}

		struct DyntransIC *GetICPage(uint64_t addr, bool showFunctionTraceCall, bool& clear)
		{
			clear = false;

			// Align the address to the start of its translation page:
			addr >>= m_pageShift;
			uint64_t physPageNumber = addr;
			addr <<= m_pageShift;

			// Look for the page in the quick lookup table chain:
			int quickLookupIndex = physPageNumber & (m_addrToFirstPageIndex.size() - 1);
			int pageIndex = m_addrToFirstPageIndex[quickLookupIndex];

			while (pageIndex >= 0) {
				if (m_pageCache[pageIndex].m_addr == addr)
					break;

				pageIndex = m_pageCache[pageIndex].m_nextCacheEntryForAddr;
			}

			if (pageIndex >= 0) {
				// The page was found in the cache. Move it first
				// in the MRU list, unless it is already there:
				if (m_firstMRU != pageIndex) {
					// Unlink it from its current position...
					int prev = m_pageCache[pageIndex].m_prev;
					int next = m_pageCache[pageIndex].m_next;

					m_pageCache[prev].m_next = next;
					if (next >= 0)
						m_pageCache[next].m_prev = prev;

					if (pageIndex == m_lastMRU)
						m_lastMRU = prev;

					// ... and re-insert it first in the list:
					m_pageCache[pageIndex].m_prev = -1;
					m_pageCache[pageIndex].m_next = m_firstMRU;
					m_pageCache[m_firstMRU].m_prev = pageIndex;
					m_firstMRU = pageIndex;
				}

				ValidateConsistency();

				// If the caller wants a different function call
				// trace setting, the page must be retranslated:
				if (m_pageCache[pageIndex].m_showFunctionTraceCall != showFunctionTraceCall) {
					m_pageCache[pageIndex].m_showFunctionTraceCall = showFunctionTraceCall;
					clear = true;
				}

				return &(m_pageCache[pageIndex].m_ic[0]);
			}

			// Not found in the cache. Evict the least recently used
			// page if necessary, then allocate a fresh one:
			if (m_firstFree < 0)
				FreeLeastRecentlyUsedPage();

			clear = true;
			return AllocateNewPage(addr, showFunctionTraceCall);
		}
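
		/*
		 * Typical usage (illustrative sketch only; the caller context,
		 * physAddrOfPC, and m_showFunctionTraceCall as a CPU member are
		 * hypothetical): the CPU looks up the translation page for the
		 * current physical PC and clears it if asked to:
		 *
		 *	bool clear;
		 *	struct DyntransIC* icpage = m_translationCache.GetICPage(
		 *	    physAddrOfPC, m_showFunctionTraceCall, clear);
		 *	if (clear)
		 *		DyntransClearICPage(icpage);
		 */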

	private:
		// Translation page geometry:
		int	m_nICentriesPerpage;
		int	m_pageShift;

		// Free-list and MRU-list heads and tails (indices into
		// m_pageCache, or -1 when a list is empty):
		int	m_firstFree;
		int	m_lastFree;
		int	m_firstMRU;
		int	m_lastMRU;

		// Quick lookup table: physical page number -> first page index:
		vector<int>	m_addrToFirstPageIndex;

		// The cached translation pages themselves:
		vector<DyntransTranslationPage>	m_pageCache;
	};

protected:
	// Dyntrans execution state:
	DyntransTranslationCache	m_translationCache;

	struct DyntransIC *	m_firstIConPage;
	struct DyntransIC *	m_nextIC;

	int			m_dyntransICentriesPerPage;
	int			m_nrOfCyclesToExecute;
};


#endif	// CPUDYNTRANSCOMPONENT_H