/* This file is part of the program psim.

   Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include "registers.h"

#include "interrupts.h"
   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_maps for the instruction
   and data address spaces, so that writes to instruction space or
   execution from data space are prevented.

   For the OEA model things are more complex.  The reason for separate
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts: an instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */
/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal data
   structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for the BAT registers are rebuilt
   whenever the processor moves between problem and system states.

   Unpacked values are stored in the OEA structures pre-aligned to
   where they will be needed when forming a PTE address. */
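/* As an illustration only (the actual synchronization points live in the
   instruction and interrupt models, not in this file), a context-changing
   event such as an mtmsr would be followed by a call along the lines of:

       vm_synchronize_context(virtual, sprs, srs, new_msr, ...);

   which re-unpacks the BATs, the segment registers and SDR1 into the
   shadow structures below.  The argument list shown here is abbreviated;
   see the definition of vm_synchronize_context() later in this file. */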
   Matrix of processor state, type of access and validity */

  om_data_read, om_data_write,
  om_instruction_read, om_access_any,

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  { 1, 1, 1, 1 }, /* 00 */
  { 1, 1, 1, 1 }, /* 01 */
  { 1, 1, 1, 1 }, /* 10 */
  { 1, 0, 1, 1 }, /* 11 */
  /* K bit == 1 or P bit valid */
  { 0, 0, 0, 0 }, /* 00 */
  { 1, 0, 1, 1 }, /* 01 */
  { 1, 1, 1, 1 }, /* 10 */
  { 1, 0, 1, 1 }, /* 11 */
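/* The table reads as om_valid_access[key][PP][access].  For example, with
   the key bit set (second group) a PP value of 0b00 refuses every access,
   while PP 0b11 permits reads and instruction fetches but refuses writes.
   Note that PP 0b11 is read-only in both groups, i.e. regardless of the
   key bit. */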
   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
   In this model the 32-bit and 64-bit segment tables are treated in
   very similar ways.  The 32-bit segment registers are treated as a
   simplification of the 64-bit segment TLB. */
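/* For example, with 16 entries the segment "TLB" index is simply the top
   four bits of the effective address (bits 32..35 in the 64-bit numbering
   used by om_segment_tlb_index() below), so an EA of 0x30001234 selects
   entry 3 -- exactly the behaviour of segment register SR3 on a 32-bit
   implementation.  (The 32-bit values of the index constants are not
   shown in this excerpt; this example assumes they select the same top
   four address bits.) */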
enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id; /* aligned ready for pte group addr */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word masked_effective_segment_id;
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
   This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */
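/* Because the TLB is direct mapped, entry selection uses only effective
   address bits 46..51, giving 64 entries.  For example, two pages whose
   effective addresses differ only above bit 46 share an entry, so touching
   them alternately forces each access back to the hash table. */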
enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  nr_om_page_tlb_constants

typedef struct _om_page_tlb_entry {
  unsigned_word real_address_of_pte_1;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
/* memory translation:

   OEA memory translation may involve BAT, SR, TLB and HTAB lookups. */

typedef struct _om_map {

  /* local cache of register values */
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */

  /* address xor for PPC endian */
  unsigned xor[WITH_XOR_ENDIAN];
   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  /* translate effective to real */

struct _vm_data_map {
  /* translate effective to real */
  /* real memory for translated address */
   Underlying memory object.  For the VEA this is just the
   core_map.  For the OEA it is the instruction and data memory
   maps. */

  /* OEA: base address registers */

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  vm_instruction_map instruction_map;
  vm_data_map data_map;
/* OEA Support procedures */

om_segment_tlb_index(unsigned_word ea)
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);

om_page_tlb_index(unsigned_word ea)
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
om_hash_page(unsigned_word masked_vsid,
  unsigned_word extracted_ea = EXTRACTED(ea, 36, 51);
#if (WITH_TARGET_WORD_BITSIZE == 32)
  unsigned_word masked_ea = INSERTED32(extracted_ea, 7, 31-6);
  unsigned_word hash = masked_vsid ^ masked_ea;
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word masked_ea = INSERTED64(extracted_ea, 18, 63-7);
  unsigned_word hash = masked_vsid ^ masked_ea;
  TRACE(trace_vm, ("ea=0x%lx - masked-vsid=0x%lx masked-ea=0x%lx hash=0x%lx\n",
                   (unsigned long)masked_vsid,
                   (unsigned long)masked_ea,
                   (unsigned long)hash));
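/* A worked example of the primary hash (32-bit case, purely illustrative
   numbers): a VSID of 0xabcd is held pre-shifted as masked_vsid =
   0xabcd << 6 = 0x2af340; the 16-bit page index (EA bits 36..51, the bits
   between the segment field and the byte offset) of 0x1234 is inserted at
   the same alignment, masked_ea = 0x1234 << 6 = 0x48d00; the hash is then
   0x2af340 ^ 0x48d00 = 0x2e7e40.  ORed (after masking with
   page_table_hash_mask) onto the page table origin this gives the primary
   PTE group address; the secondary hash is the one's complement of the
   primary (see om_virtual_to_real() below). */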
om_pte_0_api(unsigned_word pte_0)
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 26, 31);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 52, 56);

om_pte_0_hash(unsigned_word pte_0)
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 25, 25);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 62, 62);

om_pte_0_valid(unsigned_word pte_0)
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return MASKED32(pte_0, 0, 0) != 0;
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return MASKED64(pte_0, 63, 63) != 0;

om_ea_masked_page(unsigned_word ea)
  return MASKED(ea, 36, 51);

om_ea_masked_byte(unsigned_word ea)
  return MASKED(ea, 52, 63);

/* return the VSID aligned for pte group addr */
om_pte_0_masked_vsid(unsigned_word pte_0)
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return INSERTED32(EXTRACTED32(pte_0, 1, 24), 31-6-24+1, 31-6);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return INSERTED64(EXTRACTED64(pte_0, 0, 51), 63-7-52+1, 63-7);

om_pte_1_pp(unsigned_word pte_1)
  return MASKED(pte_1, 62, 63); /* PP */

om_pte_1_referenced(unsigned_word pte_1)
  return EXTRACTED(pte_1, 55, 55);

om_pte_1_changed(unsigned_word pte_1)
  return EXTRACTED(pte_1, 56, 56);

om_pte_1_masked_rpn(unsigned_word pte_1)
  return MASKED(pte_1, 0, 51); /* RPN */

om_ea_api(unsigned_word ea)
  return EXTRACTED(ea, 36, 41);
/* Page and segment table read/write operators; these still need to
   account for the PPC's XOR address modification */
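/* For example, assuming WITH_XOR_ENDIAN is 8 and the MSR little-endian bit
   disagrees with the simulator's own byte order, the xor[] table built in
   vm_synchronize_context() gives: a 1-byte access XORs the real address
   with 7, a 2-byte access with 6, a 4-byte access with 4 and an 8-byte
   access with 0 -- the usual PowerPC little-endian address munge.  When
   the two byte orders agree the table is left zero and the XOR is a
   no-op. */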
om_read_word(om_map *map,
    ra ^= map->xor[sizeof(instruction_word) - 1];
  return core_map_read_word(map->physical, ra, processor, cia);

om_write_word(om_map *map,
    ra ^= map->xor[sizeof(instruction_word) - 1];
  core_map_write_word(map->physical, ra, val, processor, cia);
/* Bring things into existence */
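/* A rough usage sketch (argument lists abbreviated; the real call sites
   are in the simulator start-up code, not in this file):

       vm *virtual = vm_create(physical);
       vm_data_map *data_map = vm_create_data_map(virtual);
       vm_instruction_map *instruction_map = vm_create_instruction_map(virtual);

   where `physical' is the core object describing raw physical memory.
   The two map objects are then used for all effective-address data
   accesses and instruction fetches. */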
vm_create(core *physical)

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);
om_effective_to_bat(om_map *map,
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
(om_segment_tlb_entry *)
om_effective_to_virtual(om_map *map,

  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  TRACE(trace_vm, ("ea=0x%lx - sr[%ld] - masked-vsid=0x%lx va=0x%lx%07lx\n",
                   (long)om_segment_tlb_index(ea),
                   (unsigned long)segment_tlb_entry->masked_virtual_segment_id,
                   (unsigned long)EXTRACTED32(segment_tlb_entry->masked_virtual_segment_id, 31-6-24+1, 31-6),
                   (unsigned long)EXTRACTED32(ea, 4, 31)));
  return segment_tlb_entry;

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;

  /* drats, segment tlb missed */

    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
        (map->real_address_of_segment_table
         | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
           segment_table_entry < (segment_table_entry_group
                                  + sizeof_segment_table_entry_group);
           segment_table_entry += sizeof_segment_table_entry) {
        unsigned_word segment_table_entry_dword_0 =
          om_read_word(map, segment_table_entry, processor, cia);
        unsigned_word segment_table_entry_dword_1 =
          om_read_word(map, segment_table_entry + 8,
        int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
        unsigned_word masked_effective_segment_id =
          MASKED64(segment_table_entry_dword_0, 0, 35);
        if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
          /* don't permit direct-store (T=1) segments */
          if (MASKED64(segment_table_entry_dword_0, 57, 57))
            error("om_effective_to_virtual() - T=1 in STE not supported\n");
          /* update segment tlb */
          segment_tlb_entry->is_valid = is_valid;
          segment_tlb_entry->masked_effective_segment_id =
            masked_effective_segment_id;
          segment_tlb_entry->key[om_supervisor_state] =
            EXTRACTED64(segment_table_entry_dword_0, 58, 58);
          segment_tlb_entry->key[om_problem_state] =
            EXTRACTED64(segment_table_entry_dword_0, 59, 59);
          segment_tlb_entry->invalid_access =
            (MASKED64(segment_table_entry_dword_0, 60, 60)
             ? om_instruction_read
          segment_tlb_entry->masked_virtual_segment_id =
            INSERTED64(EXTRACTED64(segment_table_entry_dword_1, 0, 51),
                       18-13, 63-7); /* aligned ready for pte group addr */
          return segment_tlb_entry;

      segment_id_hash = ~segment_id_hash;
(om_page_tlb_entry *)
om_virtual_to_real(om_map *map,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,

  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if ((page_tlb_entry->masked_virtual_segment_id
       == segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page
          == om_ea_masked_page(ea))) {
    TRACE(trace_vm, ("ea=0x%lx - tlb hit - tlb=0x%lx\n",
                     (long)ea, (long)page_tlb_entry));
    return page_tlb_entry;

  /* drats, it is a tlb miss */

    unsigned_word page_hash =
      om_hash_page(segment_tlb_entry->masked_virtual_segment_id, ea);

    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
        (map->real_address_of_page_table
         | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte_0;
            ("ea=0x%lx - htab search %d - htab=0x%lx hash=0x%lx mask=0x%lx pteg=0x%lx\n",
             (long)ea, current_hash,
             map->real_address_of_page_table,
             map->page_table_hash_mask,
             (long)real_address_of_pte_group));
      for (real_address_of_pte_0 = real_address_of_pte_group;
           real_address_of_pte_0 < (real_address_of_pte_group
           real_address_of_pte_0 += sizeof_pte) {
        unsigned_word pte_0 = om_read_word(map,
                                           real_address_of_pte_0,

        if (om_pte_0_valid(pte_0)
            && (current_hash == om_pte_0_hash(pte_0))
            && (segment_tlb_entry->masked_virtual_segment_id
                == om_pte_0_masked_vsid(pte_0))
            && (om_ea_api(ea) == om_pte_0_api(pte_0))) {
          unsigned_word real_address_of_pte_1 = (real_address_of_pte_0
          unsigned_word pte_1 = om_read_word(map,
                                             real_address_of_pte_1,
          page_tlb_entry->protection = om_pte_1_pp(pte_1);
          page_tlb_entry->changed = om_pte_1_changed(pte_1);
          page_tlb_entry->masked_virtual_segment_id = segment_tlb_entry->masked_virtual_segment_id;
          page_tlb_entry->masked_page = om_ea_masked_page(ea);
          page_tlb_entry->masked_real_page_number = om_pte_1_masked_rpn(pte_1);
          page_tlb_entry->real_address_of_pte_1 = real_address_of_pte_1;
          if (!om_pte_1_referenced(pte_1)) {
                          real_address_of_pte_1,
                  ("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
                   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
                  ("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
                   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));

          return page_tlb_entry;
      page_hash = ~page_hash; /* retry with the secondary hash - the one's complement of the primary */
om_interrupt(cpu *processor,
             om_access_types access,
             storage_interrupt_reasons reason)
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    error("internal error - om_interrupt - unexpected access type %d", access);
om_translate_effective_to_real(om_map *map,
                               om_access_types access,
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;

  if (!map->is_relocate) {
    TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx\n",
                     (long)ea, (long)ra));

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("ea=0x%lx - bat access violation\n", (long)ea));
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("ea=0x%lx - bat translation - ra=0x%lx\n",
                     (long)ea, (long)ra));

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - segment tlb miss\n", (long)ea));
    om_interrupt(processor, cia, ea, access,
                 segment_table_miss_storage_interrupt);

  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("ea=0x%lx - segment access invalid\n", (long)ea));
    om_interrupt(processor, cia, ea, access,
                 protection_violation_storage_interrupt);

  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb miss\n", (long)ea));
    om_interrupt(processor, cia, ea, access,
                 hash_table_miss_storage_interrupt);

  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
    TRACE(trace_vm, ("ea=0x%lx - page tlb access violation\n", (long)ea));
    om_interrupt(processor, cia, ea, access,
                 protection_violation_storage_interrupt);

  /* update change bit as needed */
  if (access == om_data_write && !page_tlb_entry->changed) {
    unsigned_word pte_1 = om_read_word(map,
                                       page_tlb_entry->real_address_of_pte_1,
                  page_tlb_entry->real_address_of_pte_1,
    TRACE(trace_vm, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
                     (long)ea, (long)page_tlb_entry,
                     (long)page_tlb_entry->real_address_of_pte_1));

  ra = (page_tlb_entry->masked_real_page_number | om_ea_masked_byte(ea));
  TRACE(trace_vm, ("ea=0x%lx - page translation - ra=0x%lx\n",
                   (long)ea, (long)ra));
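/* The translation priority above is: with relocation disabled the
   effective address is used as the real address unchanged; otherwise a
   matching BAT wins (and involves no reference/change bit handling); only
   when no BAT matches are the segment and page translations consulted.
   So, for example, an ea covered by a valid DBAT never touches the
   segment registers, the page TLB or the hash table. */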
 * Definition of operations for memory management

/* rebuild all the relevant bat information */
om_unpack_bat(om_bat *bat,
  /* for extracting out the offset within the block */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
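/* A worked example with illustrative values: for a 128kb BAT the BL field
   in the upper BAT register is zero, so block_length_mask is just the low
   17 bits (0x1ffff) and block_effective_page_index_mask is its complement.
   An effective address matches when its remaining high bits equal
   block_effective_page_index, and om_translate_effective_to_real() then
   forms ra = (ea & 0x1ffff) | block_real_page_number.  Each BL bit set
   doubles the block size, up to 256mb with BL all ones. */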
/* rebuild the given bat table */
om_unpack_bats(om_bats *bats,
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 63, 63)
        : EXTRACTED(ubat, 62, 62)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
      bats->nr_valid_bat_registers += 1;
#if (WITH_TARGET_WORD_BITSIZE == 32)
om_unpack_sr(vm *virtual,
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check that the register is in range */
  ASSERT(which_sr >= 0 && which_sr < nr_om_segment_tlb_entries);

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this? */
  if (MASKED32(new_sr_value, 0, 0))
    cpu_error(processor, cia, "unsupported value of T in segment register %d",

  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
  segment_tlb_entry->masked_virtual_segment_id =
    INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
               31-6-24+1, 31-6); /* aligned ready for pte group addr */
#if (WITH_TARGET_WORD_BITSIZE == 32)
om_unpack_srs(vm *virtual,
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr,
/* Rebuild all the data structures for the new context as specified by
   the passed registers */
vm_synchronize_context(vm *virtual,

  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;
  int little_endian = (msr & msr_little_endian_mode) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(18+28-EXTRACTED64(sprs[spr_sdr1], 59, 63),
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = (INSERTED32(EXTRACTED32(sprs[spr_sdr1], 23, 31),
                          | MASK32(7+9, 31-6));
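/* Worked example for the 32-bit case: with HTABMASK zero the hash mask
   comes out as 0x0000ffc0, i.e. ten hash bits selecting one of 1024
   64-byte PTE groups -- the architected minimum 64kb page table.  Each
   additional HTABMASK bit doubles the table, while HTABORG
   (real_address_of_page_table) supplies the upper 16 address bits. */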
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;

  /* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
  om_unpack_srs(virtual, srs,

  /* set up the XOR registers if the current endian mode conflicts
     with what is in the MSR */
  if (WITH_XOR_ENDIAN) {
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER == LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN))

      mask = WITH_XOR_ENDIAN - 1;
    while (i - 1 < WITH_XOR_ENDIAN) {
      virtual->instruction_map.translation.xor[i-1] = mask;
      virtual->data_map.translation.xor[i-1] = mask;
      mask = (mask << 1) & (WITH_XOR_ENDIAN - 1);

  /* don't allow the processor to change endian modes */
  if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
      || (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
    cpu_error(processor, cia, "attempt to change hardwired byte order");
/* update vm data structures due to a TLB operation */
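/* Note on the invalidation scheme below: an entry is "emptied" simply by
   setting its masked_virtual_segment_id to all ones (MASK(0, 63)).  Real
   masked VSIDs are stored pre-shifted, so their low alignment bits are
   always zero and the all-ones value can never compare equal in
   om_virtual_to_real(); the entry therefore behaves as invalid without
   needing a separate valid flag. */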
vm_page_tlb_invalidate_entry(vm *memory,
  int i = om_page_tlb_index(ea);
  memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  TRACE(trace_vm, ("ea=0x%lx - tlb invalidate entry\n", (long)ea));

vm_page_tlb_invalidate_all(vm *memory)
  for (i = 0; i < nr_om_page_tlb_entries; i++) {
    memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
    memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  TRACE(trace_vm, ("tlb invalidate all\n"));

vm_create_data_map(vm *memory)
  return &memory->data_map;

(vm_instruction_map *)
vm_create_instruction_map(vm *memory)
  return &memory->instruction_map;

vm_translate(om_map *map,
             om_access_types access,
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
    error("internal error - vm_translate - bad switch");

vm_real_data_addr(vm_data_map *map,
  return vm_translate(&map->translation,
                      is_read ? om_data_read : om_data_write,

vm_real_instruction_addr(vm_instruction_map *map,
  return vm_translate(&map->translation,
                      om_instruction_read,

vm_instruction_map_read(vm_instruction_map *map,
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  if (WITH_XOR_ENDIAN)
    ra ^= map->translation.xor[sizeof(instruction_word) - 1];
  return core_map_read_4(map->code, ra, processor, cia);

vm_data_map_read_buffer(vm_data_map *map,
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    processor, /* processor */
                                    processor != NULL); /* abort? */
    if (ra == MASK(0, 63))
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
    ((unsigned_1*)target)[count] = T2H_1(byte);

vm_data_map_write_buffer(vm_data_map *map,
                         int violate_read_only_section,
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    processor != NULL); /* abort? */
    if (ra == MASK(0, 63))
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                              &byte, ra, sizeof(byte)) != sizeof(byte))

/* define the read/write 1/2/4/8/word functions */