path: root/src/s390/codegen-s390.cc
Diffstat (limited to 'src/s390/codegen-s390.cc')
-rw-r--r--  src/s390/codegen-s390.cc  354
1 file changed, 28 insertions, 326 deletions
diff --git a/src/s390/codegen-s390.cc b/src/s390/codegen-s390.cc
index d92cc54a..6b842005 100644
--- a/src/s390/codegen-s390.cc
+++ b/src/s390/codegen-s390.cc
@@ -66,310 +66,13 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch_elements = r6;
- DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
- allocation_memento_found);
- }
-
- // Set transitioned map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode, Label* fail) {
- // lr contains the return address
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- Register elements = r6;
- Register length = r7;
- Register array = r8;
- Register array_end = array;
-
- // target_map parameter can be clobbered.
- Register scratch1 = target_map;
- Register scratch2 = r1;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
- scratch2));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ beq(&only_change_map, Label::kNear);
-
- // Preserve lr and use r14 as a temporary register.
- __ push(r14);
-
- __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- __ SmiToDoubleArrayOffset(r14, length);
- __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
- __ SubP(array, array, Operand(kHeapObjectTag));
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  // Set destination FixedDoubleArray's map.
- __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ AddP(scratch1, array, Operand(kHeapObjectTag));
- __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- __ AddP(target_map, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToDoubleArrayOffset(array, length);
- __ AddP(array_end, r9, array);
-// Repurpose registers no longer in use.
-#if V8_TARGET_ARCH_S390X
- Register hole_int64 = elements;
-#else
- Register hole_lower = elements;
- Register hole_upper = length;
-#endif
- // scratch1: begin of source FixedArray element fields, not tagged
-  // hole_lower: kHoleNanLower32 OR hole_int64
- // hole_upper: kHoleNanUpper32
- // array_end: end of destination FixedDoubleArray, not tagged
- // scratch2: begin of FixedDoubleArray element fields, not tagged
-
- __ b(&entry, Label::kNear);
-
- __ bind(&only_change_map);
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&done, Label::kNear);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(r14);
- __ b(fail);
-
- // Convert and copy elements.
- __ bind(&loop);
- __ LoadP(r14, MemOperand(scratch1));
- __ la(scratch1, MemOperand(scratch1, kPointerSize));
- // r1: current element
- __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
-
- // Normal smi, convert to double and store.
- __ ConvertIntToDouble(r14, d0);
- __ StoreDouble(d0, MemOperand(r9, 0));
- __ la(r9, MemOperand(r9, 8));
-
- __ b(&entry, Label::kNear);
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ LoadP(r1, MemOperand(r5, -kPointerSize));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray);
- }
-#if V8_TARGET_ARCH_S390X
- __ stg(hole_int64, MemOperand(r9, 0));
-#else
- __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
- __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
-#endif
- __ AddP(r9, Operand(8));
-
- __ bind(&entry);
- __ CmpP(r9, array_end);
- __ blt(&loop);
-
- __ pop(r14);
- __ bind(&done);
-}
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode, Label* fail) {
- // Register lr contains the return address.
- Label loop, convert_hole, gc_required, only_change_map;
- Register elements = r6;
- Register array = r8;
- Register length = r7;
- Register scratch = r1;
- Register scratch3 = r9;
- Register hole_value = r9;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
- scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ beq(&only_change_map);
-
- __ Push(target_map, receiver, key, value);
- __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedDoubleArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- // Re-use value and target_map registers, as they have been saved on the
- // stack.
- Register array_size = value;
- Register allocate_scratch = target_map;
- __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToPtrArrayOffset(r0, length);
- __ AddP(array_size, r0);
- __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
- NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
- r0);
- __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
-
- // Prepare for conversion loop.
- Register src_elements = elements;
- Register dst_elements = target_map;
- Register dst_end = length;
- Register heap_number_map = scratch;
- __ AddP(src_elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(length, length);
- __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
-
- Label initialization_loop, loop_done;
- __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
- __ beq(&loop_done, Label::kNear);
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- __ AddP(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ bind(&initialization_loop);
- __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
- __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
- __ BranchOnCount(scratch, &initialization_loop);
-
- __ AddP(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(dst_end, dst_elements, length);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses in src_elements to fully take advantage of
- // post-indexing.
- // dst_elements: begin of destination FixedArray element fields, not tagged
- // src_elements: begin of source FixedDoubleArray element fields,
- // not tagged, +4
- // dst_end: end of destination FixedArray, not tagged
- // array: destination FixedArray
- // hole_value: the-hole pointer
- // heap_number_map: heap number map
- __ b(&loop, Label::kNear);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(target_map, receiver, key, value);
- __ b(fail);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ StoreP(hole_value, MemOperand(dst_elements));
- __ AddP(dst_elements, Operand(kPointerSize));
- __ CmpLogicalP(dst_elements, dst_end);
- __ bge(&loop_done);
-
- __ bind(&loop);
- Register upper_bits = key;
- __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
- __ AddP(src_elements, Operand(kDoubleSize));
- // upper_bits: current element's upper 32 bit
- // src_elements: address of next element's upper 32 bit
- __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
- __ beq(&convert_hole, Label::kNear);
-
- // Non-hole double, copy value into a heap number.
- Register heap_number = receiver;
- Register scratch2 = value;
- __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
- &gc_required);
-// heap_number: new heap number
-#if V8_TARGET_ARCH_S390X
- __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
- // subtract tag for std
- __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
- __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
-#else
- __ LoadlW(scratch2,
- MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
- __ LoadlW(upper_bits,
- MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
- __ StoreW(scratch2,
- FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
- __ StoreW(upper_bits,
- FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
-#endif
- __ LoadRR(scratch2, dst_elements);
- __ StoreP(heap_number, MemOperand(dst_elements));
- __ AddP(dst_elements, Operand(kPointerSize));
- __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ CmpLogicalP(dst_elements, dst_end);
- __ blt(&loop);
- __ bind(&loop_done);
-
- __ Pop(target_map, receiver, key, value);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
Label* call_runtime) {
+ Label indirect_string_loaded;
+ __ bind(&indirect_string_loaded);
+
// Fetch the instance type of the receiver into result register.
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
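
The hunk above drops the platform-specific ElementsTransitionGenerator bodies. For orientation, here is a minimal, self-contained sketch of what the removed GenerateSmiToDouble stub did: walk a Smi-element backing store, convert each Smi to a double in a new backing store, and preserve holes as a dedicated hole-NaN bit pattern. This is not V8 code; the container types and the hole bit pattern are hypothetical stand-ins.

// Minimal sketch, not V8 code: models the Smi -> double backing-store
// conversion performed by the removed GenerateSmiToDouble stub.
// The hole bit pattern and container types are hypothetical stand-ins.
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

// Hypothetical stand-in for the hole NaN bit pattern.
constexpr uint64_t kHoleNanBits = 0x7FF7FFFFFFF7FFFFull;

std::vector<uint64_t> SmiToDoubleElements(
    const std::vector<std::optional<int32_t>>& smi_elements) {
  std::vector<uint64_t> double_elements;
  double_elements.reserve(smi_elements.size());
  for (const auto& element : smi_elements) {
    if (!element) {
      // The hole: store the hole-NaN bit pattern instead of a number.
      double_elements.push_back(kHoleNanBits);
    } else {
      // Normal Smi: convert to double and store its raw bits.
      double d = static_cast<double>(*element);
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      double_elements.push_back(bits);
    }
  }
  return double_elements;
}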
@@ -381,19 +84,25 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ beq(&check_sequential, Label::kNear /*, cr0*/);
// Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ mov(ip, Operand(kSlicedNotConsMask));
- __ LoadRR(r0, result);
- __ AndP(r0, ip /*, SetRC*/); // Should be okay to remove RC
- __ beq(&cons_string, Label::kNear /*, cr0*/);
+ Label cons_string, thin_string;
+ __ LoadRR(ip, result);
+ __ nilf(ip, Operand(kStringRepresentationMask));
+ __ CmpP(ip, Operand(kConsStringTag));
+ __ beq(&cons_string);
+ __ CmpP(ip, Operand(kThinStringTag));
+ __ beq(&thin_string);
// Handle slices.
- Label indirect_string_loaded;
__ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ SmiUntag(ip, result);
__ AddP(index, ip);
- __ b(&indirect_string_loaded, Label::kNear);
+ __ b(&indirect_string_loaded);
+
+ // Handle thin strings.
+ __ bind(&thin_string);
+ __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ b(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
@@ -406,10 +115,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ bne(call_runtime);
// Get the first of the two strings and load its instance type.
__ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ b(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
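
The hunk above reworks StringCharLoadGenerator to loop back through indirect_string_loaded and to recognise thin strings alongside cons and sliced strings. A rough, self-contained sketch of the resolution loop the generated code implements follows; it is not V8 code, and the Shape enum and Str struct are hypothetical simplifications.

// Minimal sketch, not V8 code: models the indirect-string resolution loop
// emitted by StringCharLoadGenerator. Sliced strings add their offset and
// redirect to the parent, cons strings (with an empty second part) redirect
// to their first part, and thin strings redirect to the actual string; the
// loop repeats until a direct (sequential or external) string is reached.
#include <cstdint>

enum class Shape { kSequential, kCons, kSliced, kThin };

struct Str {
  Shape shape;
  const Str* first;   // cons: first part (second part assumed empty here)
  const Str* parent;  // sliced: backing string
  const Str* actual;  // thin: the string it forwards to
  int32_t offset;     // sliced: start offset into the parent
};

const Str* ResolveIndirect(const Str* s, int32_t* index) {
  for (;;) {
    switch (s->shape) {
      case Shape::kSliced:
        *index += s->offset;  // adjust the character index into the parent
        s = s->parent;
        break;
      case Shape::kCons:
        s = s->first;  // flat cons: the character lives in the first part
        break;
      case Shape::kThin:
        s = s->actual;  // thin string is a forwarding wrapper
        break;
      case Shape::kSequential:
        return s;  // direct string: the character can be loaded here
    }
  }
}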
@@ -487,29 +193,25 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Code* code = NULL;
- Address target_address =
- Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
+
+ Code* code = NULL;
+ Address target_address =
+ Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// We need to push lr on stack so that GenerateMakeCodeYoungAgainCommon
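
The final hunk collapses the old (age, parity) pair into a single Code::Age. As a rough mental model, the new GetCodeAge amounts to: if the prologue still holds the young sequence, the code has no age; otherwise the call target patched into the prologue identifies an age stub, and that stub's age is returned. The sketch below is not V8 code; the Prologue struct, the stub registry, and the age names are hypothetical simplifications.

// Minimal sketch, not V8 code: models the simplified age query after the
// removal of MarkingParity. The prologue is either the original "young"
// sequence or has been patched with a call to an age-specific stub.
#include <cstdint>
#include <map>

enum class Age { kNoAge, kQuadragenarian, kQuinquagenarian, kSexagenarian };

struct Prologue {
  bool is_young_sequence;  // prologue bytes still unpatched?
  uintptr_t call_target;   // if patched: address of the age stub called
};

// Hypothetical registry mapping each age stub's entry point to its age.
using StubAgeMap = std::map<uintptr_t, Age>;

Age GetCodeAge(const Prologue& prologue, const StubAgeMap& stub_ages) {
  if (prologue.is_young_sequence) return Age::kNoAge;  // still young
  auto it = stub_ages.find(prologue.call_target);      // look up the stub
  return it == stub_ages.end() ? Age::kNoAge : it->second;
}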