--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -emulated-tls -relocation-model=pic < %s \
+; RUN: | FileCheck -check-prefix=RV32 %s
+; RUN: llc -mtriple=riscv64 -emulated-tls -relocation-model=pic < %s \
+; RUN: | FileCheck -check-prefix=RV64 %s
+
+@external_x = external thread_local global i32, align 8
+@y = thread_local global i8 7, align 2
+@internal_z = internal thread_local global i64 9, align 16
+
+; Takes the address of an externally-defined TLS variable. Under
+; -emulated-tls the address of the control variable
+; __emutls_v.external_x is loaded GOT-indirectly (got_pcrel_hi /
+; pcrel_lo pair) and passed to __emutls_get_address via PLT.
+define ptr @get_external_x() nounwind {
+; RV32-LABEL: get_external_x:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .Lpcrel_hi0:
+; RV32-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.external_x)
+; RV32-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi0)(a0)
+; RV32-NEXT: call __emutls_get_address@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: get_external_x:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .Lpcrel_hi0:
+; RV64-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.external_x)
+; RV64-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi0)(a0)
+; RV64-NEXT: call __emutls_get_address@plt
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+entry:
+  ret ptr @external_x
+}
+
+; Takes the address of a locally-defined, externally-visible TLS
+; variable. The control variable __emutls_v.y is still accessed
+; GOT-indirectly (got_pcrel_hi) because with external visibility it
+; may be preempted under -relocation-model=pic.
+define ptr @get_y() nounwind {
+; RV32-LABEL: get_y:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .Lpcrel_hi1:
+; RV32-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.y)
+; RV32-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi1)(a0)
+; RV32-NEXT: call __emutls_get_address@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: get_y:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .Lpcrel_hi1:
+; RV64-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.y)
+; RV64-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi1)(a0)
+; RV64-NEXT: call __emutls_get_address@plt
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+entry:
+  ret ptr @y
+}
+
+; Takes the address of an internal-linkage TLS variable. Because
+; __emutls_v.internal_z cannot be preempted, its address is computed
+; with a direct pc-relative pair (pcrel_hi / addi pcrel_lo) instead of
+; going through the GOT, before calling __emutls_get_address.
+define ptr @get_internal_z() nounwind {
+; RV32-LABEL: get_internal_z:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .Lpcrel_hi2:
+; RV32-NEXT: auipc a0, %pcrel_hi(__emutls_v.internal_z)
+; RV32-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
+; RV32-NEXT: call __emutls_get_address@plt
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: get_internal_z:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .Lpcrel_hi2:
+; RV64-NEXT: auipc a0, %pcrel_hi(__emutls_v.internal_z)
+; RV64-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
+; RV64-NEXT: call __emutls_get_address@plt
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+entry:
+  ret ptr @internal_z
+}
+
+; UTC_ARGS: --disable
+
+; RV32: .data
+; RV32: .globl __emutls_v.y
+; RV32: .p2align 2
+; RV32-LABEL: __emutls_v.y:
+; RV32-NEXT: .word 1
+; RV32-NEXT: .word 2
+; RV32-NEXT: .word 0
+; RV32-NEXT: .word __emutls_t.y
+; RV32: .section .rodata,
+; RV32-LABEL: __emutls_t.y:
+; RV32-NEXT: .byte 7
+; RV32: .data
+; RV32: .p2align 2
+; RV32-LABEL: __emutls_v.internal_z:
+; RV32-NEXT: .word 8
+; RV32-NEXT: .word 16
+; RV32-NEXT: .word 0
+; RV32-NEXT: .word __emutls_t.internal_z
+; RV32: .section .rodata,
+; RV32-LABEL: __emutls_t.internal_z:
+; RV32-NEXT: .quad 9
+
+; RV64: .data
+; RV64: .globl __emutls_v.y
+; RV64: .p2align 3
+; RV64-LABEL: __emutls_v.y:
+; RV64-NEXT: .quad 1
+; RV64-NEXT: .quad 2
+; RV64-NEXT: .quad 0
+; RV64-NEXT: .quad __emutls_t.y
+; RV64: .section .rodata,
+; RV64-LABEL: __emutls_t.y:
+; RV64-NEXT: .byte 7
+; RV64: .data
+; RV64: .p2align 3
+; RV64-LABEL: __emutls_v.internal_z:
+; RV64-NEXT: .quad 8
+; RV64-NEXT: .quad 16
+; RV64-NEXT: .quad 0
+; RV64-NEXT: .quad __emutls_t.internal_z
+; RV64: .section .rodata,
+; RV64-LABEL: __emutls_t.internal_z:
+; RV64-NEXT: .quad 9