From 04ff86fc4a11a63b3218a24a58f2dd1dccd99125 Mon Sep 17 00:00:00 2001 From: KitsuneAlex Date: Sat, 7 Dec 2024 08:36:17 +0100 Subject: [PATCH 1/2] Implement KT-73565 and basic box tests --- .../kotlin/kotlinx/cinterop/JvmNativeMem.kt | 76 ++++++++- .../src/main/kotlin/kotlinx/cinterop/Utils.kt | 96 ++++++++--- .../kotlin/kotlinx/cinterop/NativeMem.kt | 153 ++++++++++-------- .../backend/konan/llvm/CodeGenerator.kt | 123 ++++++++++---- .../kotlin/backend/konan/llvm/ContextUtils.kt | 40 ++++- .../backend/konan/llvm/IntrinsicGenerator.kt | 78 ++++++--- .../kotlin/native/internal/IntrinsicType.kt | 4 + .../testData/codegen/cinterop/kt73565.kt | 54 +++++++ .../FirNativeCodegenLocalTestGenerated.java | 6 + .../NativeCodegenLocalTestGenerated.java | 6 + 10 files changed, 485 insertions(+), 151 deletions(-) create mode 100644 native/native.tests/testData/codegen/cinterop/kt73565.kt diff --git a/kotlin-native/Interop/Runtime/src/jvm/kotlin/kotlinx/cinterop/JvmNativeMem.kt b/kotlin-native/Interop/Runtime/src/jvm/kotlin/kotlinx/cinterop/JvmNativeMem.kt index cf1f2b9a655c1..7ac979440f672 100644 --- a/kotlin-native/Interop/Runtime/src/jvm/kotlin/kotlinx/cinterop/JvmNativeMem.kt +++ b/kotlin-native/Interop/Runtime/src/jvm/kotlin/kotlinx/cinterop/JvmNativeMem.kt @@ -45,10 +45,10 @@ internal object nativeMemUtils { fun getShort(mem: NativePointed) = unsafe.getShort(mem.address) fun putShort(mem: NativePointed, value: Short) = unsafe.putShort(mem.address, value) - + fun getInt(mem: NativePointed) = unsafe.getInt(mem.address) fun putInt(mem: NativePointed, value: Int) = unsafe.putInt(mem.address, value) - + fun getLong(mem: NativePointed) = unsafe.getLong(mem.address) fun putLong(mem: NativePointed, value: Long) = unsafe.putLong(mem.address, value) @@ -77,11 +77,19 @@ internal object nativeMemUtils { } fun getCharArray(source: NativePointed, dest: CharArray, length: Int) { - unsafe.copyMemory(null, source.address, dest, charArrayBaseOffset, length.toLong() * 2) + 
unsafe.copyMemory(null, source.address, dest, charArrayBaseOffset, length.toLong() * Char.SIZE_BYTES) } fun putCharArray(source: CharArray, dest: NativePointed, length: Int) { - unsafe.copyMemory(source, charArrayBaseOffset, null, dest.address, length.toLong() * 2) + unsafe.copyMemory(source, charArrayBaseOffset, null, dest.address, length.toLong() * Char.SIZE_BYTES) + } + + fun getFloatArray(source: NativePointed, dest: FloatArray, length: Int) { + unsafe.copyMemory(null, source.address, dest, floatArrayBaseOffset, length.toLong() * Float.SIZE_BYTES) + } + + fun putFloatArray(source: FloatArray, dest: NativePointed, length: Int) { + unsafe.copyMemory(source, floatArrayBaseOffset, null, dest.address, length.toLong() * Float.SIZE_BYTES) } fun zeroMemory(dest: NativePointed, length: Int): Unit = @@ -90,6 +98,65 @@ internal object nativeMemUtils { fun copyMemory(dest: NativePointed, length: Int, src: NativePointed) = unsafe.copyMemory(src.address, dest.address, length.toLong()) + fun memset(mem: NativePointed, value: Byte, size: Int) { + unsafe.setMemory(mem.address, size.toLong(), value) + } + + fun memset(mem: NativePointed, value: Byte, size: Long) { + unsafe.setMemory(mem.address, size, value) + } + + fun memcpy(dstMem: NativePointed, srcMem: NativePointed, size: Int) { + unsafe.copyMemory(srcMem.address, dstMem.address, size.toLong()) + } + + fun memcpy(dstMem: NativePointed, srcMem: NativePointed, size: Long) { + unsafe.copyMemory(srcMem.address, dstMem.address, size) + } + + fun memmove(dstMem: NativePointed, srcMem: NativePointed, size: Int) { + val src = srcMem.address + val dest = dstMem.address + if (src < dest) { // Backwards copy to allow overlap + var index = size - 1 + while (index >= 0) { + unsafe.putByte(dest + index, unsafe.getByte(src + index)) + index-- + } + return + } + unsafe.copyMemory(src, dest, size.toLong()) + } + + fun memmove(dstMem: NativePointed, srcMem: NativePointed, size: Long) { + val src = srcMem.address + val dest = dstMem.address + 
if (src < dest) { // Backwards copy to allow overlap + var index = size - 1L + while (index >= 0) { + unsafe.putByte(dest + index, unsafe.getByte(src + index)) + index-- + } + return + } + unsafe.copyMemory(src, dest, size) + } + + fun memcmp(memA: NativePointed, memB: NativePointed, size: Long): Int { + var index = 0L + val addressA = memA.address + val addressB = memB.address + while (index < size) { + val a = unsafe.getByte(addressA + index).toInt() and 0xFF + val b = unsafe.getByte(addressB + index).toInt() and 0xFF + when { + a < b -> return -1 + a > b -> return 1 + } + index++ + } + return 0 + } @Suppress("NON_PUBLIC_CALL_FROM_PUBLIC_INLINE") inline fun allocateInstance(): T { @@ -117,4 +184,5 @@ internal object nativeMemUtils { private val byteArrayBaseOffset = unsafe.arrayBaseOffset(ByteArray::class.java).toLong() private val charArrayBaseOffset = unsafe.arrayBaseOffset(CharArray::class.java).toLong() + private val floatArrayBaseOffset = unsafe.arrayBaseOffset(FloatArray::class.java).toLong() } diff --git a/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt b/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt index f7fc710da0eaf..4fb42003ada8a 100644 --- a/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt +++ b/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt @@ -23,6 +23,7 @@ public interface NativeFreeablePlacement : NativePlacement { @ExperimentalForeignApi public fun NativeFreeablePlacement.free(pointer: CPointer<*>): Unit = this.free(pointer.rawValue) + @ExperimentalForeignApi public fun NativeFreeablePlacement.free(pointed: NativePointed): Unit = this.free(pointed.rawPtr) @@ -153,10 +154,10 @@ public inline fun NativePlacement.allocArray(length: Int */ @ExperimentalForeignApi public inline fun NativePlacement.allocArray(length: Long, - initializer: T.(index: Long)->Unit): CArrayPointer { + initializer: T.(index: Long) -> Unit): CArrayPointer { val res = allocArray(length) - (0 .. 
length - 1).forEach { index -> + (0..length - 1).forEach { index -> res[index].initializer(index) } @@ -170,9 +171,9 @@ public inline fun NativePlacement.allocArray(length: Lon */ @ExperimentalForeignApi public inline fun NativePlacement.allocArray( - length: Int, initializer: T.(index: Int)->Unit): CArrayPointer = allocArray(length.toLong()) { index -> - this.initializer(index.toInt()) - } + length: Int, initializer: T.(index: Int) -> Unit): CArrayPointer = allocArray(length.toLong()) { index -> + this.initializer(index.toInt()) +} /** @@ -228,13 +229,9 @@ public fun NativePlacement.allocArrayOf(elements: ByteArray): CArrayPointer { - val res = allocArray(elements.size) - var index = 0 - while (index < elements.size) { - res[index] = elements[index] - ++index + return allocArray(elements.size).apply { + nativeMemUtils.putFloatArray(elements, pointed, elements.size) } - return res } @ExperimentalForeignApi @@ -252,11 +249,13 @@ internal class ZeroValue(private val sizeBytes: Int, private val nativeMemUtils.zeroMemory(interpretPointed(placement.rawValue), sizeBytes) return placement } + override val size get() = sizeBytes override val align get() = alignBytes } + @Suppress("NOTHING_TO_INLINE") @ExperimentalForeignApi public inline fun zeroValue(size: Int, align: Int): CValue = ZeroValue(size, align) @@ -277,10 +276,12 @@ public fun CPointed.readValues(size: Int, align: Int): CValues { return place(interpretCPointer(scope.alloc(size, align).rawPtr)!!) } + override fun place(placement: CPointer): CPointer { nativeMemUtils.putByteArray(bytes, interpretPointed(placement.rawValue), bytes.size) return placement } + override val size get() = size override val align get() = align } @@ -300,10 +301,12 @@ public fun CPointed.readValue(size: Long, align: Int): CValue nativeMemUtils.putByteArray(bytes, interpretPointed(placement.rawValue), bytes.size) return placement } + // Optimization to avoid unneeded virtual calls in base class implementation. 
public override fun getPointer(scope: AutofreeScope): CPointer { return place(interpretCPointer(scope.alloc(size, align).rawPtr)!!) } + override val size get() = size.toInt() override val align get() = align } @@ -361,7 +364,7 @@ public inline fun CValue.copy(modify: T.() -> Unit): @ExperimentalForeignApi public inline fun cValue(initialize: T.() -> Unit): CValue = - zeroValue().copy(modify = initialize) + zeroValue().copy(modify = initialize) @ExperimentalForeignApi public inline fun createValues(count: Int, initializer: T.(index: Int) -> Unit): CValues = memScoped { @@ -379,6 +382,7 @@ public fun cValuesOf(vararg elements: Byte): CValues = object : CValues override fun getPointer(scope: AutofreeScope): CPointer { return place(interpretCPointer(scope.alloc(size, align).rawPtr)!!) } + override fun place(placement: CPointer): CPointer { nativeMemUtils.putByteArray(elements, interpretPointed(placement.rawValue), elements.size) return placement @@ -414,18 +418,25 @@ public fun cValuesOf(vararg elements: CPointer?): CValues = cValuesOf(*this) + @ExperimentalForeignApi public fun ShortArray.toCValues(): CValues = cValuesOf(*this) + @ExperimentalForeignApi public fun IntArray.toCValues(): CValues = cValuesOf(*this) + @ExperimentalForeignApi public fun LongArray.toCValues(): CValues = cValuesOf(*this) + @ExperimentalForeignApi public fun FloatArray.toCValues(): CValues = cValuesOf(*this) + @ExperimentalForeignApi public fun DoubleArray.toCValues(): CValues = cValuesOf(*this) + @ExperimentalForeignApi public fun Array?>.toCValues(): CValues> = cValuesOf(*this) + @ExperimentalForeignApi public fun List?>.toCValues(): CValues> = this.toTypedArray().toCValues() @@ -438,6 +449,7 @@ private class CString(val bytes: ByteArray) : CValues() { override fun getPointer(scope: AutofreeScope): CPointer { return place(interpretCPointer(scope.alloc(size, align).rawPtr)!!) 
} + override fun place(placement: CPointer): CPointer { nativeMemUtils.putByteArray(bytes, placement.pointed, bytes.size) placement[bytes.size] = 0.toByte() @@ -497,7 +509,7 @@ public fun Array.toCStringArray(autofreeScope: AutofreeScope): CPointer< @ExperimentalForeignApi -private class U16CString(val chars: CharArray): CValues() { +private class U16CString(val chars: CharArray) : CValues() { override val size get() = 2 * (chars.size + 1) override val align get() = 2 @@ -593,13 +605,9 @@ public fun CPointer.toKStringFromUtf16(): String { while (nativeBytes[length] != 0.toShort()) { ++length } - val chars = CharArray(length) - var index = 0 - while (index < length) { - chars[index] = nativeBytes[index].toInt().toChar() - ++index - } - return chars.concatToString() + return CharArray(length).apply { + nativeMemUtils.getCharArray(pointed, this, length) + }.concatToString() } /** @@ -644,7 +652,7 @@ public fun CPointer.toKStringFromUtf32(): String { */ @SinceKotlin("1.3") @ExperimentalForeignApi -public fun ByteArray.toKString() : String { +public fun ByteArray.toKString(): String { val realEndIndex = realEndIndex(this, 0, this.size) return decodeToString(0, realEndIndex) } @@ -667,7 +675,7 @@ public fun ByteArray.toKString( startIndex: Int = 0, endIndex: Int = this.size, throwOnInvalidSequence: Boolean = false -) : String { +): String { checkBoundsIndexes(startIndex, endIndex, this.size) val realEndIndex = realEndIndex(this, startIndex, endIndex) return decodeToString(startIndex, realEndIndex, throwOnInvalidSequence) @@ -696,7 +704,7 @@ public class MemScope : ArenaBase() { public val memScope: MemScope get() = this - public val CValues.ptr: CPointer + public val CValues.ptr: CPointer get() = this@ptr.getPointer(this@MemScope) } @@ -708,7 +716,7 @@ public class MemScope : ArenaBase() { */ @ExperimentalForeignApi @OptIn(kotlin.contracts.ExperimentalContracts::class) -public inline fun memScoped(block: MemScope.()->R): R { +public inline fun memScoped(block: 
MemScope.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } @@ -727,3 +735,43 @@ public fun COpaquePointer.readBytes(count: Int): ByteArray { nativeMemUtils.getByteArray(this.reinterpret().pointed, result, count) return result } + +@ExperimentalForeignApi +public fun setMemory(mem: NativePointed, value: Byte, size: Long) { + nativeMemUtils.memset(mem, value, size) +} + +@ExperimentalForeignApi +public fun copyMemory(destMem: NativePointed, srcMem: NativePointed, size: Long) { + nativeMemUtils.memcpy(destMem, srcMem, size) +} + +@ExperimentalForeignApi +public fun moveMemory(destMem: NativePointed, srcMem: NativePointed, size: Long) { + nativeMemUtils.memmove(destMem, srcMem, size) +} + +@ExperimentalForeignApi +public fun compareMemory(destMem: NativePointed, srcMem: NativePointed, size: Long): Int { + return nativeMemUtils.memcmp(destMem, srcMem, size) +} + +@ExperimentalForeignApi +public inline fun CPointer.setMemory(value: Byte, length: Int) { + nativeMemUtils.memset(pointed, value, length * sizeOf()) +} + +@ExperimentalForeignApi +public inline fun CPointer.copyMemory(dest: CPointer, length: Int) { + nativeMemUtils.memcpy(dest.pointed, pointed, length * sizeOf()) +} + +@ExperimentalForeignApi +public inline fun CPointer.moveMemory(dest: CPointer, length: Int) { + nativeMemUtils.memmove(dest.pointed, pointed, length * sizeOf()) +} + +@ExperimentalForeignApi +public inline fun CPointer.compareMemory(dest: CPointer, length: Int): Int { + return nativeMemUtils.memcmp(dest.pointed, pointed, length * sizeOf()) +} \ No newline at end of file diff --git a/kotlin-native/Interop/Runtime/src/native/kotlin/kotlinx/cinterop/NativeMem.kt b/kotlin-native/Interop/Runtime/src/native/kotlin/kotlinx/cinterop/NativeMem.kt index 96ed55f5dba63..9e247d29d377e 100644 --- a/kotlin-native/Interop/Runtime/src/native/kotlin/kotlinx/cinterop/NativeMem.kt +++ b/kotlin-native/Interop/Runtime/src/native/kotlin/kotlinx/cinterop/NativeMem.kt @@ -3,13 +3,12 @@ * Use of 
this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. */ @file:OptIn(ExperimentalForeignApi::class) + package kotlinx.cinterop -import kotlin.native.* import kotlin.native.internal.GCUnsafeCall -import kotlin.native.internal.Intrinsic -import kotlin.native.internal.TypedIntrinsic import kotlin.native.internal.IntrinsicType +import kotlin.native.internal.TypedIntrinsic @PublishedApi internal inline val pointerSize: Int @@ -22,91 +21,119 @@ internal external fun getPointerSize(): Int // TODO: do not use singleton because it leads to init-check on any access. @PublishedApi internal object nativeMemUtils { - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getByte(mem: NativePointed): Byte - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putByte(mem: NativePointed, value: Byte) + @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getByte(mem: NativePointed): Byte + + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putByte(mem: NativePointed, value: Byte) + + @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getShort(mem: NativePointed): Short + + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putShort(mem: NativePointed, value: Short) + + @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getInt(mem: NativePointed): Int + + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putInt(mem: NativePointed, value: Int) + + @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getLong(mem: NativePointed): Long + + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putLong(mem: NativePointed, value: Long) + + @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getFloat(mem: NativePointed): Float + + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putFloat(mem: NativePointed, value: Float) + + 
@TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getDouble(mem: NativePointed): Double + + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putDouble(mem: NativePointed, value: Double) - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getShort(mem: NativePointed): Short - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putShort(mem: NativePointed, value: Short) + @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getNativePtr(mem: NativePointed): NativePtr - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getInt(mem: NativePointed): Int - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putInt(mem: NativePointed, value: Int) + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putNativePtr(mem: NativePointed, value: NativePtr) - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getLong(mem: NativePointed): Long - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putLong(mem: NativePointed, value: Long) + @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) + external fun getVector(mem: NativePointed): Vector128 - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getFloat(mem: NativePointed): Float - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putFloat(mem: NativePointed, value: Float) + @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) + external fun putVector(mem: NativePointed, value: Vector128) - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getDouble(mem: NativePointed): Double - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putDouble(mem: NativePointed, value: Double) + @TypedIntrinsic(IntrinsicType.INTEROP_SET_MEMORY) + external fun memset(mem: NativePointed, value: Byte, size: Int) - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getNativePtr(mem: NativePointed): 
NativePtr - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putNativePtr(mem: NativePointed, value: NativePtr) + @TypedIntrinsic(IntrinsicType.INTEROP_SET_MEMORY) + external fun memset(mem: NativePointed, value: Byte, size: Long) - @TypedIntrinsic(IntrinsicType.INTEROP_READ_PRIMITIVE) external fun getVector(mem: NativePointed): Vector128 - @TypedIntrinsic(IntrinsicType.INTEROP_WRITE_PRIMITIVE) external fun putVector(mem: NativePointed, value: Vector128) + @TypedIntrinsic(IntrinsicType.INTEROP_COPY_MEMORY) + external fun memcpy(dstMem: NativePointed, srcMem: NativePointed, size: Int) + + @TypedIntrinsic(IntrinsicType.INTEROP_COPY_MEMORY) + external fun memcpy(dstMem: NativePointed, srcMem: NativePointed, size: Long) + + @TypedIntrinsic(IntrinsicType.INTEROP_MOVE_MEMORY) + external fun memmove(dstMem: NativePointed, srcMem: NativePointed, size: Int) + + @TypedIntrinsic(IntrinsicType.INTEROP_MOVE_MEMORY) + external fun memmove(dstMem: NativePointed, srcMem: NativePointed, size: Long) + + @TypedIntrinsic(IntrinsicType.INTEROP_COMPARE_MEMORY) + external fun memcmp(memA: NativePointed, memB: NativePointed, size: Long): Int - // TODO: optimize fun getByteArray(source: NativePointed, dest: ByteArray, length: Int) { - val sourceArray = source.reinterpret().ptr - var index = 0 - while (index < length) { - dest[index] = sourceArray[index] - ++index + dest.usePinned { pinnedDest -> + memcpy(pinnedDest.addressOf(0).pointed, source, length) } } - // TODO: optimize fun putByteArray(source: ByteArray, dest: NativePointed, length: Int) { - val destArray = dest.reinterpret().ptr - var index = 0 - while (index < length) { - destArray[index] = source[index] - ++index + source.usePinned { pinnedSrc -> + memcpy(dest, pinnedSrc.addressOf(0).pointed, length) } } - // TODO: optimize fun getCharArray(source: NativePointed, dest: CharArray, length: Int) { - val sourceArray = source.reinterpret().ptr - var index = 0 - while (index < length) { - dest[index] = 
sourceArray[index].toInt().toChar() - ++index + dest.usePinned { pinnedDest -> + memcpy(pinnedDest.addressOf(0).pointed, source, length * Char.SIZE_BYTES) } } - // TODO: optimize fun putCharArray(source: CharArray, dest: NativePointed, length: Int) { - val destArray = dest.reinterpret().ptr - var index = 0 - while (index < length) { - destArray[index] = source[index].code.toShort() - ++index + source.usePinned { pinnedSrc -> + memcpy(dest, pinnedSrc.addressOf(0).pointed, length * Char.SIZE_BYTES) } } - // TODO: optimize - fun zeroMemory(dest: NativePointed, length: Int): Unit { - val destArray = dest.reinterpret().ptr - var index = 0 - while (index < length) { - destArray[index] = 0 - ++index + fun getFloatArray(source: NativePointed, dest: FloatArray, length: Int) { + dest.usePinned { pinnedDest -> + memcpy(pinnedDest.addressOf(0).pointed, source, length * Float.SIZE_BYTES) } } - // TODO: optimize - fun copyMemory(dest: NativePointed, length: Int, src: NativePointed): Unit { - val destArray = dest.reinterpret().ptr - val srcArray = src.reinterpret().ptr - var index = 0 - while (index < length) { - destArray[index] = srcArray[index] - ++index + fun putFloatArray(source: FloatArray, dest: NativePointed, length: Int) { + source.usePinned { pinnedSrc -> + memcpy(dest, pinnedSrc.addressOf(0).pointed, length * Float.SIZE_BYTES) } } + fun zeroMemory(dest: NativePointed, length: Int): Unit { + memset(dest, 0, length) + } + + fun copyMemory(dest: NativePointed, length: Int, src: NativePointed): Unit { + memcpy(dest, src, length) + } + fun alloc(size: Long, align: Int): NativePointed { return interpretOpaquePointed(allocRaw(size, align)) } @@ -136,13 +163,9 @@ public fun CPointer.toKStringFromUtf16(): String { while (nativeBytes[length] != 0.toUShort()) { ++length } - val chars = kotlin.CharArray(length) - var index = 0 - while (index < length) { - chars[index] = nativeBytes[index].toInt().toChar() - ++index - } - return chars.concatToString() + return 
CharArray(length).apply { + nativeMemUtils.getCharArray(pointed, this, length) + }.concatToString() } @ExperimentalForeignApi diff --git a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/CodeGenerator.kt b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/CodeGenerator.kt index 14283f822528a..c7256f5724646 100644 --- a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/CodeGenerator.kt +++ b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/CodeGenerator.kt @@ -6,7 +6,10 @@ package org.jetbrains.kotlin.backend.konan.llvm -import kotlinx.cinterop.* +import kotlinx.cinterop.cValuesOf +import kotlinx.cinterop.memScoped +import kotlinx.cinterop.toCValues +import kotlinx.cinterop.toKString import llvm.* import org.jetbrains.kotlin.backend.konan.NativeGenerationState import org.jetbrains.kotlin.backend.konan.RuntimeNames @@ -19,11 +22,34 @@ import org.jetbrains.kotlin.backend.konan.llvm.ThreadState.Native import org.jetbrains.kotlin.backend.konan.llvm.ThreadState.Runnable import org.jetbrains.kotlin.backend.konan.llvm.objc.ObjCDataGenerator import org.jetbrains.kotlin.ir.UNDEFINED_OFFSET -import org.jetbrains.kotlin.ir.declarations.* +import org.jetbrains.kotlin.ir.declarations.IrClass +import org.jetbrains.kotlin.ir.declarations.IrEnumEntry +import org.jetbrains.kotlin.ir.declarations.IrSimpleFunction import org.jetbrains.kotlin.ir.objcinterop.* import org.jetbrains.kotlin.ir.util.* import org.jetbrains.kotlin.konan.ForeignExceptionMode import org.jetbrains.kotlin.konan.target.CompilerOutputKind +import kotlin.collections.Collection +import kotlin.collections.List +import kotlin.collections.MutableMap +import kotlin.collections.component1 +import kotlin.collections.component2 +import kotlin.collections.emptyList +import kotlin.collections.forEach +import 
kotlin.collections.getOrPut +import kotlin.collections.indices +import kotlin.collections.isNotEmpty +import kotlin.collections.listOf +import kotlin.collections.map +import kotlin.collections.mapOf +import kotlin.collections.mutableListOf +import kotlin.collections.mutableMapOf +import kotlin.collections.plus +import kotlin.collections.plusAssign +import kotlin.collections.set +import kotlin.collections.sortedBy +import kotlin.collections.toList +import kotlin.collections.toTypedArray internal class CodeGenerator(override val generationState: NativeGenerationState) : ContextUtils { @@ -41,6 +67,7 @@ internal class CodeGenerator(override val generationState: NativeGenerationState val intPtrType = LLVMIntPtrTypeInContext(llvm.llvmContext, llvmTargetData)!! internal val immOneIntPtrType = LLVMConstInt(intPtrType, 1, 1)!! internal val immThreeIntPtrType = LLVMConstInt(intPtrType, 3, 1)!! + // Keep in sync with OBJECT_TAG_MASK in C++. internal val immTypeInfoMask = LLVMConstNot(LLVMConstInt(intPtrType, 3, 0)!!)!! @@ -69,6 +96,7 @@ internal class CodeGenerator(override val generationState: NativeGenerationState else -> null } + fun LLVMTypeRef.sizeInBits() = LLVMSizeOfTypeInBits(llvmTargetData, this).toInt() } internal sealed class ExceptionHandler { @@ -96,10 +124,10 @@ internal enum class ThreadState { Native, Runnable } -val LLVMValueRef.name:String? +val LLVMValueRef.name: String? 
get() = LLVMGetValueName(this)?.toKString() -val LLVMValueRef.isConst:Boolean +val LLVMValueRef.isConst: Boolean get() = (LLVMIsConstant(this) == 1) @@ -151,7 +179,7 @@ internal inline fun generateFunction( switchToRunnable: Boolean = false, needSafePoint: Boolean = true, code: FunctionGenerationContext.() -> Unit -) : LlvmCallable { +): LlvmCallable { val function = codegen.addFunction(functionProto) val functionGenerationContext = DefaultFunctionGenerationContext( function, @@ -174,7 +202,7 @@ internal inline fun generateFunctionNoRuntime( codegen: CodeGenerator, functionProto: LlvmFunctionProto, code: FunctionGenerationContext.() -> Unit, -) : LlvmCallable { +): LlvmCallable { val function = codegen.addFunction(functionProto) val functionGenerationContext = DefaultFunctionGenerationContext(function, codegen, null, null, switchToRunnable = false, needSafePoint = true) @@ -304,7 +332,7 @@ internal object VirtualTablesLookup { } internal fun IrSimpleFunction.findOverriddenMethodOfAny() = - resolveFakeOverride().takeIf { it?.parentClassOrNull?.isAny() == true } + resolveFakeOverride().takeIf { it?.parentClassOrNull?.isAny() == true } /* * Special trampoline function to call actual virtual implementation. 
This helps with reducing @@ -391,8 +419,14 @@ internal class StackLocalsManagerImpl( val bbInitStackLocals: LLVMBasicBlockRef ) : StackLocalsManager { private var scopeDepth = 0 - override fun enterScope() { scopeDepth++ } - override fun exitScope() { scopeDepth-- } + override fun enterScope() { + scopeDepth++ + } + + override fun exitScope() { + scopeDepth-- + } + private fun isRootScope() = scopeDepth == 0 private class StackLocal( @@ -516,7 +550,7 @@ internal class StackLocalsManagerImpl( } else { val info = llvmDeclarations.forClass(stackLocal.irClass) val type = info.bodyType.llvmBodyType - for ((fieldSymbol, fieldIndex) in info.fieldIndices.entries.sortedBy{ e -> e.value }) { + for ((fieldSymbol, fieldIndex) in info.fieldIndices.entries.sortedBy { e -> e.value }) { if (fieldSymbol.owner.type.binaryTypeIsReference()) { val fieldPtr = structGep(type, stackLocal.stackAllocationPtr, fieldIndex, "") @@ -607,6 +641,7 @@ internal abstract class FunctionGenerationContext( (LLVMStoreSizeOfType(llvmTargetData, runtime.frameOverlayType) / runtime.pointerSize).toInt() private var slotCount = frameOverlaySlotCount private var localAllocs = 0 + // TODO: remove if exactly unused. //private var arenaSlot: LLVMValueRef? = null private val slotToVariableLocation = mutableMapOf() @@ -766,7 +801,7 @@ internal abstract class FunctionGenerationContext( isVolatile: Boolean = false, alignment: Int? 
= null) { require(alignment == null || alignment % runtime.pointerAlignment == 0) if (onStack) { - require(!isVolatile) { "Stack ref update can't be volatile"} + require(!isVolatile) { "Stack ref update can't be volatile" } call(llvm.updateStackRefFunction, listOf(address, value)) } else { if (isVolatile) { @@ -796,11 +831,31 @@ internal abstract class FunctionGenerationContext( llvm.int32(size), llvm.int1(isVolatile))) - fun call(llvmCallable: LlvmCallable, args: List, - resultLifetime: Lifetime = Lifetime.IRRELEVANT, - exceptionHandler: ExceptionHandler = ExceptionHandler.None, - verbatim: Boolean = false, - resultSlot: LLVMValueRef? = null, + fun memset(pointer: LLVMValueRef, value: LLVMValueRef, size: LLVMValueRef, isVolatile: Boolean = false) = with(codegen) { + call(if (size.type.sizeInBits() == 32) llvm.memsetFunction else llvm.memsetFunction64, + listOf(pointer, value, size, llvm.int1(isVolatile))) + } + + fun memcpy(dstPointer: LLVMValueRef, srcPointer: LLVMValueRef, size: LLVMValueRef, isVolatile: Boolean = false) = with(codegen) { + call(if (size.type.sizeInBits() == 32) llvm.memcpyFunction else llvm.memcpyFunction64, + listOf(dstPointer, srcPointer, size, llvm.int1(isVolatile))) + } + + fun memmove(dstPointer: LLVMValueRef, srcPointer: LLVMValueRef, size: LLVMValueRef, isVolatile: Boolean = false) = with(codegen) { + call(if (size.type.sizeInBits() == 32) llvm.memmoveFunction else llvm.memmoveFunction64, + listOf(dstPointer, srcPointer, size, llvm.int1(isVolatile))) + } + + fun memcmp(memA: LLVMValueRef, memB: LLVMValueRef, size: LLVMValueRef): LLVMValueRef = with(codegen) { + return call(llvm.memcmpFunction, listOf(memA, memB, size)) + } + + fun call( + llvmCallable: LlvmCallable, args: List, + resultLifetime: Lifetime = Lifetime.IRRELEVANT, + exceptionHandler: ExceptionHandler = ExceptionHandler.None, + verbatim: Boolean = false, + resultSlot: LLVMValueRef? 
= null, ): LLVMValueRef { val callArgs = if (verbatim || !llvmCallable.returnsObjectType) { args @@ -887,21 +942,21 @@ internal abstract class FunctionGenerationContext( } } - fun allocInstance(typeInfo: LLVMValueRef, lifetime: Lifetime, resultSlot: LLVMValueRef?) : LLVMValueRef = + fun allocInstance(typeInfo: LLVMValueRef, lifetime: Lifetime, resultSlot: LLVMValueRef?): LLVMValueRef = call(llvm.allocInstanceFunction, listOf(typeInfo), lifetime, resultSlot = resultSlot) fun allocInstance(irClass: IrClass, lifetime: Lifetime, resultSlot: LLVMValueRef?) = - if (lifetime == Lifetime.STACK) - stackLocalsManager.alloc(irClass) - else - allocInstance(codegen.typeInfoForAllocation(irClass), lifetime, resultSlot) + if (lifetime == Lifetime.STACK) + stackLocalsManager.alloc(irClass) + else + allocInstance(codegen.typeInfoForAllocation(irClass), lifetime, resultSlot) fun allocArray( - irClass: IrClass, - count: LLVMValueRef, - lifetime: Lifetime, - exceptionHandler: ExceptionHandler, - resultSlot: LLVMValueRef? = null + irClass: IrClass, + count: LLVMValueRef, + lifetime: Lifetime, + exceptionHandler: ExceptionHandler, + resultSlot: LLVMValueRef? 
= null ): LLVMValueRef { val typeInfo = codegen.typeInfoValue(irClass) return if (lifetime == Lifetime.STACK) { @@ -1341,7 +1396,7 @@ internal abstract class FunctionGenerationContext( internal fun prologue() { if (function.returnsObjectType) { - returnSlot = function.param( function.numParams - 1) + returnSlot = function.param(function.numParams - 1) } positionAtEnd(localsInitBb) @@ -1372,13 +1427,13 @@ internal abstract class FunctionGenerationContext( val expr = longArrayOf(DwarfOp.DW_OP_plus_uconst.value, runtime.pointerSize * slot.toLong()).toCValues() DIInsertDeclaration( - builder = generationState.debugInfo.builder, - value = slots, + builder = generationState.debugInfo.builder, + value = slots, localVariable = variable.localVariable, - location = variable.location, - bb = prologueBb, - expr = expr, - exprCount = 2) + location = variable.location, + bb = prologueBb, + expr = expr, + exprCount = 2) } } br(localsInitBb) @@ -1530,7 +1585,7 @@ internal abstract class FunctionGenerationContext( fun positionAtEnd(block: LLVMBasicBlockRef) { LLVMPositionBuilderAtEnd(builder, block) - basicBlockToLastLocation[block]?.let{ debugLocation(it.start, it.end) } + basicBlockToLastLocation[block]?.let { debugLocation(it.start, it.end) } val lastInstr = LLVMGetLastInstruction(block) isAfterTerminator = lastInstr != null && (LLVMIsATerminatorInst(lastInstr) != null) } diff --git a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/ContextUtils.kt b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/ContextUtils.kt index 22fd746ec8d43..6756835dfe6ef 100644 --- a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/ContextUtils.kt +++ b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/ContextUtils.kt @@ -9,7 +9,6 @@ import kotlinx.cinterop.toCValues import kotlinx.cinterop.toKString 
import llvm.* import org.jetbrains.kotlin.backend.konan.* -import org.jetbrains.kotlin.backend.konan.Context import org.jetbrains.kotlin.backend.konan.lower.originalConstructor import org.jetbrains.kotlin.descriptors.konan.* import org.jetbrains.kotlin.ir.declarations.* @@ -260,7 +259,7 @@ internal class InitializersGenerationState { var scopeState = ScopeInitializersGenerationState() - fun reset(newState: ScopeInitializersGenerationState) : ScopeInitializersGenerationState { + fun reset(newState: ScopeInitializersGenerationState): ScopeInitializersGenerationState { val t = scopeState scopeState = newState return t @@ -348,14 +347,37 @@ internal class CodegenLlvmHelpers(private val generationState: NativeGenerationS return LlvmCallable(functionType, returnsObjectType, function, attributesCopier) } - private fun importMemset(): LlvmCallable { - val functionType = functionType(voidType, false, int8PtrType, int8Type, int32Type, int1Type) + private fun importMemset(bitness: Int = 32): LlvmCallable { + val sizeType = if (bitness == 32) int32Type else int64Type + val functionType = functionType(voidType, false, int8PtrType, int8Type, sizeType, int1Type) return llvmIntrinsic( - if (context.config.useLlvmOpaquePointers) "llvm.memset.p0.i32" - else "llvm.memset.p0i8.i32", + if (context.config.useLlvmOpaquePointers) "llvm.memset.p0.i$bitness" + else "llvm.memset.p0i8.i$bitness", functionType) } + private fun importMemcpy(bitness: Int = 32): LlvmCallable { + val sizeType = if (bitness == 32) int32Type else int64Type + val functionType = functionType(voidType, false, int8PtrType, int8PtrType, sizeType, int1Type) + return llvmIntrinsic( + if (context.config.useLlvmOpaquePointers) "llvm.memcpy.p0.p0.i$bitness" + else "llvm.memcpy.p0i8.p0i8.i$bitness", + functionType) + } + + private fun importMemmove(bitness: Int = 32): LlvmCallable { + val sizeType = if (bitness == 32) int32Type else int64Type + val functionType = functionType(voidType, false, int8PtrType, int8PtrType, 
sizeType, int1Type) + return llvmIntrinsic( + if (context.config.useLlvmOpaquePointers) "llvm.memmove.p0.p0.i$bitness" + else "llvm.memmove.p0i8.p0i8.i$bitness", + functionType) + } + + private fun importMemcmp(): LlvmCallable { + return llvmIntrinsic("memcmp", functionType(int32Type, false, int8PtrType, int8PtrType, intptrType), "nounwind") + } + private fun llvmIntrinsic(name: String, type: LLVMTypeRef, vararg attributes: String): LlvmCallable { val result = LLVMAddFunction(module, name, type)!! attributes.forEach { @@ -555,7 +577,13 @@ internal class CodegenLlvmHelpers(private val generationState: NativeGenerationS val kImmInt32Zero by lazy { int32(0) } val kImmInt32One by lazy { int32(1) } + val memcpyFunction = importMemcpy() + val memcpyFunction64 = importMemcpy(64) + val memmoveFunction = importMemmove() + val memmoveFunction64 = importMemmove(64) val memsetFunction = importMemset() + val memsetFunction64 = importMemset(64) + val memcmpFunction = importMemcmp() val llvmTrap = llvmIntrinsic( "llvm.trap", diff --git a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IntrinsicGenerator.kt b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IntrinsicGenerator.kt index 95999b7e587b6..5dd17e8eed0cf 100644 --- a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IntrinsicGenerator.kt +++ b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IntrinsicGenerator.kt @@ -1,10 +1,11 @@ package org.jetbrains.kotlin.backend.konan.llvm import llvm.* -import org.jetbrains.kotlin.backend.konan.* +import org.jetbrains.kotlin.backend.konan.KonanFqNames +import org.jetbrains.kotlin.backend.konan.RuntimeNames +import org.jetbrains.kotlin.backend.konan.binaryTypeIsReference import org.jetbrains.kotlin.backend.konan.ir.isConstantConstructorIntrinsic import 
org.jetbrains.kotlin.backend.konan.ir.isTypedIntrinsic -import org.jetbrains.kotlin.ir.util.getAnnotationStringValue import org.jetbrains.kotlin.backend.konan.llvm.objc.genObjCSelector import org.jetbrains.kotlin.backend.konan.lower.volatileField import org.jetbrains.kotlin.backend.konan.reportCompilationError @@ -17,6 +18,7 @@ import org.jetbrains.kotlin.ir.objcinterop.isExternalObjCClass import org.jetbrains.kotlin.ir.symbols.IrConstructorSymbol import org.jetbrains.kotlin.ir.types.* import org.jetbrains.kotlin.ir.util.findAnnotation +import org.jetbrains.kotlin.ir.util.getAnnotationStringValue import org.jetbrains.kotlin.ir.util.isInterface internal enum class IntrinsicType { @@ -50,6 +52,7 @@ internal enum class IntrinsicType { EXTRACT_ELEMENT, ARE_EQUAL_BY_VALUE, IEEE_754_EQUALS, + // OBJC OBJC_GET_MESSENGER, OBJC_GET_MESSENGER_STRET, @@ -57,6 +60,7 @@ internal enum class IntrinsicType { OBJC_CREATE_SUPER_STRUCT, OBJC_INIT_BY, OBJC_GET_SELECTOR, + // Other CREATE_UNINITIALIZED_INSTANCE, CREATE_UNINITIALIZED_ARRAY, @@ -65,15 +69,18 @@ internal enum class IntrinsicType { INIT_INSTANCE, IS_SUBTYPE, THE_UNIT_INSTANCE, + // Enums ENUM_VALUES, ENUM_VALUE_OF, ENUM_ENTRIES, + // Coroutines GET_CONTINUATION, RETURN_IF_SUSPENDED, SAVE_COROUTINE_STATE, RESTORE_COROUTINE_STATE, + // Interop INTEROP_READ_BITS, INTEROP_WRITE_BITS, @@ -90,8 +97,14 @@ internal enum class IntrinsicType { INTEROP_NARROW, INTEROP_STATIC_C_FUNCTION, INTEROP_FUNPTR_INVOKE, + INTEROP_SET_MEMORY, + INTEROP_COPY_MEMORY, + INTEROP_MOVE_MEMORY, + INTEROP_COMPARE_MEMORY, + // Worker WORKER_EXECUTE, + // Atomics ATOMIC_GET_FIELD, ATOMIC_SET_FIELD, @@ -103,6 +116,7 @@ internal enum class IntrinsicType { COMPARE_AND_EXCHANGE, GET_AND_SET, GET_AND_ADD, + // Atomic arrays ATOMIC_GET_ARRAY_ELEMENT, ATOMIC_SET_ARRAY_ELEMENT, @@ -170,9 +184,6 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv private val IrCall.llvmReturnType: LLVMTypeRef get() = 
codegen.getLlvmFunctionReturnType(symbol.owner).llvmType - - private fun LLVMTypeRef.sizeInBits() = LLVMSizeOfTypeInBits(codegen.llvmTargetData, this).toInt() - /** * Some intrinsics have to be processed before evaluation of their arguments. * So this method looks at [callSite] and if it is call to "special" intrinsic @@ -262,6 +273,10 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv IntrinsicType.COMPARE_AND_SET_ARRAY_ELEMENT -> emitCompareAndSetArrayElement(callSite, args) IntrinsicType.GET_AND_SET_ARRAY_ELEMENT -> emitGetAndSetArrayElement(callSite, args, resultSlot) IntrinsicType.GET_AND_ADD_ARRAY_ELEMENT -> emitGetAndAddArrayElement(callSite, args) + IntrinsicType.INTEROP_SET_MEMORY -> emitMemset(callSite, args) + IntrinsicType.INTEROP_COPY_MEMORY -> emitMemcpy(callSite, args) + IntrinsicType.INTEROP_MOVE_MEMORY -> emitMemmove(callSite, args) + IntrinsicType.INTEROP_COMPARE_MEMORY -> emitMemcmp(callSite, args) IntrinsicType.GET_CONTINUATION, IntrinsicType.RETURN_IF_SUSPENDED, IntrinsicType.SAVE_COROUTINE_STATE, @@ -289,7 +304,7 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv reportSpecialIntrinsic(intrinsicType) } - fun evaluateConstantConstructorFields(constant: IrConstantObject, args: List) : List { + fun evaluateConstantConstructorFields(constant: IrConstantObject, args: List): List { return when (val intrinsicType = getConstantConstructorIntrinsicType(constant.constructor)) { ConstantConstructorIntrinsicType.KCLASS_IMPL -> { require(args.isEmpty()) @@ -325,7 +340,7 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv args.single() // cmpxcgh llvm instruction return pair. 
idnex is index of required element of this pair - enum class CmpExchangeMode(val index:Int) { + enum class CmpExchangeMode(val index: Int) { SWAP(0), SET(1) } @@ -377,12 +392,15 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv private fun FunctionGenerationContext.emitCompareAndSet(callSite: IrCall, args: List): LLVMValueRef { return emitCmpExchange(callSite, transformArgsForVolatile(callSite, args), CmpExchangeMode.SET, null) } + private fun FunctionGenerationContext.emitCompareAndSwap(callSite: IrCall, args: List, resultSlot: LLVMValueRef?): LLVMValueRef { return emitCmpExchange(callSite, transformArgsForVolatile(callSite, args), CmpExchangeMode.SWAP, resultSlot) } + private fun FunctionGenerationContext.emitGetAndSet(callSite: IrCall, args: List, resultSlot: LLVMValueRef?): LLVMValueRef { return emitAtomicRMW(callSite, transformArgsForVolatile(callSite, args), LLVMAtomicRMWBinOp.LLVMAtomicRMWBinOpXchg, resultSlot) } + private fun FunctionGenerationContext.emitGetAndAdd(callSite: IrCall, args: List): LLVMValueRef { return emitAtomicRMW(callSite, transformArgsForVolatile(callSite, args), LLVMAtomicRMWBinOp.LLVMAtomicRMWBinOpAdd, null) } @@ -437,7 +455,7 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv llvm.kNullInt8Ptr private fun FunctionGenerationContext.emitNativePtrPlusLong(args: List): LLVMValueRef = - gep(llvm.int8Type, args[0], args[1]) + gep(llvm.int8Type, args[0], args[1]) private fun FunctionGenerationContext.emitNativePtrToLong(callSite: IrCall, args: List): LLVMValueRef { val intPtrValue = ptrToInt(args.single(), codegen.intPtrType) @@ -613,11 +631,11 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv private fun FunctionGenerationContext.emitAreEqualByValue(args: List): LLVMValueRef { val (first, second) = args - assert (first.type == second.type) { "Types are different: '${llvmtype2string(first.type)}' and '${llvmtype2string(second.type)}'" } + 
assert(first.type == second.type) { "Types are different: '${llvmtype2string(first.type)}' and '${llvmtype2string(second.type)}'" } return when (val typeKind = LLVMGetTypeKind(first.type)) { LLVMTypeKind.LLVMFloatTypeKind, LLVMTypeKind.LLVMDoubleTypeKind, - LLVMTypeKind.LLVMVectorTypeKind -> { + LLVMTypeKind.LLVMVectorTypeKind -> with(codegen) { // TODO LLVM API does not provide guarantee for LLVMIntTypeInContext availability for longer types; consider meaningful diag message instead of NPE val integerType = LLVMIntTypeInContext(llvm.llvmContext, first.type.sizeInBits())!! icmpEq(bitcast(integerType, first), bitcast(integerType, second)) @@ -629,11 +647,11 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv private fun FunctionGenerationContext.emitIeee754Equals(args: List): LLVMValueRef { val (first, second) = args - assert (first.type == second.type) - { "Types are different: '${llvmtype2string(first.type)}' and '${llvmtype2string(second.type)}'" } + assert(first.type == second.type) + { "Types are different: '${llvmtype2string(first.type)}' and '${llvmtype2string(second.type)}'" } val type = LLVMGetTypeKind(first.type) - assert (type == LLVMTypeKind.LLVMFloatTypeKind || type == LLVMTypeKind.LLVMDoubleTypeKind) - { "Should be of floating point kind, not: '${llvmtype2string(first.type)}'"} + assert(type == LLVMTypeKind.LLVMFloatTypeKind || type == LLVMTypeKind.LLVMDoubleTypeKind) + { "Should be of floating point kind, not: '${llvmtype2string(first.type)}'" } return fcmpEq(first, second) } @@ -647,7 +665,7 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv assert(callSite.llvmReturnType.isVectorElementType() && vectorSize % elementSize == 0 - ) { "Invalid vector element type ${LLVMGetTypeKind(callSite.llvmReturnType)}"} + ) { "Invalid vector element type ${LLVMGetTypeKind(callSite.llvmReturnType)}" } val elementCount = vectorSize / elementSize emitThrowIfOOB(index, llvm.int32((elementCount))) @@ 
-798,7 +816,7 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv type: LLVMTypeRef, retZeroOnOverflow: Boolean, nonOverflowValue: () -> LLVMValueRef - ): LLVMValueRef { + ): LLVMValueRef = with(codegen) { val minValue = when (val sizeInBits = type.sizeInBits()) { 32 -> LLVMConstInt(type, Int.MIN_VALUE.toLong(), 1)!! 64 -> LLVMConstInt(type, Long.MIN_VALUE, 1)!! @@ -808,7 +826,7 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv val minusOne = LLVMConstInt(type, -1, 1)!! val overflowValue = if (retZeroOnOverflow) Zero(type).llvm else minValue - return ifThenElse(and(icmpEq(dividend, minValue), icmpEq(divisor, minusOne)), overflowValue, nonOverflowValue) + ifThenElse(and(icmpEq(dividend, minValue), icmpEq(divisor, minusOne)), overflowValue, nonOverflowValue) } private fun FunctionGenerationContext.emitInc(args: List): LLVMValueRef { @@ -868,4 +886,28 @@ internal class IntrinsicGenerator(private val environment: IntrinsicGeneratorEnv llvm.doubleType -> llvm.float64(value.toDouble()) else -> context.reportCompilationError("Unexpected primitive type: $type") } -} + + private fun FunctionGenerationContext.emitMemcpy(callSite: IrCall, args: List): LLVMValueRef { + assert(args.size == 4) { "Wrong number of arguments for memcpy intrinsic" } + val (_, dst, src, size) = args + return memcpy(dst, src, size) + } + + private fun FunctionGenerationContext.emitMemmove(callSite: IrCall, args: List): LLVMValueRef { + assert(args.size == 4) { "Wrong number of arguments for memmove intrinsic" } + val (_, dst, src, size) = args + return memmove(dst, src, size) + } + + private fun FunctionGenerationContext.emitMemset(callSite: IrCall, args: List): LLVMValueRef { + assert(args.size == 4) { "Wrong number of arguments for memset intrinsic" } + val (_, address, value, size) = args + return memset(address, value, size) + } + + private fun FunctionGenerationContext.emitMemcmp(callSite: IrCall, args: List): LLVMValueRef { + 
assert(args.size == 4) { "Wrong number of arguments for memcmp intrinsic" } + val (_, memA, memB, size) = args + return memcmp(memA, memB, size) + } +} \ No newline at end of file diff --git a/kotlin-native/runtime/src/main/kotlin/kotlin/native/internal/IntrinsicType.kt b/kotlin-native/runtime/src/main/kotlin/kotlin/native/internal/IntrinsicType.kt index af1f9d900b673..5b91339b40488 100644 --- a/kotlin-native/runtime/src/main/kotlin/kotlin/native/internal/IntrinsicType.kt +++ b/kotlin-native/runtime/src/main/kotlin/kotlin/native/internal/IntrinsicType.kt @@ -78,6 +78,10 @@ internal class IntrinsicType { const val INTEROP_NARROW = "INTEROP_NARROW" const val INTEROP_STATIC_C_FUNCTION = "INTEROP_STATIC_C_FUNCTION" const val INTEROP_FUNPTR_INVOKE = "INTEROP_FUNPTR_INVOKE" + const val INTEROP_COPY_MEMORY = "INTEROP_COPY_MEMORY" + const val INTEROP_MOVE_MEMORY = "INTEROP_MOVE_MEMORY" + const val INTEROP_SET_MEMORY = "INTEROP_SET_MEMORY" + const val INTEROP_COMPARE_MEMORY = "INTEROP_COMPARE_MEMORY" // Worker const val WORKER_EXECUTE = "WORKER_EXECUTE" diff --git a/native/native.tests/testData/codegen/cinterop/kt73565.kt b/native/native.tests/testData/codegen/cinterop/kt73565.kt new file mode 100644 index 0000000000000..d9afe7a23fe49 --- /dev/null +++ b/native/native.tests/testData/codegen/cinterop/kt73565.kt @@ -0,0 +1,54 @@ +@file:OptIn(kotlinx.cinterop.ExperimentalForeignApi::class) + +import kotlin.test.* +import kotlinx.cinterop.* + +fun box(): String { + // memcmp + val data1 = floatArrayOf(1F, 2F, 3F, 4F, 5F, 6F) + val data2 = floatArrayOf(1F, 2F, 3F, 4F, 5F, 6F) + val data3 = floatArrayOf(1F, 2F, 3F, 4F, 4F, 6F) + + data1.usePinned { pinnedMemA -> + data2.usePinned { pinnedMemB -> + if (pinnedMemA.addressOf(0).compareMemory(pinnedMemB.addressOf(0), data1.size) != 0) { + return "memcmp test failed for equal data" + } + } + data3.usePinned { pinnedMemB -> + if (pinnedMemA.addressOf(0).compareMemory(pinnedMemB.addressOf(0), data1.size) == 0) { + return "memcmp test 
failed for unequal data" + } + } + } + + // memset + data1.usePinned { pinned -> + pinned.addressOf(0).setMemory(0, data1.size) + } + if (data1.contentEquals(data2)) { + return "memset test failed" + } + + // memcpy + data1.usePinned { pinnedDest -> + data2.usePinned { pinnedSrc -> + pinnedSrc.addressOf(0).copyMemory(pinnedDest.addressOf(0), data1.size) + } + } + if (!data1.contentEquals(data2)) { + return "memcpy test failed" + } + + // memmove + data1.usePinned { pinned -> + val destAddress = interpretCPointer(pinned.addressOf(0).rawValue + (Float.SIZE_BYTES.toLong() shl 1)) + ?: return "memmove test failed with overlapping data" + pinned.addressOf(0).moveMemory(destAddress, 2) + } + if (!data1.contentEquals(floatArrayOf(1F, 2F, 1F, 2F, 5F, 6F))) { + return "memmove test failed with overlapping data" + } + + return "OK" +} \ No newline at end of file diff --git a/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/FirNativeCodegenLocalTestGenerated.java b/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/FirNativeCodegenLocalTestGenerated.java index d076301570191..f9b8d30749b0b 100644 --- a/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/FirNativeCodegenLocalTestGenerated.java +++ b/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/FirNativeCodegenLocalTestGenerated.java @@ -261,6 +261,12 @@ public void testKt72552() { runTest("native/native.tests/testData/codegen/cinterop/kt72552.kt"); } + @Test + @TestMetadata("kt73565.kt") + public void testKt73565() { + runTest("native/native.tests/testData/codegen/cinterop/kt73565.kt"); + } + @Test @TestMetadata("libiconv.kt") public void testLibiconv() { diff --git a/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/NativeCodegenLocalTestGenerated.java b/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/NativeCodegenLocalTestGenerated.java index ab5ab3ea07c86..c6439e4bec124 100644 --- 
a/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/NativeCodegenLocalTestGenerated.java +++ b/native/native.tests/tests-gen/org/jetbrains/kotlin/konan/test/blackbox/NativeCodegenLocalTestGenerated.java @@ -251,6 +251,12 @@ public void testKt72552() { runTest("native/native.tests/testData/codegen/cinterop/kt72552.kt"); } + @Test + @TestMetadata("kt73565.kt") + public void testKt73565() { + runTest("native/native.tests/testData/codegen/cinterop/kt73565.kt"); + } + @Test @TestMetadata("libiconv.kt") public void testLibiconv() { From 1c634a78031b585754072557c08617cd86dcc954 Mon Sep 17 00:00:00 2001 From: KitsuneAlex Date: Sun, 8 Dec 2024 00:26:44 +0100 Subject: [PATCH 2/2] Rename CPointer extension, add missing target header from box test --- .../src/main/kotlin/kotlinx/cinterop/Utils.kt | 8 ++++---- .../testData/codegen/cinterop/kt73565.kt | 15 +++++++-------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt b/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt index 4fb42003ada8a..3eb1a19061663 100644 --- a/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt +++ b/kotlin-native/Interop/Runtime/src/main/kotlin/kotlinx/cinterop/Utils.kt @@ -757,21 +757,21 @@ public fun compareMemory(destMem: NativePointed, srcMem: NativePointed, size: Lo } @ExperimentalForeignApi -public inline fun CPointer.setMemory(value: Byte, length: Int) { +public inline fun CPointer.setBlock(value: Byte, length: Int) { nativeMemUtils.memset(pointed, value, length * sizeOf()) } @ExperimentalForeignApi -public inline fun CPointer.copyMemory(dest: CPointer, length: Int) { +public inline fun CPointer.copyTo(dest: CPointer, length: Int) { nativeMemUtils.memcpy(dest.pointed, pointed, length * sizeOf()) } @ExperimentalForeignApi -public inline fun CPointer.moveMemory(dest: CPointer, length: Int) { +public inline fun CPointer.moveTo(dest: CPointer, length: 
Int) { nativeMemUtils.memmove(dest.pointed, pointed, length * sizeOf()) } @ExperimentalForeignApi -public inline fun CPointer.compareMemory(dest: CPointer, length: Int): Int { +public inline fun CPointer.compareBlock(dest: CPointer, length: Int): Int { return nativeMemUtils.memcmp(dest.pointed, pointed, length * sizeOf()) } \ No newline at end of file diff --git a/native/native.tests/testData/codegen/cinterop/kt73565.kt b/native/native.tests/testData/codegen/cinterop/kt73565.kt index d9afe7a23fe49..0ce2f907ba6af 100644 --- a/native/native.tests/testData/codegen/cinterop/kt73565.kt +++ b/native/native.tests/testData/codegen/cinterop/kt73565.kt @@ -1,3 +1,4 @@ +// TARGET_BACKEND: NATIVE @file:OptIn(kotlinx.cinterop.ExperimentalForeignApi::class) import kotlin.test.* @@ -11,12 +12,12 @@ fun box(): String { data1.usePinned { pinnedMemA -> data2.usePinned { pinnedMemB -> - if (pinnedMemA.addressOf(0).compareMemory(pinnedMemB.addressOf(0), data1.size) != 0) { + if (pinnedMemA.addressOf(0).compareBlock(pinnedMemB.addressOf(0), data1.size) != 0) { return "memcmp test failed for equal data" } } data3.usePinned { pinnedMemB -> - if (pinnedMemA.addressOf(0).compareMemory(pinnedMemB.addressOf(0), data1.size) == 0) { + if (pinnedMemA.addressOf(0).compareBlock(pinnedMemB.addressOf(0), data1.size) == 0) { return "memcmp test failed for unequal data" } } @@ -24,7 +25,7 @@ fun box(): String { // memset data1.usePinned { pinned -> - pinned.addressOf(0).setMemory(0, data1.size) + pinned.addressOf(0).setBlock(0, data1.size) } if (data1.contentEquals(data2)) { return "memset test failed" @@ -33,7 +34,7 @@ fun box(): String { // memcpy data1.usePinned { pinnedDest -> data2.usePinned { pinnedSrc -> - pinnedSrc.addressOf(0).copyMemory(pinnedDest.addressOf(0), data1.size) + pinnedSrc.addressOf(0).copyTo(pinnedDest.addressOf(0), data1.size) } } if (!data1.contentEquals(data2)) { @@ -42,11 +43,9 @@ fun box(): String { // memmove data1.usePinned { pinned -> - val destAddress = 
interpretCPointer(pinned.addressOf(0).rawValue + (Float.SIZE_BYTES.toLong() shl 1)) - ?: return "memmove test failed with overlapping data" - pinned.addressOf(0).moveMemory(destAddress, 2) + pinned.addressOf(0).moveTo(pinned.addressOf(1), 2) } - if (!data1.contentEquals(floatArrayOf(1F, 2F, 1F, 2F, 5F, 6F))) { + if (!data1.contentEquals(floatArrayOf(1F, 1F, 2F, 4F, 5F, 6F))) { return "memmove test failed with overlapping data" }