Skip to content

Instantly share code, notes, and snippets.

@kuruczgy
Last active October 29, 2023 15:05
Show Gist options
  • Save kuruczgy/c6bfd5d4efa4e3bbf56e4a87c9deaf56 to your computer and use it in GitHub Desktop.
Generated Lean Vulkan bindings
#include <stdlib.h>
#include <vulkan/vulkan_core.h>
#include <lean/lean.h>
#include <stdio.h> // for debugging
#define Pointer void*
/* Glue for vkCreateInstance.
 * Unmarshals the Lean `VkInstanceCreateInfo` constructor object (fields:
 * 0 = flags, 1 = Option VkApplicationInfo, 2 = layer-name array,
 * 3 = extension-name array), calls vkCreateInstance with no custom
 * allocator, and returns IO (VkResult x VkInstance) with both values
 * boxed as UInt64 / a 2-field pair ctor.
 *
 * Fix: the two calloc'd name-pointer arrays were leaked. They are freed
 * after the call; the char* entries themselves borrow Lean string memory
 * (lean_string_cstr) and must NOT be freed. */
LEAN_EXPORT lean_obj_res glue_vkCreateInstance(b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
    lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
    lean_object *pCreateInfo_pApplicationInfo = lean_ctor_get(pCreateInfo, 1);
    /* `Option` is represented as a scalar (none) or a ctor (some _). */
    _Bool is_some_pCreateInfo_pApplicationInfo = !lean_is_scalar(pCreateInfo_pApplicationInfo);
    VkApplicationInfo um_pCreateInfo_pApplicationInfo;
    if (is_some_pCreateInfo_pApplicationInfo) {
        lean_object *some_app = lean_ctor_get(pCreateInfo_pApplicationInfo, 0);
        lean_object *app_name = lean_ctor_get(some_app, 0);
        lean_object *engine_name = lean_ctor_get(some_app, 1);
        /* Scalar fields are stored after the object pointers in the ctor:
         * applicationVersion @0, engineVersion @4, apiVersion @8. */
        uint8_t *scalars = (uint8_t*)(lean_ctor_obj_cptr(some_app) + lean_ctor_num_objs(some_app));
        VkApplicationInfo um_some = {
            .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
            .pNext = 0,
            .pApplicationName = (char*)lean_string_cstr(app_name),
            .applicationVersion = *(uint32_t*)(scalars + 0),
            .pEngineName = (char*)lean_string_cstr(engine_name),
            .engineVersion = *(uint32_t*)(scalars + 4),
            .apiVersion = *(uint32_t*)(scalars + 8),
        };
        um_pCreateInfo_pApplicationInfo = um_some;
    }
    /* Enabled layers: array of Lean strings -> temporary char* array. */
    lean_object *layers = lean_ctor_get(pCreateInfo, 2);
    size_t n_layers = lean_array_size(layers);
    char **um_layers = calloc(n_layers, sizeof(char*));
    for (size_t i = 0; i < n_layers; ++i) {
        um_layers[i] = (char*)lean_string_cstr(lean_array_cptr(layers)[i]);
    }
    /* Enabled extensions: same marshalling as layers. */
    lean_object *exts = lean_ctor_get(pCreateInfo, 3);
    size_t n_exts = lean_array_size(exts);
    char **um_exts = calloc(n_exts, sizeof(char*));
    for (size_t i = 0; i < n_exts; ++i) {
        um_exts[i] = (char*)lean_string_cstr(lean_array_cptr(exts)[i]);
    }
    struct VkInstanceCreateInfo um_pCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pNext = 0,
        .flags = (VkInstanceCreateFlags)(VkInstanceCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
        .pApplicationInfo = (is_some_pCreateInfo_pApplicationInfo ? &um_pCreateInfo_pApplicationInfo : NULL),
        .enabledLayerCount = (uint32_t)n_layers,
        .ppEnabledLayerNames = (const char* const*)um_layers,
        .enabledExtensionCount = (uint32_t)n_exts,
        .ppEnabledExtensionNames = (const char* const*)um_exts,
    };
    VkInstance out_pInstance;
    VkResult out_ret = vkCreateInstance(&um_pCreateInfo, NULL, &out_pInstance);
    /* Fix: release the temporary marshalling arrays (previously leaked). */
    free(um_layers);
    free(um_exts);
    /* Build the (VkResult, VkInstance) result pair. */
    lean_object *tuple = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(tuple, 0, lean_box_uint64((uint64_t)out_ret));
    lean_ctor_set(tuple, 1, lean_box_uint64((uint64_t)out_pInstance));
    return lean_io_result_mk_ok(tuple);
}
/* Glue for vkDestroyInstance: unboxes the UInt64 instance handle and
 * destroys it with no custom allocator. Returns IO Unit (boxed 0). */
LEAN_EXPORT lean_obj_res glue_vkDestroyInstance(b_lean_obj_arg instance, b_lean_obj_arg w) {
    VkInstance handle = (VkInstance)lean_unbox_uint64(instance);
    vkDestroyInstance(handle, NULL);
    return lean_io_result_mk_ok(lean_box(0));
}
/* Glue for vkEnumeratePhysicalDevices.
 * Uses the standard Vulkan two-call pattern: first query the count, then
 * allocate and fetch the handles. Returns
 * IO (VkResult x Array VkPhysicalDevice) with each handle boxed as UInt64.
 *
 * Fix: the temporary device-handle buffer was leaked; it is now freed
 * after marshalling into the Lean array. The instance handle is also
 * unboxed once instead of twice. */
LEAN_EXPORT lean_obj_res glue_vkEnumeratePhysicalDevices(b_lean_obj_arg instance, b_lean_obj_arg w) {
    VkInstance um_instance = (VkInstance)lean_unbox_uint64(instance);
    uint32_t len_out_pPhysicalDevices;
    /* get length pPhysicalDeviceCount of pPhysicalDevices */
    vkEnumeratePhysicalDevices(um_instance, &len_out_pPhysicalDevices, NULL);
    VkPhysicalDevice *out_pPhysicalDevices = calloc(len_out_pPhysicalDevices, sizeof(VkPhysicalDevice));
    VkResult out_ret = vkEnumeratePhysicalDevices(um_instance, &len_out_pPhysicalDevices, out_pPhysicalDevices);
    lean_object *m_out_pPhysicalDevices = lean_alloc_array(len_out_pPhysicalDevices, len_out_pPhysicalDevices);
    for (size_t i = 0; i < len_out_pPhysicalDevices; ++i) {
        lean_array_cptr(m_out_pPhysicalDevices)[i] = lean_box_uint64((uint64_t)out_pPhysicalDevices[i]);
    }
    /* Fix: release the temporary buffer (previously leaked). */
    free(out_pPhysicalDevices);
    lean_object *tuple = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(tuple, 0, lean_box_uint64((uint64_t)out_ret));
    lean_ctor_set(tuple, 1, m_out_pPhysicalDevices);
    return lean_io_result_mk_ok(tuple);
}
/* Glue for vkGetPhysicalDeviceFeatures.
 * Marshals VkPhysicalDeviceFeatures into a Lean ctor with 0 object fields
 * and 220 scalar bytes: the 55 VkBool32 fields in declaration order, at a
 * fixed 4-byte stride (offset = 4 * field index).
 *
 * Fix: removed the unused `temp` local. The repeated raw-offset stores are
 * generated via a local macro so each (offset, field) pair stays on one
 * line; offsets and field order are identical to the original. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceFeatures(b_lean_obj_arg physicalDevice, b_lean_obj_arg w) {
    VkPhysicalDeviceFeatures out_pFeatures;
    vkGetPhysicalDeviceFeatures((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), &out_pFeatures);
    lean_object *m_out_pFeatures = lean_alloc_ctor(0, 0, 220);
    uint8_t *sc = lean_ctor_scalar_cptr(m_out_pFeatures);
#define PUT_FEATURE(off, field) (*(VkBool32*)(sc + (off)) = out_pFeatures.field)
    PUT_FEATURE(0, robustBufferAccess);
    PUT_FEATURE(4, fullDrawIndexUint32);
    PUT_FEATURE(8, imageCubeArray);
    PUT_FEATURE(12, independentBlend);
    PUT_FEATURE(16, geometryShader);
    PUT_FEATURE(20, tessellationShader);
    PUT_FEATURE(24, sampleRateShading);
    PUT_FEATURE(28, dualSrcBlend);
    PUT_FEATURE(32, logicOp);
    PUT_FEATURE(36, multiDrawIndirect);
    PUT_FEATURE(40, drawIndirectFirstInstance);
    PUT_FEATURE(44, depthClamp);
    PUT_FEATURE(48, depthBiasClamp);
    PUT_FEATURE(52, fillModeNonSolid);
    PUT_FEATURE(56, depthBounds);
    PUT_FEATURE(60, wideLines);
    PUT_FEATURE(64, largePoints);
    PUT_FEATURE(68, alphaToOne);
    PUT_FEATURE(72, multiViewport);
    PUT_FEATURE(76, samplerAnisotropy);
    PUT_FEATURE(80, textureCompressionETC2);
    PUT_FEATURE(84, textureCompressionASTC_LDR);
    PUT_FEATURE(88, textureCompressionBC);
    PUT_FEATURE(92, occlusionQueryPrecise);
    PUT_FEATURE(96, pipelineStatisticsQuery);
    PUT_FEATURE(100, vertexPipelineStoresAndAtomics);
    PUT_FEATURE(104, fragmentStoresAndAtomics);
    PUT_FEATURE(108, shaderTessellationAndGeometryPointSize);
    PUT_FEATURE(112, shaderImageGatherExtended);
    PUT_FEATURE(116, shaderStorageImageExtendedFormats);
    PUT_FEATURE(120, shaderStorageImageMultisample);
    PUT_FEATURE(124, shaderStorageImageReadWithoutFormat);
    PUT_FEATURE(128, shaderStorageImageWriteWithoutFormat);
    PUT_FEATURE(132, shaderUniformBufferArrayDynamicIndexing);
    PUT_FEATURE(136, shaderSampledImageArrayDynamicIndexing);
    PUT_FEATURE(140, shaderStorageBufferArrayDynamicIndexing);
    PUT_FEATURE(144, shaderStorageImageArrayDynamicIndexing);
    PUT_FEATURE(148, shaderClipDistance);
    PUT_FEATURE(152, shaderCullDistance);
    PUT_FEATURE(156, shaderFloat64);
    PUT_FEATURE(160, shaderInt64);
    PUT_FEATURE(164, shaderInt16);
    PUT_FEATURE(168, shaderResourceResidency);
    PUT_FEATURE(172, shaderResourceMinLod);
    PUT_FEATURE(176, sparseBinding);
    PUT_FEATURE(180, sparseResidencyBuffer);
    PUT_FEATURE(184, sparseResidencyImage2D);
    PUT_FEATURE(188, sparseResidencyImage3D);
    PUT_FEATURE(192, sparseResidency2Samples);
    PUT_FEATURE(196, sparseResidency4Samples);
    PUT_FEATURE(200, sparseResidency8Samples);
    PUT_FEATURE(204, sparseResidency16Samples);
    PUT_FEATURE(208, sparseResidencyAliased);
    PUT_FEATURE(212, variableMultisampleRate);
    PUT_FEATURE(216, inheritedQueries);
#undef PUT_FEATURE
    return lean_io_result_mk_ok(m_out_pFeatures);
}
/* Glue for vkGetPhysicalDeviceFormatProperties.
 * Marshals VkFormatProperties into a 3-object-field ctor:
 * 0 = linearTilingFeatures, 1 = optimalTilingFeatures, 2 = bufferFeatures,
 * each boxed as UInt32. Returns the ctor wrapped in an ok IO result. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceFormatProperties(b_lean_obj_arg physicalDevice, VkFormat format, b_lean_obj_arg w) {
    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), format, &props);
    lean_object *m_props = lean_alloc_ctor(0, 3, 0);
    lean_ctor_set(m_props, 0, lean_box_uint32((uint32_t)props.linearTilingFeatures));
    lean_ctor_set(m_props, 1, lean_box_uint32((uint32_t)props.optimalTilingFeatures));
    lean_ctor_set(m_props, 2, lean_box_uint32((uint32_t)props.bufferFeatures));
    return lean_io_result_mk_ok(m_props);
}
/* Glue for vkGetPhysicalDeviceImageFormatProperties.
 * Result ctor layout: 2 object fields (0 = maxExtent ctor, 1 = sampleCounts
 * boxed UInt32) and 16 scalar bytes (maxMipLevels @0, maxArrayLayers @4,
 * maxResourceSize @8). Returns IO (VkResult x props) as a 2-field pair. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceImageFormatProperties(b_lean_obj_arg physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, b_lean_obj_arg w) {
    VkImageFormatProperties props;
    VkResult out_ret = vkGetPhysicalDeviceImageFormatProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), format, type, tiling, usage, flags, &props);
    /* maxExtent is a scalar-only ctor: width @0, height @4, depth @8. */
    lean_object *m_extent = lean_alloc_ctor(0, 0, 12);
    uint8_t *extent_sc = lean_ctor_scalar_cptr(m_extent);
    *(uint32_t*)(extent_sc + 0) = props.maxExtent.width;
    *(uint32_t*)(extent_sc + 4) = props.maxExtent.height;
    *(uint32_t*)(extent_sc + 8) = props.maxExtent.depth;
    lean_object *m_props = lean_alloc_ctor(0, 2, 16);
    lean_ctor_set(m_props, 0, m_extent);
    lean_ctor_set(m_props, 1, lean_box_uint32((uint32_t)props.sampleCounts));
    uint8_t *props_sc = lean_ctor_scalar_cptr(m_props);
    *(uint32_t*)(props_sc + 0) = props.maxMipLevels;
    *(uint32_t*)(props_sc + 4) = props.maxArrayLayers;
    *(VkDeviceSize*)(props_sc + 8) = props.maxResourceSize;
    lean_object *pair = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)out_ret));
    lean_ctor_set(pair, 1, m_props);
    return lean_io_result_mk_ok(pair);
}
/* Glue for vkGetPhysicalDeviceProperties.
 * Marshals VkPhysicalDeviceProperties into a Lean ctor with 3 object fields
 * (0 = deviceType, 1 = limits ctor, 2 = sparseProperties ctor) and 16 scalar
 * bytes (apiVersion @0, driverVersion @4, vendorID @8, deviceID @12).
 * NOTE(review): deviceName and pipelineCacheUUID are not marshalled here —
 * presumably omitted by the binding generator; confirm against the Lean-side
 * structure definition. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceProperties(b_lean_obj_arg physicalDevice, b_lean_obj_arg w) {
VkPhysicalDeviceProperties out_pProperties;
vkGetPhysicalDeviceProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), &out_pProperties);
lean_object *m_out_pProperties = lean_alloc_ctor(0, 3, 16);
VkPhysicalDeviceType out_pProperties_deviceType = out_pProperties.deviceType;
lean_ctor_set(m_out_pProperties, 0, lean_box_uint32((uint32_t)out_pProperties_deviceType));
VkPhysicalDeviceLimits out_pProperties_limits = out_pProperties.limits;
/* Limits ctor: 15 object fields (the fixed-size array members below) plus
 * 428 scalar bytes for the remaining numeric fields. */
lean_object *m_out_pProperties_limits = lean_alloc_ctor(0, 15, 428);
/* uint32_t[3] -> Lean Array of boxed UInt32. */
uint32_t* out_pProperties_limits_maxComputeWorkGroupCount = out_pProperties_limits.maxComputeWorkGroupCount;
lean_object *m_out_pProperties_limits_maxComputeWorkGroupCount = lean_alloc_array(3, 3);
for (size_t i = 0; i < 3; ++i) {
uint32_t i_out_pProperties_limits_maxComputeWorkGroupCount = out_pProperties_limits_maxComputeWorkGroupCount[i];
lean_array_cptr(m_out_pProperties_limits_maxComputeWorkGroupCount)[i] = lean_box_uint32((uint32_t)i_out_pProperties_limits_maxComputeWorkGroupCount);
}
lean_ctor_set(m_out_pProperties_limits, 0, m_out_pProperties_limits_maxComputeWorkGroupCount);
uint32_t* out_pProperties_limits_maxComputeWorkGroupSize = out_pProperties_limits.maxComputeWorkGroupSize;
lean_object *m_out_pProperties_limits_maxComputeWorkGroupSize = lean_alloc_array(3, 3);
for (size_t i = 0; i < 3; ++i) {
uint32_t i_out_pProperties_limits_maxComputeWorkGroupSize = out_pProperties_limits_maxComputeWorkGroupSize[i];
lean_array_cptr(m_out_pProperties_limits_maxComputeWorkGroupSize)[i] = lean_box_uint32((uint32_t)i_out_pProperties_limits_maxComputeWorkGroupSize);
}
lean_ctor_set(m_out_pProperties_limits, 1, m_out_pProperties_limits_maxComputeWorkGroupSize);
uint32_t* out_pProperties_limits_maxViewportDimensions = out_pProperties_limits.maxViewportDimensions;
lean_object *m_out_pProperties_limits_maxViewportDimensions = lean_alloc_array(2, 2);
for (size_t i = 0; i < 2; ++i) {
uint32_t i_out_pProperties_limits_maxViewportDimensions = out_pProperties_limits_maxViewportDimensions[i];
lean_array_cptr(m_out_pProperties_limits_maxViewportDimensions)[i] = lean_box_uint32((uint32_t)i_out_pProperties_limits_maxViewportDimensions);
}
lean_ctor_set(m_out_pProperties_limits, 2, m_out_pProperties_limits_maxViewportDimensions);
/* float[2] -> Lean scalar array (elem size 8, cap 2) filled by push;
 * presumably a FloatArray where each float is widened to double. */
float* out_pProperties_limits_viewportBoundsRange = out_pProperties_limits.viewportBoundsRange;
lean_object *m_out_pProperties_limits_viewportBoundsRange = lean_alloc_sarray(8, 0, 2);
for (size_t i = 0; i < 2; ++i) {
lean_float_array_push(m_out_pProperties_limits_viewportBoundsRange, out_pProperties_limits_viewportBoundsRange[i]);
}
lean_ctor_set(m_out_pProperties_limits, 3, m_out_pProperties_limits_viewportBoundsRange);
/* VkSampleCountFlags fields are boxed as UInt32 object fields 4..12. */
VkSampleCountFlags out_pProperties_limits_framebufferColorSampleCounts = out_pProperties_limits.framebufferColorSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 4, lean_box_uint32((uint32_t)out_pProperties_limits_framebufferColorSampleCounts));
VkSampleCountFlags out_pProperties_limits_framebufferDepthSampleCounts = out_pProperties_limits.framebufferDepthSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 5, lean_box_uint32((uint32_t)out_pProperties_limits_framebufferDepthSampleCounts));
VkSampleCountFlags out_pProperties_limits_framebufferStencilSampleCounts = out_pProperties_limits.framebufferStencilSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 6, lean_box_uint32((uint32_t)out_pProperties_limits_framebufferStencilSampleCounts));
VkSampleCountFlags out_pProperties_limits_framebufferNoAttachmentsSampleCounts = out_pProperties_limits.framebufferNoAttachmentsSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 7, lean_box_uint32((uint32_t)out_pProperties_limits_framebufferNoAttachmentsSampleCounts));
VkSampleCountFlags out_pProperties_limits_sampledImageColorSampleCounts = out_pProperties_limits.sampledImageColorSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 8, lean_box_uint32((uint32_t)out_pProperties_limits_sampledImageColorSampleCounts));
VkSampleCountFlags out_pProperties_limits_sampledImageIntegerSampleCounts = out_pProperties_limits.sampledImageIntegerSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 9, lean_box_uint32((uint32_t)out_pProperties_limits_sampledImageIntegerSampleCounts));
VkSampleCountFlags out_pProperties_limits_sampledImageDepthSampleCounts = out_pProperties_limits.sampledImageDepthSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 10, lean_box_uint32((uint32_t)out_pProperties_limits_sampledImageDepthSampleCounts));
VkSampleCountFlags out_pProperties_limits_sampledImageStencilSampleCounts = out_pProperties_limits.sampledImageStencilSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 11, lean_box_uint32((uint32_t)out_pProperties_limits_sampledImageStencilSampleCounts));
VkSampleCountFlags out_pProperties_limits_storageImageSampleCounts = out_pProperties_limits.storageImageSampleCounts;
lean_ctor_set(m_out_pProperties_limits, 12, lean_box_uint32((uint32_t)out_pProperties_limits_storageImageSampleCounts));
float* out_pProperties_limits_pointSizeRange = out_pProperties_limits.pointSizeRange;
lean_object *m_out_pProperties_limits_pointSizeRange = lean_alloc_sarray(8, 0, 2);
for (size_t i = 0; i < 2; ++i) {
lean_float_array_push(m_out_pProperties_limits_pointSizeRange, out_pProperties_limits_pointSizeRange[i]);
}
lean_ctor_set(m_out_pProperties_limits, 13, m_out_pProperties_limits_pointSizeRange);
float* out_pProperties_limits_lineWidthRange = out_pProperties_limits.lineWidthRange;
lean_object *m_out_pProperties_limits_lineWidthRange = lean_alloc_sarray(8, 0, 2);
for (size_t i = 0; i < 2; ++i) {
lean_float_array_push(m_out_pProperties_limits_lineWidthRange, out_pProperties_limits_lineWidthRange[i]);
}
lean_ctor_set(m_out_pProperties_limits, 14, m_out_pProperties_limits_lineWidthRange);
/* Scalar region of the limits ctor: raw stores at generator-chosen byte
 * offsets. NOTE(review): the offsets are irregular — `float` fields are
 * written with only 4 bytes but occupy 8-byte slots (e.g. 240/248, 312/320,
 * 356->364, 380/388), and some 8-byte stores are 4-byte aligned only
 * (e.g. VkDeviceSize at 44, 404, 412, 420). Presumably the Lean side uses
 * a packed layout and reads floats as doubles; verify against the
 * generator's layout logic before touching any offset. */
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 0) = out_pProperties_limits.maxImageDimension1D;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 4) = out_pProperties_limits.maxImageDimension2D;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 8) = out_pProperties_limits.maxImageDimension3D;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 12) = out_pProperties_limits.maxImageDimensionCube;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 16) = out_pProperties_limits.maxImageArrayLayers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 20) = out_pProperties_limits.maxTexelBufferElements;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 24) = out_pProperties_limits.maxUniformBufferRange;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 28) = out_pProperties_limits.maxStorageBufferRange;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 32) = out_pProperties_limits.maxPushConstantsSize;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 36) = out_pProperties_limits.maxMemoryAllocationCount;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 40) = out_pProperties_limits.maxSamplerAllocationCount;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 44) = out_pProperties_limits.bufferImageGranularity;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 52) = out_pProperties_limits.sparseAddressSpaceSize;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 60) = out_pProperties_limits.maxBoundDescriptorSets;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 64) = out_pProperties_limits.maxPerStageDescriptorSamplers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 68) = out_pProperties_limits.maxPerStageDescriptorUniformBuffers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 72) = out_pProperties_limits.maxPerStageDescriptorStorageBuffers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 76) = out_pProperties_limits.maxPerStageDescriptorSampledImages;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 80) = out_pProperties_limits.maxPerStageDescriptorStorageImages;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 84) = out_pProperties_limits.maxPerStageDescriptorInputAttachments;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 88) = out_pProperties_limits.maxPerStageResources;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 92) = out_pProperties_limits.maxDescriptorSetSamplers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 96) = out_pProperties_limits.maxDescriptorSetUniformBuffers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 100) = out_pProperties_limits.maxDescriptorSetUniformBuffersDynamic;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 104) = out_pProperties_limits.maxDescriptorSetStorageBuffers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 108) = out_pProperties_limits.maxDescriptorSetStorageBuffersDynamic;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 112) = out_pProperties_limits.maxDescriptorSetSampledImages;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 116) = out_pProperties_limits.maxDescriptorSetStorageImages;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 120) = out_pProperties_limits.maxDescriptorSetInputAttachments;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 124) = out_pProperties_limits.maxVertexInputAttributes;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 128) = out_pProperties_limits.maxVertexInputBindings;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 132) = out_pProperties_limits.maxVertexInputAttributeOffset;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 136) = out_pProperties_limits.maxVertexInputBindingStride;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 140) = out_pProperties_limits.maxVertexOutputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 144) = out_pProperties_limits.maxTessellationGenerationLevel;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 148) = out_pProperties_limits.maxTessellationPatchSize;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 152) = out_pProperties_limits.maxTessellationControlPerVertexInputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 156) = out_pProperties_limits.maxTessellationControlPerVertexOutputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 160) = out_pProperties_limits.maxTessellationControlPerPatchOutputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 164) = out_pProperties_limits.maxTessellationControlTotalOutputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 168) = out_pProperties_limits.maxTessellationEvaluationInputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 172) = out_pProperties_limits.maxTessellationEvaluationOutputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 176) = out_pProperties_limits.maxGeometryShaderInvocations;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 180) = out_pProperties_limits.maxGeometryInputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 184) = out_pProperties_limits.maxGeometryOutputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 188) = out_pProperties_limits.maxGeometryOutputVertices;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 192) = out_pProperties_limits.maxGeometryTotalOutputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 196) = out_pProperties_limits.maxFragmentInputComponents;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 200) = out_pProperties_limits.maxFragmentOutputAttachments;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 204) = out_pProperties_limits.maxFragmentDualSrcAttachments;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 208) = out_pProperties_limits.maxFragmentCombinedOutputResources;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 212) = out_pProperties_limits.maxComputeSharedMemorySize;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 216) = out_pProperties_limits.maxComputeWorkGroupInvocations;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 220) = out_pProperties_limits.subPixelPrecisionBits;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 224) = out_pProperties_limits.subTexelPrecisionBits;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 228) = out_pProperties_limits.mipmapPrecisionBits;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 232) = out_pProperties_limits.maxDrawIndexedIndexValue;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 236) = out_pProperties_limits.maxDrawIndirectCount;
/* NOTE(review): 4-byte float store into an apparently 8-byte slot (next
 * offset is 248) — upper bytes of the slot are left unwritten. */
*(float*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 240) = out_pProperties_limits.maxSamplerLodBias;
*(float*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 248) = out_pProperties_limits.maxSamplerAnisotropy;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 256) = out_pProperties_limits.maxViewports;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 260) = out_pProperties_limits.viewportSubPixelBits;
*(uint64_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 264) = out_pProperties_limits.minMemoryMapAlignment;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 272) = out_pProperties_limits.minTexelBufferOffsetAlignment;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 280) = out_pProperties_limits.minUniformBufferOffsetAlignment;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 288) = out_pProperties_limits.minStorageBufferOffsetAlignment;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 296) = out_pProperties_limits.minTexelOffset;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 300) = out_pProperties_limits.maxTexelOffset;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 304) = out_pProperties_limits.minTexelGatherOffset;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 308) = out_pProperties_limits.maxTexelGatherOffset;
*(float*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 312) = out_pProperties_limits.minInterpolationOffset;
*(float*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 320) = out_pProperties_limits.maxInterpolationOffset;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 328) = out_pProperties_limits.subPixelInterpolationOffsetBits;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 332) = out_pProperties_limits.maxFramebufferWidth;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 336) = out_pProperties_limits.maxFramebufferHeight;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 340) = out_pProperties_limits.maxFramebufferLayers;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 344) = out_pProperties_limits.maxColorAttachments;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 348) = out_pProperties_limits.maxSampleMaskWords;
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 352) = out_pProperties_limits.timestampComputeAndGraphics;
*(float*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 356) = out_pProperties_limits.timestampPeriod;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 364) = out_pProperties_limits.maxClipDistances;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 368) = out_pProperties_limits.maxCullDistances;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 372) = out_pProperties_limits.maxCombinedClipAndCullDistances;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 376) = out_pProperties_limits.discreteQueuePriorities;
*(float*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 380) = out_pProperties_limits.pointSizeGranularity;
*(float*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 388) = out_pProperties_limits.lineWidthGranularity;
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 396) = out_pProperties_limits.strictLines;
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 400) = out_pProperties_limits.standardSampleLocations;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 404) = out_pProperties_limits.optimalBufferCopyOffsetAlignment;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 412) = out_pProperties_limits.optimalBufferCopyRowPitchAlignment;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pProperties_limits) + 420) = out_pProperties_limits.nonCoherentAtomSize;
lean_ctor_set(m_out_pProperties, 1, m_out_pProperties_limits);
/* Sparse properties: scalar-only ctor of 5 VkBool32 fields (20 bytes). */
VkPhysicalDeviceSparseProperties out_pProperties_sparseProperties = out_pProperties.sparseProperties;
lean_object *m_out_pProperties_sparseProperties = lean_alloc_ctor(0, 0, 20);
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_sparseProperties) + 0) = out_pProperties_sparseProperties.residencyStandard2DBlockShape;
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_sparseProperties) + 4) = out_pProperties_sparseProperties.residencyStandard2DMultisampleBlockShape;
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_sparseProperties) + 8) = out_pProperties_sparseProperties.residencyStandard3DBlockShape;
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_sparseProperties) + 12) = out_pProperties_sparseProperties.residencyAlignedMipSize;
*(VkBool32*)(lean_ctor_scalar_cptr(m_out_pProperties_sparseProperties) + 16) = out_pProperties_sparseProperties.residencyNonResidentStrict;
lean_ctor_set(m_out_pProperties, 2, m_out_pProperties_sparseProperties);
/* Top-level scalar fields of the properties ctor. */
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties) + 0) = out_pProperties.apiVersion;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties) + 4) = out_pProperties.driverVersion;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties) + 8) = out_pProperties.vendorID;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pProperties) + 12) = out_pProperties.deviceID;
/* NOTE(review): `temp` is declared but never used here. */
lean_object *temp, *tuple = m_out_pProperties;
return lean_io_result_mk_ok(tuple);
}
// Marshals vkGetPhysicalDeviceQueueFamilyProperties into a Lean Array of
// queue-family property ctors (queueFlags boxed, Extent3D nested, two scalars).
// Fix: free the temporary C array (previously leaked on every call) and drop
// the unused `temp` local.
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceQueueFamilyProperties(b_lean_obj_arg physicalDevice, b_lean_obj_arg w) {
uint32_t len_out_pQueueFamilyProperties;
VkQueueFamilyProperties* out_pQueueFamilyProperties;
// Standard Vulkan two-call enumeration: first call (NULL) yields the count.
vkGetPhysicalDeviceQueueFamilyProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), &len_out_pQueueFamilyProperties, NULL);
out_pQueueFamilyProperties = calloc(len_out_pQueueFamilyProperties, sizeof(VkQueueFamilyProperties));
vkGetPhysicalDeviceQueueFamilyProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), &len_out_pQueueFamilyProperties, out_pQueueFamilyProperties);
lean_object *m_out_pQueueFamilyProperties = lean_alloc_array(len_out_pQueueFamilyProperties, len_out_pQueueFamilyProperties);
for (size_t i = 0; i < len_out_pQueueFamilyProperties; ++i) {
VkQueueFamilyProperties i_out_pQueueFamilyProperties = out_pQueueFamilyProperties[i];
// ctor layout: 2 object fields (queueFlags box, Extent3D) + 8 scalar bytes.
lean_object *m_i_out_pQueueFamilyProperties = lean_alloc_ctor(0, 2, 8);
VkQueueFlags i_out_pQueueFamilyProperties_queueFlags = i_out_pQueueFamilyProperties.queueFlags;
lean_ctor_set(m_i_out_pQueueFamilyProperties, 0, lean_box_uint32((uint32_t)i_out_pQueueFamilyProperties_queueFlags));
VkExtent3D i_out_pQueueFamilyProperties_minImageTransferGranularity = i_out_pQueueFamilyProperties.minImageTransferGranularity;
// Extent3D: 0 object fields, 12 scalar bytes (width/height/depth).
lean_object *m_i_out_pQueueFamilyProperties_minImageTransferGranularity = lean_alloc_ctor(0, 0, 12);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pQueueFamilyProperties_minImageTransferGranularity) + 0) = i_out_pQueueFamilyProperties_minImageTransferGranularity.width;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pQueueFamilyProperties_minImageTransferGranularity) + 4) = i_out_pQueueFamilyProperties_minImageTransferGranularity.height;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pQueueFamilyProperties_minImageTransferGranularity) + 8) = i_out_pQueueFamilyProperties_minImageTransferGranularity.depth;
lean_ctor_set(m_i_out_pQueueFamilyProperties, 1, m_i_out_pQueueFamilyProperties_minImageTransferGranularity);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pQueueFamilyProperties) + 0) = i_out_pQueueFamilyProperties.queueCount;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pQueueFamilyProperties) + 4) = i_out_pQueueFamilyProperties.timestampValidBits;
lean_array_cptr(m_out_pQueueFamilyProperties)[i] = m_i_out_pQueueFamilyProperties;
}
free(out_pQueueFamilyProperties); // fix: was leaked
return lean_io_result_mk_ok(m_out_pQueueFamilyProperties);
}
// Performs the vkGetInstanceProcAddr lookup for `pName` on `instance`.
// NOTE(review): the returned PFN_vkVoidFunction is discarded and the Lean side
// receives Unit, so this binding only triggers the lookup's side effects (none
// observable). Confirm the generator intends this; returning the pointer would
// require a Lean-side signature change.
LEAN_EXPORT lean_obj_res glue_vkGetInstanceProcAddr(b_lean_obj_arg instance, b_lean_obj_arg pName, b_lean_obj_arg w) {
vkGetInstanceProcAddr((VkInstance)lean_unbox_uint64(instance), (char*)lean_string_cstr(pName));
return lean_io_result_mk_ok(lean_box(0));
}
// Performs the vkGetDeviceProcAddr lookup for `pName` on `device`.
// NOTE(review): as with glue_vkGetInstanceProcAddr, the resulting function
// pointer is discarded and Unit is returned to Lean — verify this is the
// intended generated behavior.
LEAN_EXPORT lean_obj_res glue_vkGetDeviceProcAddr(b_lean_obj_arg device, b_lean_obj_arg pName, b_lean_obj_arg w) {
vkGetDeviceProcAddr((VkDevice)lean_unbox_uint64(device), (char*)lean_string_cstr(pName));
return lean_io_result_mk_ok(lean_box(0));
}
// Unmarshals a Lean DeviceCreateInfo ctor into VkDeviceCreateInfo, calls
// vkCreateDevice, and returns (VkResult, VkDevice) as a Lean pair.
// Ctor layout (from the generated reads below): obj fields 0..3 are
// pQueueCreateInfos (Array), ppEnabledLayerNames (Array String),
// ppEnabledExtensionNames (Array String), pEnabledFeatures (Option); the
// scalar area holds `flags` at offset 0.
// Fix: free all temporary C arrays (queue-create infos, per-queue priority
// arrays, layer/extension name arrays) — previously leaked on every call.
// Note the name arrays hold borrowed pointers into Lean strings, so only the
// arrays themselves are freed, not their elements.
LEAN_EXPORT lean_obj_res glue_vkCreateDevice(b_lean_obj_arg physicalDevice, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
lean_object *pCreateInfo_pQueueCreateInfos = lean_ctor_get(pCreateInfo, 0);
size_t len_pCreateInfo_pQueueCreateInfos = lean_array_size(pCreateInfo_pQueueCreateInfos);
VkDeviceQueueCreateInfo* um_pCreateInfo_pQueueCreateInfos = calloc(len_pCreateInfo_pQueueCreateInfos, sizeof(VkDeviceQueueCreateInfo));
for (size_t i = 0; i < len_pCreateInfo_pQueueCreateInfos; ++i) {
lean_object *i_pCreateInfo_pQueueCreateInfos = lean_array_cptr(pCreateInfo_pQueueCreateInfos)[i];
lean_object *i_pCreateInfo_pQueueCreateInfos_flags = lean_ctor_get(i_pCreateInfo_pQueueCreateInfos, 0);
lean_object *i_pCreateInfo_pQueueCreateInfos_pQueuePriorities = lean_ctor_get(i_pCreateInfo_pQueueCreateInfos, 1);
size_t len_i_pCreateInfo_pQueueCreateInfos_pQueuePriorities = lean_sarray_size(i_pCreateInfo_pQueueCreateInfos_pQueuePriorities);
float* um_i_pCreateInfo_pQueueCreateInfos_pQueuePriorities = calloc(len_i_pCreateInfo_pQueueCreateInfos_pQueuePriorities, sizeof(float));
for (size_t i = 0; i < len_i_pCreateInfo_pQueueCreateInfos_pQueuePriorities; ++i) {
um_i_pCreateInfo_pQueueCreateInfos_pQueuePriorities[i] = lean_float_array_uget(i_pCreateInfo_pQueueCreateInfos_pQueuePriorities, i);
}
struct VkDeviceQueueCreateInfo um_i_pCreateInfo_pQueueCreateInfos = {
.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
.pNext = 0,
.flags = (VkDeviceQueueCreateFlags)(VkDeviceQueueCreateFlagBits)lean_unbox_uint32(i_pCreateInfo_pQueueCreateInfos_flags),
.queueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfo_pQueueCreateInfos) + lean_ctor_num_objs(i_pCreateInfo_pQueueCreateInfos)) + 0),
.queueCount = len_i_pCreateInfo_pQueueCreateInfos_pQueuePriorities,
.pQueuePriorities = um_i_pCreateInfo_pQueueCreateInfos_pQueuePriorities,
};
um_pCreateInfo_pQueueCreateInfos[i] = um_i_pCreateInfo_pQueueCreateInfos;
}
lean_object *pCreateInfo_ppEnabledLayerNames = lean_ctor_get(pCreateInfo, 1);
size_t len_pCreateInfo_ppEnabledLayerNames = lean_array_size(pCreateInfo_ppEnabledLayerNames);
char** um_pCreateInfo_ppEnabledLayerNames = calloc(len_pCreateInfo_ppEnabledLayerNames, sizeof(char*));
for (size_t i = 0; i < len_pCreateInfo_ppEnabledLayerNames; ++i) {
lean_object *i_pCreateInfo_ppEnabledLayerNames = lean_array_cptr(pCreateInfo_ppEnabledLayerNames)[i];
um_pCreateInfo_ppEnabledLayerNames[i] = (char*)lean_string_cstr(i_pCreateInfo_ppEnabledLayerNames);
}
lean_object *pCreateInfo_ppEnabledExtensionNames = lean_ctor_get(pCreateInfo, 2);
size_t len_pCreateInfo_ppEnabledExtensionNames = lean_array_size(pCreateInfo_ppEnabledExtensionNames);
char** um_pCreateInfo_ppEnabledExtensionNames = calloc(len_pCreateInfo_ppEnabledExtensionNames, sizeof(char*));
for (size_t i = 0; i < len_pCreateInfo_ppEnabledExtensionNames; ++i) {
lean_object *i_pCreateInfo_ppEnabledExtensionNames = lean_array_cptr(pCreateInfo_ppEnabledExtensionNames)[i];
um_pCreateInfo_ppEnabledExtensionNames[i] = (char*)lean_string_cstr(i_pCreateInfo_ppEnabledExtensionNames);
}
lean_object *pCreateInfo_pEnabledFeatures = lean_ctor_get(pCreateInfo, 3);
// Option is encoded as: scalar (boxed 0) for `none`, ctor for `some`.
_Bool is_some_pCreateInfo_pEnabledFeatures = !lean_is_scalar(pCreateInfo_pEnabledFeatures);
VkPhysicalDeviceFeatures um_pCreateInfo_pEnabledFeatures;
if (is_some_pCreateInfo_pEnabledFeatures) {
lean_object *some_pCreateInfo_pEnabledFeatures = lean_ctor_get(pCreateInfo_pEnabledFeatures, 0);
// All 55 VkBool32 feature flags live as consecutive uint32 scalars.
struct VkPhysicalDeviceFeatures um_some_pCreateInfo_pEnabledFeatures = {
.robustBufferAccess = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 0),
.fullDrawIndexUint32 = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 4),
.imageCubeArray = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 8),
.independentBlend = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 12),
.geometryShader = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 16),
.tessellationShader = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 20),
.sampleRateShading = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 24),
.dualSrcBlend = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 28),
.logicOp = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 32),
.multiDrawIndirect = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 36),
.drawIndirectFirstInstance = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 40),
.depthClamp = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 44),
.depthBiasClamp = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 48),
.fillModeNonSolid = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 52),
.depthBounds = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 56),
.wideLines = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 60),
.largePoints = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 64),
.alphaToOne = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 68),
.multiViewport = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 72),
.samplerAnisotropy = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 76),
.textureCompressionETC2 = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 80),
.textureCompressionASTC_LDR = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 84),
.textureCompressionBC = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 88),
.occlusionQueryPrecise = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 92),
.pipelineStatisticsQuery = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 96),
.vertexPipelineStoresAndAtomics = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 100),
.fragmentStoresAndAtomics = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 104),
.shaderTessellationAndGeometryPointSize = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 108),
.shaderImageGatherExtended = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 112),
.shaderStorageImageExtendedFormats = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 116),
.shaderStorageImageMultisample = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 120),
.shaderStorageImageReadWithoutFormat = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 124),
.shaderStorageImageWriteWithoutFormat = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 128),
.shaderUniformBufferArrayDynamicIndexing = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 132),
.shaderSampledImageArrayDynamicIndexing = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 136),
.shaderStorageBufferArrayDynamicIndexing = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 140),
.shaderStorageImageArrayDynamicIndexing = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 144),
.shaderClipDistance = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 148),
.shaderCullDistance = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 152),
.shaderFloat64 = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 156),
.shaderInt64 = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 160),
.shaderInt16 = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 164),
.shaderResourceResidency = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 168),
.shaderResourceMinLod = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 172),
.sparseBinding = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 176),
.sparseResidencyBuffer = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 180),
.sparseResidencyImage2D = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 184),
.sparseResidencyImage3D = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 188),
.sparseResidency2Samples = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 192),
.sparseResidency4Samples = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 196),
.sparseResidency8Samples = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 200),
.sparseResidency16Samples = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 204),
.sparseResidencyAliased = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 208),
.variableMultisampleRate = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 212),
.inheritedQueries = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pCreateInfo_pEnabledFeatures) + lean_ctor_num_objs(some_pCreateInfo_pEnabledFeatures)) + 216),
};
um_pCreateInfo_pEnabledFeatures = um_some_pCreateInfo_pEnabledFeatures;
}
struct VkDeviceCreateInfo um_pCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
.pNext = 0,
.flags = (VkDeviceCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 0),
.queueCreateInfoCount = len_pCreateInfo_pQueueCreateInfos,
.pQueueCreateInfos = um_pCreateInfo_pQueueCreateInfos,
.enabledLayerCount = len_pCreateInfo_ppEnabledLayerNames,
.ppEnabledLayerNames = um_pCreateInfo_ppEnabledLayerNames,
.enabledExtensionCount = len_pCreateInfo_ppEnabledExtensionNames,
.ppEnabledExtensionNames = um_pCreateInfo_ppEnabledExtensionNames,
.pEnabledFeatures = (is_some_pCreateInfo_pEnabledFeatures ? &um_pCreateInfo_pEnabledFeatures : NULL),
};
VkDevice out_pDevice;
VkResult out_ret = vkCreateDevice((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), &um_pCreateInfo, NULL, &out_pDevice);
// fix: release all temporaries (previously leaked). Name-array elements are
// borrowed from Lean strings and must not be freed individually.
for (size_t i = 0; i < len_pCreateInfo_pQueueCreateInfos; ++i) {
free((void*)um_pCreateInfo_pQueueCreateInfos[i].pQueuePriorities);
}
free(um_pCreateInfo_pQueueCreateInfos);
free(um_pCreateInfo_ppEnabledLayerNames);
free(um_pCreateInfo_ppEnabledExtensionNames);
lean_object *temp, *tuple = lean_box_uint64((uint64_t)out_pDevice);
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Destroys the VkDevice encoded in the boxed uint64 handle; no host allocator
// callbacks are supplied. Returns Unit to Lean.
LEAN_EXPORT lean_obj_res glue_vkDestroyDevice(b_lean_obj_arg device, b_lean_obj_arg w) {
VkDevice dev = (VkDevice)lean_unbox_uint64(device);
vkDestroyDevice(dev, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
// Enumerates instance extensions for the given layer name and returns
// (VkResult, Array ExtensionProperties) to Lean. Only `specVersion` is
// marshalled per element — extensionName is dropped by the generator.
// NOTE(review): Vulkan treats a NULL pLayerName as "global extensions"; this
// binding always passes a (possibly empty) C string — confirm the Lean API
// accounts for that.
// Fix: free the temporary C array (previously leaked on every call).
LEAN_EXPORT lean_obj_res glue_vkEnumerateInstanceExtensionProperties(b_lean_obj_arg pLayerName, b_lean_obj_arg w) {
uint32_t len_out_pProperties;
VkExtensionProperties* out_pProperties;
// Two-call enumeration: first call retrieves the count.
vkEnumerateInstanceExtensionProperties((char*)lean_string_cstr(pLayerName), &len_out_pProperties, NULL);
out_pProperties = calloc(len_out_pProperties, sizeof(VkExtensionProperties));
VkResult out_ret = vkEnumerateInstanceExtensionProperties((char*)lean_string_cstr(pLayerName), &len_out_pProperties, out_pProperties);
lean_object *m_out_pProperties = lean_alloc_array(len_out_pProperties, len_out_pProperties);
for (size_t i = 0; i < len_out_pProperties; ++i) {
VkExtensionProperties i_out_pProperties = out_pProperties[i];
lean_object *m_i_out_pProperties = lean_alloc_ctor(0, 0, 4);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties) + 0) = i_out_pProperties.specVersion;
lean_array_cptr(m_out_pProperties)[i] = m_i_out_pProperties;
}
free(out_pProperties); // fix: was leaked
lean_object *temp, *tuple = m_out_pProperties;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Enumerates device extensions for (physicalDevice, pLayerName) and returns
// (VkResult, Array ExtensionProperties) to Lean; only specVersion is kept.
// Fix: free the temporary C array (previously leaked on every call).
LEAN_EXPORT lean_obj_res glue_vkEnumerateDeviceExtensionProperties(b_lean_obj_arg physicalDevice, b_lean_obj_arg pLayerName, b_lean_obj_arg w) {
uint32_t len_out_pProperties;
VkExtensionProperties* out_pProperties;
// Two-call enumeration: first call retrieves the count.
vkEnumerateDeviceExtensionProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), (char*)lean_string_cstr(pLayerName), &len_out_pProperties, NULL);
out_pProperties = calloc(len_out_pProperties, sizeof(VkExtensionProperties));
VkResult out_ret = vkEnumerateDeviceExtensionProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), (char*)lean_string_cstr(pLayerName), &len_out_pProperties, out_pProperties);
lean_object *m_out_pProperties = lean_alloc_array(len_out_pProperties, len_out_pProperties);
for (size_t i = 0; i < len_out_pProperties; ++i) {
VkExtensionProperties i_out_pProperties = out_pProperties[i];
lean_object *m_i_out_pProperties = lean_alloc_ctor(0, 0, 4);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties) + 0) = i_out_pProperties.specVersion;
lean_array_cptr(m_out_pProperties)[i] = m_i_out_pProperties;
}
free(out_pProperties); // fix: was leaked
lean_object *temp, *tuple = m_out_pProperties;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Enumerates instance layers and returns (VkResult, Array LayerProperties);
// per element only specVersion and implementationVersion are marshalled.
// Fix: free the temporary C array (previously leaked on every call).
LEAN_EXPORT lean_obj_res glue_vkEnumerateInstanceLayerProperties(b_lean_obj_arg w) {
uint32_t len_out_pProperties;
VkLayerProperties* out_pProperties;
// Two-call enumeration: first call retrieves the count.
vkEnumerateInstanceLayerProperties(&len_out_pProperties, NULL);
out_pProperties = calloc(len_out_pProperties, sizeof(VkLayerProperties));
VkResult out_ret = vkEnumerateInstanceLayerProperties(&len_out_pProperties, out_pProperties);
lean_object *m_out_pProperties = lean_alloc_array(len_out_pProperties, len_out_pProperties);
for (size_t i = 0; i < len_out_pProperties; ++i) {
VkLayerProperties i_out_pProperties = out_pProperties[i];
lean_object *m_i_out_pProperties = lean_alloc_ctor(0, 0, 8);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties) + 0) = i_out_pProperties.specVersion;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties) + 4) = i_out_pProperties.implementationVersion;
lean_array_cptr(m_out_pProperties)[i] = m_i_out_pProperties;
}
free(out_pProperties); // fix: was leaked
lean_object *temp, *tuple = m_out_pProperties;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Enumerates device layers for physicalDevice and returns
// (VkResult, Array LayerProperties); per element only specVersion and
// implementationVersion are marshalled.
// Fix: free the temporary C array (previously leaked on every call).
LEAN_EXPORT lean_obj_res glue_vkEnumerateDeviceLayerProperties(b_lean_obj_arg physicalDevice, b_lean_obj_arg w) {
uint32_t len_out_pProperties;
VkLayerProperties* out_pProperties;
// Two-call enumeration: first call retrieves the count.
vkEnumerateDeviceLayerProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), &len_out_pProperties, NULL);
out_pProperties = calloc(len_out_pProperties, sizeof(VkLayerProperties));
VkResult out_ret = vkEnumerateDeviceLayerProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), &len_out_pProperties, out_pProperties);
lean_object *m_out_pProperties = lean_alloc_array(len_out_pProperties, len_out_pProperties);
for (size_t i = 0; i < len_out_pProperties; ++i) {
VkLayerProperties i_out_pProperties = out_pProperties[i];
lean_object *m_i_out_pProperties = lean_alloc_ctor(0, 0, 8);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties) + 0) = i_out_pProperties.specVersion;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties) + 4) = i_out_pProperties.implementationVersion;
lean_array_cptr(m_out_pProperties)[i] = m_i_out_pProperties;
}
free(out_pProperties); // fix: was leaked
lean_object *temp, *tuple = m_out_pProperties;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Retrieves the VkQueue for (queueFamilyIndex, queueIndex) and returns its
// handle as a boxed uint64. Fix: removed the unused `temp` local.
LEAN_EXPORT lean_obj_res glue_vkGetDeviceQueue(b_lean_obj_arg device, uint32_t queueFamilyIndex, uint32_t queueIndex, b_lean_obj_arg w) {
VkQueue out_pQueue;
vkGetDeviceQueue((VkDevice)lean_unbox_uint64(device), queueFamilyIndex, queueIndex, &out_pQueue);
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_pQueue));
}
// Unmarshals an Array of Lean SubmitInfo ctors (fields: wait semaphores, wait
// dst stage masks, command buffers, signal semaphores) into VkSubmitInfo[] and
// submits them. Returns the boxed VkResult.
// Note: VkSubmitInfo has no waitDstStageMaskCount; the Vulkan spec requires
// pWaitDstStageMask to have waitSemaphoreCount entries, which this code takes
// from the Lean side as-is.
// Fix: free the submit array and all per-submit arrays (previously leaked).
LEAN_EXPORT lean_obj_res glue_vkQueueSubmit(b_lean_obj_arg queue, b_lean_obj_arg pSubmits, b_lean_obj_arg fence, b_lean_obj_arg w) {
size_t len_pSubmits = lean_array_size(pSubmits);
VkSubmitInfo* um_pSubmits = calloc(len_pSubmits, sizeof(VkSubmitInfo));
for (size_t i = 0; i < len_pSubmits; ++i) {
lean_object *i_pSubmits = lean_array_cptr(pSubmits)[i];
lean_object *i_pSubmits_pWaitSemaphores = lean_ctor_get(i_pSubmits, 0);
size_t len_i_pSubmits_pWaitSemaphores = lean_array_size(i_pSubmits_pWaitSemaphores);
VkSemaphore* um_i_pSubmits_pWaitSemaphores = calloc(len_i_pSubmits_pWaitSemaphores, sizeof(VkSemaphore));
for (size_t i = 0; i < len_i_pSubmits_pWaitSemaphores; ++i) {
lean_object *i_i_pSubmits_pWaitSemaphores = lean_array_cptr(i_pSubmits_pWaitSemaphores)[i];
um_i_pSubmits_pWaitSemaphores[i] = (VkSemaphore)lean_unbox_uint64(i_i_pSubmits_pWaitSemaphores);
}
lean_object *i_pSubmits_pWaitDstStageMask = lean_ctor_get(i_pSubmits, 1);
size_t len_i_pSubmits_pWaitDstStageMask = lean_array_size(i_pSubmits_pWaitDstStageMask);
VkPipelineStageFlags* um_i_pSubmits_pWaitDstStageMask = calloc(len_i_pSubmits_pWaitDstStageMask, sizeof(VkPipelineStageFlags));
for (size_t i = 0; i < len_i_pSubmits_pWaitDstStageMask; ++i) {
lean_object *i_i_pSubmits_pWaitDstStageMask = lean_array_cptr(i_pSubmits_pWaitDstStageMask)[i];
um_i_pSubmits_pWaitDstStageMask[i] = (VkPipelineStageFlags)(VkPipelineStageFlagBits)lean_unbox_uint32(i_i_pSubmits_pWaitDstStageMask);
}
lean_object *i_pSubmits_pCommandBuffers = lean_ctor_get(i_pSubmits, 2);
size_t len_i_pSubmits_pCommandBuffers = lean_array_size(i_pSubmits_pCommandBuffers);
VkCommandBuffer* um_i_pSubmits_pCommandBuffers = calloc(len_i_pSubmits_pCommandBuffers, sizeof(VkCommandBuffer));
for (size_t i = 0; i < len_i_pSubmits_pCommandBuffers; ++i) {
lean_object *i_i_pSubmits_pCommandBuffers = lean_array_cptr(i_pSubmits_pCommandBuffers)[i];
um_i_pSubmits_pCommandBuffers[i] = (VkCommandBuffer)lean_unbox_uint64(i_i_pSubmits_pCommandBuffers);
}
lean_object *i_pSubmits_pSignalSemaphores = lean_ctor_get(i_pSubmits, 3);
size_t len_i_pSubmits_pSignalSemaphores = lean_array_size(i_pSubmits_pSignalSemaphores);
VkSemaphore* um_i_pSubmits_pSignalSemaphores = calloc(len_i_pSubmits_pSignalSemaphores, sizeof(VkSemaphore));
for (size_t i = 0; i < len_i_pSubmits_pSignalSemaphores; ++i) {
lean_object *i_i_pSubmits_pSignalSemaphores = lean_array_cptr(i_pSubmits_pSignalSemaphores)[i];
um_i_pSubmits_pSignalSemaphores[i] = (VkSemaphore)lean_unbox_uint64(i_i_pSubmits_pSignalSemaphores);
}
struct VkSubmitInfo um_i_pSubmits = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = 0,
.waitSemaphoreCount = len_i_pSubmits_pWaitSemaphores,
.pWaitSemaphores = um_i_pSubmits_pWaitSemaphores,
.pWaitDstStageMask = um_i_pSubmits_pWaitDstStageMask,
.commandBufferCount = len_i_pSubmits_pCommandBuffers,
.pCommandBuffers = um_i_pSubmits_pCommandBuffers,
.signalSemaphoreCount = len_i_pSubmits_pSignalSemaphores,
.pSignalSemaphores = um_i_pSubmits_pSignalSemaphores,
};
um_pSubmits[i] = um_i_pSubmits;
}
VkResult out_ret = vkQueueSubmit((VkQueue)lean_unbox_uint64(queue), len_pSubmits, um_pSubmits, (VkFence)lean_unbox_uint64(fence));
// fix: release all temporaries (previously leaked); casts drop the struct
// members' const qualifiers for free().
for (size_t i = 0; i < len_pSubmits; ++i) {
free((void*)um_pSubmits[i].pWaitSemaphores);
free((void*)um_pSubmits[i].pWaitDstStageMask);
free((void*)um_pSubmits[i].pCommandBuffers);
free((void*)um_pSubmits[i].pSignalSemaphores);
}
free(um_pSubmits);
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Blocks until the queue is idle and returns the boxed VkResult.
// Fix: removed the unused `temp` local.
LEAN_EXPORT lean_obj_res glue_vkQueueWaitIdle(b_lean_obj_arg queue, b_lean_obj_arg w) {
VkResult out_ret = vkQueueWaitIdle((VkQueue)lean_unbox_uint64(queue));
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Blocks until the device is idle and returns the boxed VkResult.
// Fix: removed the unused `temp` local.
LEAN_EXPORT lean_obj_res glue_vkDeviceWaitIdle(b_lean_obj_arg device, b_lean_obj_arg w) {
VkResult out_ret = vkDeviceWaitIdle((VkDevice)lean_unbox_uint64(device));
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Builds a VkMemoryAllocateInfo from the Lean ctor's scalar area
// (allocationSize: u64 at offset 0, memoryTypeIndex: u32 at offset 8),
// calls vkAllocateMemory, and returns (VkResult, VkDeviceMemory) as a pair.
LEAN_EXPORT lean_obj_res glue_vkAllocateMemory(b_lean_obj_arg device, b_lean_obj_arg pAllocateInfo, b_lean_obj_arg w) {
// Scalar fields of a Lean ctor follow its object-pointer fields.
uint8_t *scalars = (uint8_t*)(lean_ctor_obj_cptr(pAllocateInfo) + lean_ctor_num_objs(pAllocateInfo));
struct VkMemoryAllocateInfo um_pAllocateInfo = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = 0,
.allocationSize = (VkDeviceSize)*(uint64_t*)(scalars + 0),
.memoryTypeIndex = (uint32_t)*(uint32_t*)(scalars + 8),
};
VkDeviceMemory out_pMemory;
VkResult out_ret = vkAllocateMemory((VkDevice)lean_unbox_uint64(device), &um_pAllocateInfo, NULL, &out_pMemory);
lean_object *pair = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)out_pMemory));
return lean_io_result_mk_ok(pair);
}
// Frees the VkDeviceMemory handle; no allocator callbacks. Returns Unit.
LEAN_EXPORT lean_obj_res glue_vkFreeMemory(b_lean_obj_arg device, b_lean_obj_arg memory, b_lean_obj_arg w) {
VkDevice dev = (VkDevice)lean_unbox_uint64(device);
VkDeviceMemory mem = (VkDeviceMemory)lean_unbox_uint64(memory);
vkFreeMemory(dev, mem, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
// Maps [offset, offset+size) of the device memory into host address space and
// returns (VkResult, pointer-as-uint64) as a Lean pair.
LEAN_EXPORT lean_obj_res glue_vkMapMemory(b_lean_obj_arg device, b_lean_obj_arg memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, b_lean_obj_arg w) {
Pointer out_ppData;
VkResult out_ret = vkMapMemory((VkDevice)lean_unbox_uint64(device), (VkDeviceMemory)lean_unbox_uint64(memory), offset, size, flags, &out_ppData);
lean_object *pair = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)out_ppData));
return lean_io_result_mk_ok(pair);
}
// Unmaps a previously mapped VkDeviceMemory. Returns Unit.
LEAN_EXPORT lean_obj_res glue_vkUnmapMemory(b_lean_obj_arg device, b_lean_obj_arg memory, b_lean_obj_arg w) {
VkDevice dev = (VkDevice)lean_unbox_uint64(device);
VkDeviceMemory mem = (VkDeviceMemory)lean_unbox_uint64(memory);
vkUnmapMemory(dev, mem);
return lean_io_result_mk_ok(lean_box(0));
}
// Unmarshals an Array of Lean MappedMemoryRange ctors (obj field 0: memory
// handle; scalars: offset at 0, size at 8) into VkMappedMemoryRange[] and
// flushes them. Returns the boxed VkResult.
// Fix: free the temporary range array (previously leaked) and drop the
// unused `temp` local.
LEAN_EXPORT lean_obj_res glue_vkFlushMappedMemoryRanges(b_lean_obj_arg device, b_lean_obj_arg pMemoryRanges, b_lean_obj_arg w) {
size_t len_pMemoryRanges = lean_array_size(pMemoryRanges);
VkMappedMemoryRange* um_pMemoryRanges = calloc(len_pMemoryRanges, sizeof(VkMappedMemoryRange));
for (size_t i = 0; i < len_pMemoryRanges; ++i) {
lean_object *i_pMemoryRanges = lean_array_cptr(pMemoryRanges)[i];
lean_object *i_pMemoryRanges_memory = lean_ctor_get(i_pMemoryRanges, 0);
struct VkMappedMemoryRange um_i_pMemoryRanges = {
.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
.pNext = 0,
.memory = (VkDeviceMemory)lean_unbox_uint64(i_pMemoryRanges_memory),
.offset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pMemoryRanges) + lean_ctor_num_objs(i_pMemoryRanges)) + 0),
.size = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pMemoryRanges) + lean_ctor_num_objs(i_pMemoryRanges)) + 8),
};
um_pMemoryRanges[i] = um_i_pMemoryRanges;
}
VkResult out_ret = vkFlushMappedMemoryRanges((VkDevice)lean_unbox_uint64(device), len_pMemoryRanges, um_pMemoryRanges);
free(um_pMemoryRanges); // fix: was leaked
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Unmarshals an Array of Lean MappedMemoryRange ctors (same layout as in
// glue_vkFlushMappedMemoryRanges) and invalidates them. Returns the boxed
// VkResult.
// Fix: free the temporary range array (previously leaked) and drop the
// unused `temp` local.
LEAN_EXPORT lean_obj_res glue_vkInvalidateMappedMemoryRanges(b_lean_obj_arg device, b_lean_obj_arg pMemoryRanges, b_lean_obj_arg w) {
size_t len_pMemoryRanges = lean_array_size(pMemoryRanges);
VkMappedMemoryRange* um_pMemoryRanges = calloc(len_pMemoryRanges, sizeof(VkMappedMemoryRange));
for (size_t i = 0; i < len_pMemoryRanges; ++i) {
lean_object *i_pMemoryRanges = lean_array_cptr(pMemoryRanges)[i];
lean_object *i_pMemoryRanges_memory = lean_ctor_get(i_pMemoryRanges, 0);
struct VkMappedMemoryRange um_i_pMemoryRanges = {
.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
.pNext = 0,
.memory = (VkDeviceMemory)lean_unbox_uint64(i_pMemoryRanges_memory),
.offset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pMemoryRanges) + lean_ctor_num_objs(i_pMemoryRanges)) + 0),
.size = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pMemoryRanges) + lean_ctor_num_objs(i_pMemoryRanges)) + 8),
};
um_pMemoryRanges[i] = um_i_pMemoryRanges;
}
VkResult out_ret = vkInvalidateMappedMemoryRanges((VkDevice)lean_unbox_uint64(device), len_pMemoryRanges, um_pMemoryRanges);
free(um_pMemoryRanges); // fix: was leaked
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Queries the number of bytes currently committed to a lazily allocated
// memory object; the VkDeviceSize result is boxed as UInt64.
LEAN_EXPORT lean_obj_res glue_vkGetDeviceMemoryCommitment(b_lean_obj_arg device, b_lean_obj_arg memory, b_lean_obj_arg w) {
    VkDeviceSize committed;
    vkGetDeviceMemoryCommitment((VkDevice)lean_unbox_uint64(device), (VkDeviceMemory)lean_unbox_uint64(memory), &committed);
    return lean_io_result_mk_ok(lean_box_uint64((uint64_t)committed));
}
// Binds device memory to a buffer at the given offset; returns the boxed VkResult.
LEAN_EXPORT lean_obj_res glue_vkBindBufferMemory(b_lean_obj_arg device, b_lean_obj_arg buffer, b_lean_obj_arg memory, VkDeviceSize memoryOffset, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkBuffer c_buffer = (VkBuffer)lean_unbox_uint64(buffer);
    VkDeviceMemory c_memory = (VkDeviceMemory)lean_unbox_uint64(memory);
    VkResult ret = vkBindBufferMemory(c_device, c_buffer, c_memory, memoryOffset);
    return lean_io_result_mk_ok(lean_box_uint64((uint64_t)ret));
}
// Binds device memory to an image at the given offset; returns the boxed VkResult.
LEAN_EXPORT lean_obj_res glue_vkBindImageMemory(b_lean_obj_arg device, b_lean_obj_arg image, b_lean_obj_arg memory, VkDeviceSize memoryOffset, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkImage c_image = (VkImage)lean_unbox_uint64(image);
    VkDeviceMemory c_memory = (VkDeviceMemory)lean_unbox_uint64(memory);
    VkResult ret = vkBindImageMemory(c_device, c_image, c_memory, memoryOffset);
    return lean_io_result_mk_ok(lean_box_uint64((uint64_t)ret));
}
// Queries memory requirements for a buffer and marshals the result into a
// Lean ctor with 0 boxed fields and 20 scalar bytes:
// size (8) | alignment (8) | memoryTypeBits (4).
LEAN_EXPORT lean_obj_res glue_vkGetBufferMemoryRequirements(b_lean_obj_arg device, b_lean_obj_arg buffer, b_lean_obj_arg w) {
    VkMemoryRequirements reqs;
    vkGetBufferMemoryRequirements((VkDevice)lean_unbox_uint64(device), (VkBuffer)lean_unbox_uint64(buffer), &reqs);
    lean_object *res = lean_alloc_ctor(0, 0, 20);
    uint8_t *scalars = lean_ctor_scalar_cptr(res);
    *(VkDeviceSize*)(scalars + 0) = reqs.size;
    *(VkDeviceSize*)(scalars + 8) = reqs.alignment;
    *(uint32_t*)(scalars + 16) = reqs.memoryTypeBits;
    return lean_io_result_mk_ok(res);
}
// Queries memory requirements for an image and marshals the result into a
// Lean ctor with 0 boxed fields and 20 scalar bytes:
// size (8) | alignment (8) | memoryTypeBits (4).
LEAN_EXPORT lean_obj_res glue_vkGetImageMemoryRequirements(b_lean_obj_arg device, b_lean_obj_arg image, b_lean_obj_arg w) {
    VkMemoryRequirements reqs;
    vkGetImageMemoryRequirements((VkDevice)lean_unbox_uint64(device), (VkImage)lean_unbox_uint64(image), &reqs);
    lean_object *res = lean_alloc_ctor(0, 0, 20);
    uint8_t *scalars = lean_ctor_scalar_cptr(res);
    *(VkDeviceSize*)(scalars + 0) = reqs.size;
    *(VkDeviceSize*)(scalars + 8) = reqs.alignment;
    *(uint32_t*)(scalars + 16) = reqs.memoryTypeBits;
    return lean_io_result_mk_ok(res);
}
// Two-call enumeration of sparse memory requirements for an image: first call
// obtains the count, second fills a scratch array, which is then marshalled
// into a Lean Array of ctors (1 boxed field = formatProperties, 28 scalar
// bytes = imageMipTailFirstLod (4) | imageMipTailSize (8) |
// imageMipTailOffset (8) | imageMipTailStride (8); offsets 4/12/20 are
// unaligned 8-byte stores, matching the Lean-side layout).
LEAN_EXPORT lean_obj_res glue_vkGetImageSparseMemoryRequirements(b_lean_obj_arg device, b_lean_obj_arg image, b_lean_obj_arg w) {
uint32_t len_out_pSparseMemoryRequirements;
VkSparseImageMemoryRequirements* out_pSparseMemoryRequirements;
// get length pSparseMemoryRequirementCount of pSparseMemoryRequirements
vkGetImageSparseMemoryRequirements((VkDevice)lean_unbox_uint64(device), (VkImage)lean_unbox_uint64(image), &len_out_pSparseMemoryRequirements, NULL);
out_pSparseMemoryRequirements = calloc(len_out_pSparseMemoryRequirements, sizeof(VkSparseImageMemoryRequirements));
vkGetImageSparseMemoryRequirements((VkDevice)lean_unbox_uint64(device), (VkImage)lean_unbox_uint64(image), &len_out_pSparseMemoryRequirements, out_pSparseMemoryRequirements);
lean_object *m_out_pSparseMemoryRequirements = lean_alloc_array(len_out_pSparseMemoryRequirements, len_out_pSparseMemoryRequirements);
for (size_t i = 0; i < len_out_pSparseMemoryRequirements; ++i) {
VkSparseImageMemoryRequirements i_out_pSparseMemoryRequirements = out_pSparseMemoryRequirements[i];
lean_object *m_i_out_pSparseMemoryRequirements = lean_alloc_ctor(0, 1, 28);
VkSparseImageFormatProperties i_out_pSparseMemoryRequirements_formatProperties = i_out_pSparseMemoryRequirements.formatProperties;
lean_object *m_i_out_pSparseMemoryRequirements_formatProperties = lean_alloc_ctor(0, 3, 0);
VkImageAspectFlags i_out_pSparseMemoryRequirements_formatProperties_aspectMask = i_out_pSparseMemoryRequirements_formatProperties.aspectMask;
lean_ctor_set(m_i_out_pSparseMemoryRequirements_formatProperties, 0, lean_box_uint32((uint32_t)i_out_pSparseMemoryRequirements_formatProperties_aspectMask));
VkExtent3D i_out_pSparseMemoryRequirements_formatProperties_imageGranularity = i_out_pSparseMemoryRequirements_formatProperties.imageGranularity;
lean_object *m_i_out_pSparseMemoryRequirements_formatProperties_imageGranularity = lean_alloc_ctor(0, 0, 12);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pSparseMemoryRequirements_formatProperties_imageGranularity) + 0) = i_out_pSparseMemoryRequirements_formatProperties_imageGranularity.width;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pSparseMemoryRequirements_formatProperties_imageGranularity) + 4) = i_out_pSparseMemoryRequirements_formatProperties_imageGranularity.height;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pSparseMemoryRequirements_formatProperties_imageGranularity) + 8) = i_out_pSparseMemoryRequirements_formatProperties_imageGranularity.depth;
lean_ctor_set(m_i_out_pSparseMemoryRequirements_formatProperties, 1, m_i_out_pSparseMemoryRequirements_formatProperties_imageGranularity);
VkSparseImageFormatFlags i_out_pSparseMemoryRequirements_formatProperties_flags = i_out_pSparseMemoryRequirements_formatProperties.flags;
lean_ctor_set(m_i_out_pSparseMemoryRequirements_formatProperties, 2, lean_box_uint32((uint32_t)i_out_pSparseMemoryRequirements_formatProperties_flags));
lean_ctor_set(m_i_out_pSparseMemoryRequirements, 0, m_i_out_pSparseMemoryRequirements_formatProperties);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pSparseMemoryRequirements) + 0) = i_out_pSparseMemoryRequirements.imageMipTailFirstLod;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_i_out_pSparseMemoryRequirements) + 4) = i_out_pSparseMemoryRequirements.imageMipTailSize;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_i_out_pSparseMemoryRequirements) + 12) = i_out_pSparseMemoryRequirements.imageMipTailOffset;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_i_out_pSparseMemoryRequirements) + 20) = i_out_pSparseMemoryRequirements.imageMipTailStride;
lean_array_cptr(m_out_pSparseMemoryRequirements)[i] = m_i_out_pSparseMemoryRequirements;
}
// Fix: the scratch buffer was previously leaked on every call.
free(out_pSparseMemoryRequirements);
return lean_io_result_mk_ok(m_out_pSparseMemoryRequirements);
}
// Two-call enumeration of sparse image format properties, marshalled into a
// Lean Array of ctors with 3 boxed fields: aspectMask (boxed UInt32),
// imageGranularity (nested ctor of 12 scalar bytes: width|height|depth),
// flags (boxed UInt32). Enum/flag parameters are passed through unboxed.
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceSparseImageFormatProperties(b_lean_obj_arg physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, b_lean_obj_arg w) {
uint32_t len_out_pProperties;
VkSparseImageFormatProperties* out_pProperties;
// get length pPropertyCount of pProperties
vkGetPhysicalDeviceSparseImageFormatProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), format, type, samples, usage, tiling, &len_out_pProperties, NULL);
out_pProperties = calloc(len_out_pProperties, sizeof(VkSparseImageFormatProperties));
vkGetPhysicalDeviceSparseImageFormatProperties((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), format, type, samples, usage, tiling, &len_out_pProperties, out_pProperties);
lean_object *m_out_pProperties = lean_alloc_array(len_out_pProperties, len_out_pProperties);
for (size_t i = 0; i < len_out_pProperties; ++i) {
VkSparseImageFormatProperties i_out_pProperties = out_pProperties[i];
lean_object *m_i_out_pProperties = lean_alloc_ctor(0, 3, 0);
VkImageAspectFlags i_out_pProperties_aspectMask = i_out_pProperties.aspectMask;
lean_ctor_set(m_i_out_pProperties, 0, lean_box_uint32((uint32_t)i_out_pProperties_aspectMask));
VkExtent3D i_out_pProperties_imageGranularity = i_out_pProperties.imageGranularity;
lean_object *m_i_out_pProperties_imageGranularity = lean_alloc_ctor(0, 0, 12);
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties_imageGranularity) + 0) = i_out_pProperties_imageGranularity.width;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties_imageGranularity) + 4) = i_out_pProperties_imageGranularity.height;
*(uint32_t*)(lean_ctor_scalar_cptr(m_i_out_pProperties_imageGranularity) + 8) = i_out_pProperties_imageGranularity.depth;
lean_ctor_set(m_i_out_pProperties, 1, m_i_out_pProperties_imageGranularity);
VkSparseImageFormatFlags i_out_pProperties_flags = i_out_pProperties.flags;
lean_ctor_set(m_i_out_pProperties, 2, lean_box_uint32((uint32_t)i_out_pProperties_flags));
lean_array_cptr(m_out_pProperties)[i] = m_i_out_pProperties;
}
// Fix: the scratch buffer was previously leaked on every call.
free(out_pProperties);
return lean_io_result_mk_ok(m_out_pProperties);
}
// Submits sparse binding operations to a queue. Marshals a Lean Array of
// BindSparseInfo (boxed fields: 0 = pWaitSemaphores, 1 = pBufferBinds,
// 2 = pImageOpaqueBinds, 3 = pImageBinds, 4 = pSignalSemaphores) into
// heap-allocated C arrays, calls vkQueueBindSparse, then frees every
// temporary buffer. Returns the VkResult boxed as UInt64.
// The marshalling logic below is kept unchanged; the fix is the cleanup
// section at the end — previously every calloc'd buffer was leaked.
LEAN_EXPORT lean_obj_res glue_vkQueueBindSparse(b_lean_obj_arg queue, b_lean_obj_arg pBindInfo, b_lean_obj_arg fence, b_lean_obj_arg w) {
size_t len_pBindInfo = lean_array_size(pBindInfo);
VkBindSparseInfo* um_pBindInfo = calloc(len_pBindInfo, sizeof(VkBindSparseInfo));
for (size_t i = 0; i < len_pBindInfo; ++i) {
lean_object *i_pBindInfo = lean_array_cptr(pBindInfo)[i];
lean_object *i_pBindInfo_pWaitSemaphores = lean_ctor_get(i_pBindInfo, 0);
size_t len_i_pBindInfo_pWaitSemaphores = lean_array_size(i_pBindInfo_pWaitSemaphores);
VkSemaphore* um_i_pBindInfo_pWaitSemaphores = calloc(len_i_pBindInfo_pWaitSemaphores, sizeof(VkSemaphore));
for (size_t i = 0; i < len_i_pBindInfo_pWaitSemaphores; ++i) {
lean_object *i_i_pBindInfo_pWaitSemaphores = lean_array_cptr(i_pBindInfo_pWaitSemaphores)[i];
um_i_pBindInfo_pWaitSemaphores[i] = (VkSemaphore)lean_unbox_uint64(i_i_pBindInfo_pWaitSemaphores);
}
lean_object *i_pBindInfo_pBufferBinds = lean_ctor_get(i_pBindInfo, 1);
size_t len_i_pBindInfo_pBufferBinds = lean_array_size(i_pBindInfo_pBufferBinds);
VkSparseBufferMemoryBindInfo* um_i_pBindInfo_pBufferBinds = calloc(len_i_pBindInfo_pBufferBinds, sizeof(VkSparseBufferMemoryBindInfo));
for (size_t i = 0; i < len_i_pBindInfo_pBufferBinds; ++i) {
lean_object *i_i_pBindInfo_pBufferBinds = lean_array_cptr(i_pBindInfo_pBufferBinds)[i];
lean_object *i_i_pBindInfo_pBufferBinds_buffer = lean_ctor_get(i_i_pBindInfo_pBufferBinds, 0);
lean_object *i_i_pBindInfo_pBufferBinds_pBinds = lean_ctor_get(i_i_pBindInfo_pBufferBinds, 1);
size_t len_i_i_pBindInfo_pBufferBinds_pBinds = lean_array_size(i_i_pBindInfo_pBufferBinds_pBinds);
VkSparseMemoryBind* um_i_i_pBindInfo_pBufferBinds_pBinds = calloc(len_i_i_pBindInfo_pBufferBinds_pBinds, sizeof(VkSparseMemoryBind));
for (size_t i = 0; i < len_i_i_pBindInfo_pBufferBinds_pBinds; ++i) {
lean_object *i_i_i_pBindInfo_pBufferBinds_pBinds = lean_array_cptr(i_i_pBindInfo_pBufferBinds_pBinds)[i];
lean_object *i_i_i_pBindInfo_pBufferBinds_pBinds_memory = lean_ctor_get(i_i_i_pBindInfo_pBufferBinds_pBinds, 0);
lean_object *i_i_i_pBindInfo_pBufferBinds_pBinds_flags = lean_ctor_get(i_i_i_pBindInfo_pBufferBinds_pBinds, 1);
struct VkSparseMemoryBind um_i_i_i_pBindInfo_pBufferBinds_pBinds = {
.resourceOffset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pBufferBinds_pBinds) + lean_ctor_num_objs(i_i_i_pBindInfo_pBufferBinds_pBinds)) + 0),
.size = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pBufferBinds_pBinds) + lean_ctor_num_objs(i_i_i_pBindInfo_pBufferBinds_pBinds)) + 8),
.memory = (VkDeviceMemory)lean_unbox_uint64(i_i_i_pBindInfo_pBufferBinds_pBinds_memory),
.memoryOffset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pBufferBinds_pBinds) + lean_ctor_num_objs(i_i_i_pBindInfo_pBufferBinds_pBinds)) + 16),
.flags = (VkSparseMemoryBindFlags)(VkSparseMemoryBindFlagBits)lean_unbox_uint32(i_i_i_pBindInfo_pBufferBinds_pBinds_flags),
};
um_i_i_pBindInfo_pBufferBinds_pBinds[i] = um_i_i_i_pBindInfo_pBufferBinds_pBinds;
}
struct VkSparseBufferMemoryBindInfo um_i_i_pBindInfo_pBufferBinds = {
.buffer = (VkBuffer)lean_unbox_uint64(i_i_pBindInfo_pBufferBinds_buffer),
.bindCount = len_i_i_pBindInfo_pBufferBinds_pBinds,
.pBinds = um_i_i_pBindInfo_pBufferBinds_pBinds,
};
um_i_pBindInfo_pBufferBinds[i] = um_i_i_pBindInfo_pBufferBinds;
}
lean_object *i_pBindInfo_pImageOpaqueBinds = lean_ctor_get(i_pBindInfo, 2);
size_t len_i_pBindInfo_pImageOpaqueBinds = lean_array_size(i_pBindInfo_pImageOpaqueBinds);
VkSparseImageOpaqueMemoryBindInfo* um_i_pBindInfo_pImageOpaqueBinds = calloc(len_i_pBindInfo_pImageOpaqueBinds, sizeof(VkSparseImageOpaqueMemoryBindInfo));
for (size_t i = 0; i < len_i_pBindInfo_pImageOpaqueBinds; ++i) {
lean_object *i_i_pBindInfo_pImageOpaqueBinds = lean_array_cptr(i_pBindInfo_pImageOpaqueBinds)[i];
lean_object *i_i_pBindInfo_pImageOpaqueBinds_image = lean_ctor_get(i_i_pBindInfo_pImageOpaqueBinds, 0);
lean_object *i_i_pBindInfo_pImageOpaqueBinds_pBinds = lean_ctor_get(i_i_pBindInfo_pImageOpaqueBinds, 1);
size_t len_i_i_pBindInfo_pImageOpaqueBinds_pBinds = lean_array_size(i_i_pBindInfo_pImageOpaqueBinds_pBinds);
VkSparseMemoryBind* um_i_i_pBindInfo_pImageOpaqueBinds_pBinds = calloc(len_i_i_pBindInfo_pImageOpaqueBinds_pBinds, sizeof(VkSparseMemoryBind));
for (size_t i = 0; i < len_i_i_pBindInfo_pImageOpaqueBinds_pBinds; ++i) {
lean_object *i_i_i_pBindInfo_pImageOpaqueBinds_pBinds = lean_array_cptr(i_i_pBindInfo_pImageOpaqueBinds_pBinds)[i];
lean_object *i_i_i_pBindInfo_pImageOpaqueBinds_pBinds_memory = lean_ctor_get(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds, 0);
lean_object *i_i_i_pBindInfo_pImageOpaqueBinds_pBinds_flags = lean_ctor_get(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds, 1);
struct VkSparseMemoryBind um_i_i_i_pBindInfo_pImageOpaqueBinds_pBinds = {
.resourceOffset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds)) + 0),
.size = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds)) + 8),
.memory = (VkDeviceMemory)lean_unbox_uint64(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds_memory),
.memoryOffset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds)) + 16),
.flags = (VkSparseMemoryBindFlags)(VkSparseMemoryBindFlagBits)lean_unbox_uint32(i_i_i_pBindInfo_pImageOpaqueBinds_pBinds_flags),
};
um_i_i_pBindInfo_pImageOpaqueBinds_pBinds[i] = um_i_i_i_pBindInfo_pImageOpaqueBinds_pBinds;
}
struct VkSparseImageOpaqueMemoryBindInfo um_i_i_pBindInfo_pImageOpaqueBinds = {
.image = (VkImage)lean_unbox_uint64(i_i_pBindInfo_pImageOpaqueBinds_image),
.bindCount = len_i_i_pBindInfo_pImageOpaqueBinds_pBinds,
.pBinds = um_i_i_pBindInfo_pImageOpaqueBinds_pBinds,
};
um_i_pBindInfo_pImageOpaqueBinds[i] = um_i_i_pBindInfo_pImageOpaqueBinds;
}
lean_object *i_pBindInfo_pImageBinds = lean_ctor_get(i_pBindInfo, 3);
size_t len_i_pBindInfo_pImageBinds = lean_array_size(i_pBindInfo_pImageBinds);
VkSparseImageMemoryBindInfo* um_i_pBindInfo_pImageBinds = calloc(len_i_pBindInfo_pImageBinds, sizeof(VkSparseImageMemoryBindInfo));
for (size_t i = 0; i < len_i_pBindInfo_pImageBinds; ++i) {
lean_object *i_i_pBindInfo_pImageBinds = lean_array_cptr(i_pBindInfo_pImageBinds)[i];
lean_object *i_i_pBindInfo_pImageBinds_image = lean_ctor_get(i_i_pBindInfo_pImageBinds, 0);
lean_object *i_i_pBindInfo_pImageBinds_pBinds = lean_ctor_get(i_i_pBindInfo_pImageBinds, 1);
size_t len_i_i_pBindInfo_pImageBinds_pBinds = lean_array_size(i_i_pBindInfo_pImageBinds_pBinds);
VkSparseImageMemoryBind* um_i_i_pBindInfo_pImageBinds_pBinds = calloc(len_i_i_pBindInfo_pImageBinds_pBinds, sizeof(VkSparseImageMemoryBind));
for (size_t i = 0; i < len_i_i_pBindInfo_pImageBinds_pBinds; ++i) {
lean_object *i_i_i_pBindInfo_pImageBinds_pBinds = lean_array_cptr(i_i_pBindInfo_pImageBinds_pBinds)[i];
lean_object *i_i_i_pBindInfo_pImageBinds_pBinds_subresource = lean_ctor_get(i_i_i_pBindInfo_pImageBinds_pBinds, 0);
lean_object *i_i_i_pBindInfo_pImageBinds_pBinds_subresource_aspectMask = lean_ctor_get(i_i_i_pBindInfo_pImageBinds_pBinds_subresource, 0);
struct VkImageSubresource um_i_i_i_pBindInfo_pImageBinds_pBinds_subresource = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(i_i_i_pBindInfo_pImageBinds_pBinds_subresource_aspectMask),
.mipLevel = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_subresource) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_subresource)) + 0),
.arrayLayer = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_subresource) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_subresource)) + 4),
};
lean_object *i_i_i_pBindInfo_pImageBinds_pBinds_offset = lean_ctor_get(i_i_i_pBindInfo_pImageBinds_pBinds, 1);
// NOTE(review): VkOffset3D fields are int32_t; the uint32_t read below
// reinterprets the same 4 bytes and is value-preserving on the
// two's-complement targets Lean supports.
struct VkOffset3D um_i_i_i_pBindInfo_pImageBinds_pBinds_offset = {
.x = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_offset) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_offset)) + 0),
.y = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_offset) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_offset)) + 4),
.z = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_offset) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_offset)) + 8),
};
lean_object *i_i_i_pBindInfo_pImageBinds_pBinds_extent = lean_ctor_get(i_i_i_pBindInfo_pImageBinds_pBinds, 2);
struct VkExtent3D um_i_i_i_pBindInfo_pImageBinds_pBinds_extent = {
.width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_extent) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_extent)) + 0),
.height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_extent) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_extent)) + 4),
.depth = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds_extent) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds_extent)) + 8),
};
lean_object *i_i_i_pBindInfo_pImageBinds_pBinds_memory = lean_ctor_get(i_i_i_pBindInfo_pImageBinds_pBinds, 3);
lean_object *i_i_i_pBindInfo_pImageBinds_pBinds_flags = lean_ctor_get(i_i_i_pBindInfo_pImageBinds_pBinds, 4);
struct VkSparseImageMemoryBind um_i_i_i_pBindInfo_pImageBinds_pBinds = {
.subresource = um_i_i_i_pBindInfo_pImageBinds_pBinds_subresource,
.offset = um_i_i_i_pBindInfo_pImageBinds_pBinds_offset,
.extent = um_i_i_i_pBindInfo_pImageBinds_pBinds_extent,
.memory = (VkDeviceMemory)lean_unbox_uint64(i_i_i_pBindInfo_pImageBinds_pBinds_memory),
.memoryOffset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_i_pBindInfo_pImageBinds_pBinds) + lean_ctor_num_objs(i_i_i_pBindInfo_pImageBinds_pBinds)) + 0),
.flags = (VkSparseMemoryBindFlags)(VkSparseMemoryBindFlagBits)lean_unbox_uint32(i_i_i_pBindInfo_pImageBinds_pBinds_flags),
};
um_i_i_pBindInfo_pImageBinds_pBinds[i] = um_i_i_i_pBindInfo_pImageBinds_pBinds;
}
struct VkSparseImageMemoryBindInfo um_i_i_pBindInfo_pImageBinds = {
.image = (VkImage)lean_unbox_uint64(i_i_pBindInfo_pImageBinds_image),
.bindCount = len_i_i_pBindInfo_pImageBinds_pBinds,
.pBinds = um_i_i_pBindInfo_pImageBinds_pBinds,
};
um_i_pBindInfo_pImageBinds[i] = um_i_i_pBindInfo_pImageBinds;
}
lean_object *i_pBindInfo_pSignalSemaphores = lean_ctor_get(i_pBindInfo, 4);
size_t len_i_pBindInfo_pSignalSemaphores = lean_array_size(i_pBindInfo_pSignalSemaphores);
VkSemaphore* um_i_pBindInfo_pSignalSemaphores = calloc(len_i_pBindInfo_pSignalSemaphores, sizeof(VkSemaphore));
for (size_t i = 0; i < len_i_pBindInfo_pSignalSemaphores; ++i) {
lean_object *i_i_pBindInfo_pSignalSemaphores = lean_array_cptr(i_pBindInfo_pSignalSemaphores)[i];
um_i_pBindInfo_pSignalSemaphores[i] = (VkSemaphore)lean_unbox_uint64(i_i_pBindInfo_pSignalSemaphores);
}
struct VkBindSparseInfo um_i_pBindInfo = {
.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
.pNext = 0,
.waitSemaphoreCount = len_i_pBindInfo_pWaitSemaphores,
.pWaitSemaphores = um_i_pBindInfo_pWaitSemaphores,
.bufferBindCount = len_i_pBindInfo_pBufferBinds,
.pBufferBinds = um_i_pBindInfo_pBufferBinds,
.imageOpaqueBindCount = len_i_pBindInfo_pImageOpaqueBinds,
.pImageOpaqueBinds = um_i_pBindInfo_pImageOpaqueBinds,
.imageBindCount = len_i_pBindInfo_pImageBinds,
.pImageBinds = um_i_pBindInfo_pImageBinds,
.signalSemaphoreCount = len_i_pBindInfo_pSignalSemaphores,
.pSignalSemaphores = um_i_pBindInfo_pSignalSemaphores,
};
um_pBindInfo[i] = um_i_pBindInfo;
}
VkResult out_ret = vkQueueBindSparse((VkQueue)lean_unbox_uint64(queue), len_pBindInfo, um_pBindInfo, (VkFence)lean_unbox_uint64(fence));
// Fix: release all marshalling buffers (safe after the call returns — the
// Vulkan spec only requires the arrays to live for the duration of the call).
for (size_t i = 0; i < len_pBindInfo; ++i) {
free((void*)um_pBindInfo[i].pWaitSemaphores);
for (uint32_t j = 0; j < um_pBindInfo[i].bufferBindCount; ++j)
free((void*)um_pBindInfo[i].pBufferBinds[j].pBinds);
free((void*)um_pBindInfo[i].pBufferBinds);
for (uint32_t j = 0; j < um_pBindInfo[i].imageOpaqueBindCount; ++j)
free((void*)um_pBindInfo[i].pImageOpaqueBinds[j].pBinds);
free((void*)um_pBindInfo[i].pImageOpaqueBinds);
for (uint32_t j = 0; j < um_pBindInfo[i].imageBindCount; ++j)
free((void*)um_pBindInfo[i].pImageBinds[j].pBinds);
free((void*)um_pBindInfo[i].pImageBinds);
free((void*)um_pBindInfo[i].pSignalSemaphores);
}
free(um_pBindInfo);
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Creates a fence. pCreateInfo has one boxed field: flags (UInt32).
// Returns the pair (VkResult, VkFence), both boxed as UInt64, inside IO.
LEAN_EXPORT lean_obj_res glue_vkCreateFence(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
    struct VkFenceCreateInfo ci = {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
        .pNext = 0,
        .flags = (VkFenceCreateFlags)(VkFenceCreateFlagBits)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 0)),
    };
    VkFence fence;
    VkResult ret = vkCreateFence((VkDevice)lean_unbox_uint64(device), &ci, NULL, &fence);
    lean_object *pair = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
    lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)fence));
    return lean_io_result_mk_ok(pair);
}
// Destroys a fence (no custom allocator). Returns IO Unit.
LEAN_EXPORT lean_obj_res glue_vkDestroyFence(b_lean_obj_arg device, b_lean_obj_arg fence, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkFence c_fence = (VkFence)lean_unbox_uint64(fence);
    vkDestroyFence(c_device, c_fence, NULL);
    return lean_io_result_mk_ok(lean_box(0));
}
// Resets an array of fences to the unsignaled state. The Lean Array of boxed
// UInt64 handles is unmarshalled into a temporary VkFence array.
// Returns the VkResult boxed as UInt64 inside IO.
LEAN_EXPORT lean_obj_res glue_vkResetFences(b_lean_obj_arg device, b_lean_obj_arg pFences, b_lean_obj_arg w) {
size_t len_pFences = lean_array_size(pFences);
VkFence* um_pFences = calloc(len_pFences, sizeof(VkFence));
for (size_t i = 0; i < len_pFences; ++i) {
lean_object *i_pFences = lean_array_cptr(pFences)[i];
um_pFences[i] = (VkFence)lean_unbox_uint64(i_pFences);
}
VkResult out_ret = vkResetFences((VkDevice)lean_unbox_uint64(device), len_pFences, um_pFences);
// Fix: the handle array was previously leaked on every call.
free(um_pFences);
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Polls the current status of a fence; returns the VkResult boxed as UInt64.
LEAN_EXPORT lean_obj_res glue_vkGetFenceStatus(b_lean_obj_arg device, b_lean_obj_arg fence, b_lean_obj_arg w) {
    VkResult ret = vkGetFenceStatus((VkDevice)lean_unbox_uint64(device), (VkFence)lean_unbox_uint64(fence));
    return lean_io_result_mk_ok(lean_box_uint64((uint64_t)ret));
}
// Waits for one or all of the given fences to signal, up to `timeout`
// nanoseconds. The Lean Array of boxed UInt64 handles is unmarshalled into a
// temporary VkFence array. Returns the VkResult boxed as UInt64 inside IO.
LEAN_EXPORT lean_obj_res glue_vkWaitForFences(b_lean_obj_arg device, b_lean_obj_arg pFences, VkBool32 waitAll, uint64_t timeout, b_lean_obj_arg w) {
size_t len_pFences = lean_array_size(pFences);
VkFence* um_pFences = calloc(len_pFences, sizeof(VkFence));
for (size_t i = 0; i < len_pFences; ++i) {
lean_object *i_pFences = lean_array_cptr(pFences)[i];
um_pFences[i] = (VkFence)lean_unbox_uint64(i_pFences);
}
VkResult out_ret = vkWaitForFences((VkDevice)lean_unbox_uint64(device), len_pFences, um_pFences, waitAll, timeout);
// Fix: the handle array was previously leaked on every call.
free(um_pFences);
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Creates a (binary) semaphore. pCreateInfo carries only a 4-byte scalar
// flags field at scalar offset 0. Returns the pair (VkResult, VkSemaphore).
LEAN_EXPORT lean_obj_res glue_vkCreateSemaphore(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
    const uint8_t *scalars = (const uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo));
    struct VkSemaphoreCreateInfo ci = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .pNext = 0,
        .flags = (VkSemaphoreCreateFlags)*(const uint32_t*)(scalars + 0),
    };
    VkSemaphore semaphore;
    VkResult ret = vkCreateSemaphore((VkDevice)lean_unbox_uint64(device), &ci, NULL, &semaphore);
    lean_object *pair = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
    lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)semaphore));
    return lean_io_result_mk_ok(pair);
}
// Destroys a semaphore (no custom allocator). Returns IO Unit.
LEAN_EXPORT lean_obj_res glue_vkDestroySemaphore(b_lean_obj_arg device, b_lean_obj_arg semaphore, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkSemaphore c_semaphore = (VkSemaphore)lean_unbox_uint64(semaphore);
    vkDestroySemaphore(c_device, c_semaphore, NULL);
    return lean_io_result_mk_ok(lean_box(0));
}
// Creates an event. pCreateInfo has one boxed field: flags (UInt32).
// Returns the pair (VkResult, VkEvent), both boxed as UInt64.
LEAN_EXPORT lean_obj_res glue_vkCreateEvent(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
    struct VkEventCreateInfo ci = {
        .sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
        .pNext = 0,
        .flags = (VkEventCreateFlags)(VkEventCreateFlagBits)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 0)),
    };
    VkEvent event;
    VkResult ret = vkCreateEvent((VkDevice)lean_unbox_uint64(device), &ci, NULL, &event);
    lean_object *pair = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
    lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)event));
    return lean_io_result_mk_ok(pair);
}
// Destroys an event (no custom allocator). Returns IO Unit.
LEAN_EXPORT lean_obj_res glue_vkDestroyEvent(b_lean_obj_arg device, b_lean_obj_arg event, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkEvent c_event = (VkEvent)lean_unbox_uint64(event);
    vkDestroyEvent(c_device, c_event, NULL);
    return lean_io_result_mk_ok(lean_box(0));
}
// Polls the current status of an event; returns the VkResult boxed as UInt64.
LEAN_EXPORT lean_obj_res glue_vkGetEventStatus(b_lean_obj_arg device, b_lean_obj_arg event, b_lean_obj_arg w) {
    VkResult ret = vkGetEventStatus((VkDevice)lean_unbox_uint64(device), (VkEvent)lean_unbox_uint64(event));
    return lean_io_result_mk_ok(lean_box_uint64((uint64_t)ret));
}
// Sets an event to the signaled state; returns the VkResult boxed as UInt64.
LEAN_EXPORT lean_obj_res glue_vkSetEvent(b_lean_obj_arg device, b_lean_obj_arg event, b_lean_obj_arg w) {
    VkResult ret = vkSetEvent((VkDevice)lean_unbox_uint64(device), (VkEvent)lean_unbox_uint64(event));
    return lean_io_result_mk_ok(lean_box_uint64((uint64_t)ret));
}
// Resets an event to the unsignaled state; returns the VkResult boxed as UInt64.
LEAN_EXPORT lean_obj_res glue_vkResetEvent(b_lean_obj_arg device, b_lean_obj_arg event, b_lean_obj_arg w) {
    VkResult ret = vkResetEvent((VkDevice)lean_unbox_uint64(device), (VkEvent)lean_unbox_uint64(event));
    return lean_io_result_mk_ok(lean_box_uint64((uint64_t)ret));
}
// Creates a query pool. pCreateInfo layout: boxed field 0 = queryType,
// boxed field 1 = pipelineStatistics; scalar bytes 0..3 = flags,
// 4..7 = queryCount. Returns the pair (VkResult, VkQueryPool).
LEAN_EXPORT lean_obj_res glue_vkCreateQueryPool(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
    const uint8_t *scalars = (const uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo));
    struct VkQueryPoolCreateInfo ci = {
        .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
        .pNext = 0,
        .flags = (VkQueryPoolCreateFlags)*(const uint32_t*)(scalars + 0),
        .queryType = (VkQueryType)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 0)),
        .queryCount = *(const uint32_t*)(scalars + 4),
        .pipelineStatistics = (VkQueryPipelineStatisticFlags)(VkQueryPipelineStatisticFlagBits)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 1)),
    };
    VkQueryPool pool;
    VkResult ret = vkCreateQueryPool((VkDevice)lean_unbox_uint64(device), &ci, NULL, &pool);
    lean_object *pair = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
    lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)pool));
    return lean_io_result_mk_ok(pair);
}
// Destroys a query pool (no custom allocator). Returns IO Unit.
LEAN_EXPORT lean_obj_res glue_vkDestroyQueryPool(b_lean_obj_arg device, b_lean_obj_arg queryPool, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkQueryPool c_pool = (VkQueryPool)lean_unbox_uint64(queryPool);
    vkDestroyQueryPool(c_device, c_pool, NULL);
    return lean_io_result_mk_ok(lean_box(0));
}
// Creates a buffer. pCreateInfo layout: boxed fields 0 = flags, 1 = usage,
// 2 = sharingMode, 3 = pQueueFamilyIndices (Array UInt32); scalar bytes
// 0..7 = size. Returns the pair (VkResult, VkBuffer) boxed as UInt64s.
LEAN_EXPORT lean_obj_res glue_vkCreateBuffer(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
lean_object *pCreateInfo_usage = lean_ctor_get(pCreateInfo, 1);
lean_object *pCreateInfo_sharingMode = lean_ctor_get(pCreateInfo, 2);
lean_object *pCreateInfo_pQueueFamilyIndices = lean_ctor_get(pCreateInfo, 3);
size_t len_pCreateInfo_pQueueFamilyIndices = lean_array_size(pCreateInfo_pQueueFamilyIndices);
uint32_t* um_pCreateInfo_pQueueFamilyIndices = calloc(len_pCreateInfo_pQueueFamilyIndices, sizeof(uint32_t));
for (size_t i = 0; i < len_pCreateInfo_pQueueFamilyIndices; ++i) {
lean_object *i_pCreateInfo_pQueueFamilyIndices = lean_array_cptr(pCreateInfo_pQueueFamilyIndices)[i];
um_pCreateInfo_pQueueFamilyIndices[i] = lean_unbox_uint32(i_pCreateInfo_pQueueFamilyIndices);
}
struct VkBufferCreateInfo um_pCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = 0,
.flags = (VkBufferCreateFlags)(VkBufferCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
.size = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 0),
.usage = (VkBufferUsageFlags)(VkBufferUsageFlagBits)lean_unbox_uint32(pCreateInfo_usage),
.sharingMode = (VkSharingMode)lean_unbox_uint32(pCreateInfo_sharingMode),
.queueFamilyIndexCount = len_pCreateInfo_pQueueFamilyIndices,
.pQueueFamilyIndices = um_pCreateInfo_pQueueFamilyIndices,
};
VkBuffer out_pBuffer;
VkResult out_ret = vkCreateBuffer((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pBuffer);
// Fix: the queue-family-index array was previously leaked on every call.
free(um_pCreateInfo_pQueueFamilyIndices);
lean_object *pair = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)out_pBuffer));
return lean_io_result_mk_ok(pair);
}
// Destroys a buffer (no custom allocator). Returns IO Unit.
LEAN_EXPORT lean_obj_res glue_vkDestroyBuffer(b_lean_obj_arg device, b_lean_obj_arg buffer, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkBuffer c_buffer = (VkBuffer)lean_unbox_uint64(buffer);
    vkDestroyBuffer(c_device, c_buffer, NULL);
    return lean_io_result_mk_ok(lean_box(0));
}
// Creates a buffer view. pCreateInfo layout: boxed field 0 = buffer,
// boxed field 1 = format; scalar bytes 0..7 = offset, 8..15 = range,
// 16..19 = flags. Returns the pair (VkResult, VkBufferView).
LEAN_EXPORT lean_obj_res glue_vkCreateBufferView(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
    const uint8_t *scalars = (const uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo));
    struct VkBufferViewCreateInfo ci = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
        .pNext = 0,
        .flags = (VkBufferViewCreateFlags)*(const uint32_t*)(scalars + 16),
        .buffer = (VkBuffer)lean_unbox_uint64(lean_ctor_get(pCreateInfo, 0)),
        .format = (VkFormat)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 1)),
        .offset = (VkDeviceSize)*(const uint64_t*)(scalars + 0),
        .range = (VkDeviceSize)*(const uint64_t*)(scalars + 8),
    };
    VkBufferView view;
    VkResult ret = vkCreateBufferView((VkDevice)lean_unbox_uint64(device), &ci, NULL, &view);
    lean_object *pair = lean_alloc_ctor(0, 2, 0);
    lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
    lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)view));
    return lean_io_result_mk_ok(pair);
}
// Destroys a buffer view (no custom allocator). Returns IO Unit.
LEAN_EXPORT lean_obj_res glue_vkDestroyBufferView(b_lean_obj_arg device, b_lean_obj_arg bufferView, b_lean_obj_arg w) {
    VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
    VkBufferView c_view = (VkBufferView)lean_unbox_uint64(bufferView);
    vkDestroyBufferView(c_device, c_view, NULL);
    return lean_io_result_mk_ok(lean_box(0));
}
/* Glue: unmarshal a Lean VkImageCreateInfo, call vkCreateImage, and return
 * the pair (VkResult, VkImage) inside IO.
 * Fix: the temporary queue-family-index array allocated with calloc was
 * leaked on every call; it is now freed once vkCreateImage has consumed it
 * (the spec only requires pQueueFamilyIndices to be valid for the call). */
LEAN_EXPORT lean_obj_res glue_vkCreateImage(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
/* Boxed fields of the Lean create-info constructor, in declaration order. */
lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
lean_object *pCreateInfo_imageType = lean_ctor_get(pCreateInfo, 1);
lean_object *pCreateInfo_format = lean_ctor_get(pCreateInfo, 2);
lean_object *pCreateInfo_extent = lean_ctor_get(pCreateInfo, 3);
/* Nested VkExtent3D: three u32 scalars stored after the extent's object fields. */
struct VkExtent3D um_pCreateInfo_extent = {
.width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo_extent) + lean_ctor_num_objs(pCreateInfo_extent)) + 0),
.height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo_extent) + lean_ctor_num_objs(pCreateInfo_extent)) + 4),
.depth = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo_extent) + lean_ctor_num_objs(pCreateInfo_extent)) + 8),
};
lean_object *pCreateInfo_samples = lean_ctor_get(pCreateInfo, 4);
lean_object *pCreateInfo_tiling = lean_ctor_get(pCreateInfo, 5);
lean_object *pCreateInfo_usage = lean_ctor_get(pCreateInfo, 6);
lean_object *pCreateInfo_sharingMode = lean_ctor_get(pCreateInfo, 7);
/* Copy the Lean Array of boxed u32 queue family indices into a C array. */
lean_object *pCreateInfo_pQueueFamilyIndices = lean_ctor_get(pCreateInfo, 8);
size_t len_pCreateInfo_pQueueFamilyIndices = lean_array_size(pCreateInfo_pQueueFamilyIndices);
uint32_t* um_pCreateInfo_pQueueFamilyIndices = calloc(len_pCreateInfo_pQueueFamilyIndices, sizeof(uint32_t));
for (size_t i = 0; i < len_pCreateInfo_pQueueFamilyIndices; ++i) {
lean_object *i_pCreateInfo_pQueueFamilyIndices = lean_array_cptr(pCreateInfo_pQueueFamilyIndices)[i];
um_pCreateInfo_pQueueFamilyIndices[i] = lean_unbox_uint32(i_pCreateInfo_pQueueFamilyIndices);
}
lean_object *pCreateInfo_initialLayout = lean_ctor_get(pCreateInfo, 9);
struct VkImageCreateInfo um_pCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = 0,
.flags = (VkImageCreateFlags)(VkImageCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
.imageType = (VkImageType)lean_unbox_uint32(pCreateInfo_imageType),
.format = (VkFormat)lean_unbox_uint32(pCreateInfo_format),
.extent = um_pCreateInfo_extent,
/* mipLevels/arrayLayers live in pCreateInfo's trailing scalar area. */
.mipLevels = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 0),
.arrayLayers = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 4),
.samples = (VkSampleCountFlagBits)lean_unbox_uint32(pCreateInfo_samples),
.tiling = (VkImageTiling)lean_unbox_uint32(pCreateInfo_tiling),
.usage = (VkImageUsageFlags)(VkImageUsageFlagBits)lean_unbox_uint32(pCreateInfo_usage),
.sharingMode = (VkSharingMode)lean_unbox_uint32(pCreateInfo_sharingMode),
.queueFamilyIndexCount = len_pCreateInfo_pQueueFamilyIndices,
.pQueueFamilyIndices = um_pCreateInfo_pQueueFamilyIndices,
.initialLayout = (VkImageLayout)lean_unbox_uint32(pCreateInfo_initialLayout),
};
VkImage out_pImage;
VkResult out_ret = vkCreateImage((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pImage);
free(um_pCreateInfo_pQueueFamilyIndices); /* fix: previously leaked */
/* Pack (out_ret, out_pImage) into a two-field Lean constructor. */
lean_object *tuple = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(tuple, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(tuple, 1, lean_box_uint64((uint64_t)out_pImage));
return lean_io_result_mk_ok(tuple);
}
/* Glue: destroy a VkImage handle; yields Unit in IO. */
LEAN_EXPORT lean_obj_res glue_vkDestroyImage(b_lean_obj_arg device, b_lean_obj_arg image, b_lean_obj_arg w) {
VkDevice dev = (VkDevice)lean_unbox_uint64(device);
VkImage img = (VkImage)lean_unbox_uint64(image);
vkDestroyImage(dev, img, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
/* Glue: unmarshal a Lean VkImageSubresource, query the subresource layout,
 * and marshal the resulting VkSubresourceLayout back into a Lean structure
 * (five VkDeviceSize scalars, no object fields).
 * Fix: removed the unused local `temp` that the generator emitted. */
LEAN_EXPORT lean_obj_res glue_vkGetImageSubresourceLayout(b_lean_obj_arg device, b_lean_obj_arg image, b_lean_obj_arg pSubresource, b_lean_obj_arg w) {
lean_object *pSubresource_aspectMask = lean_ctor_get(pSubresource, 0);
struct VkImageSubresource um_pSubresource = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(pSubresource_aspectMask),
/* mipLevel/arrayLayer live in pSubresource's trailing scalar area. */
.mipLevel = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pSubresource) + lean_ctor_num_objs(pSubresource)) + 0),
.arrayLayer = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pSubresource) + lean_ctor_num_objs(pSubresource)) + 4),
};
VkSubresourceLayout out_pLayout;
vkGetImageSubresourceLayout((VkDevice)lean_unbox_uint64(device), (VkImage)lean_unbox_uint64(image), &um_pSubresource, &out_pLayout);
/* Result constructor: 0 object fields, 40 bytes of scalars (5 x u64). */
lean_object *m_out_pLayout = lean_alloc_ctor(0, 0, 40);
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pLayout) + 0) = out_pLayout.offset;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pLayout) + 8) = out_pLayout.size;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pLayout) + 16) = out_pLayout.rowPitch;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pLayout) + 24) = out_pLayout.arrayPitch;
*(VkDeviceSize*)(lean_ctor_scalar_cptr(m_out_pLayout) + 32) = out_pLayout.depthPitch;
return lean_io_result_mk_ok(m_out_pLayout);
}
/* Glue: unmarshal a Lean VkImageViewCreateInfo (including the nested
 * VkComponentMapping and VkImageSubresourceRange), call vkCreateImageView,
 * and return the pair (VkResult, VkImageView) inside IO. */
LEAN_EXPORT lean_obj_res glue_vkCreateImageView(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
/* Boxed fields of the create-info constructor, in declaration order. */
lean_object *f_flags = lean_ctor_get(pCreateInfo, 0);
lean_object *f_image = lean_ctor_get(pCreateInfo, 1);
lean_object *f_viewType = lean_ctor_get(pCreateInfo, 2);
lean_object *f_format = lean_ctor_get(pCreateInfo, 3);
/* Nested VkComponentMapping: four boxed swizzle enums. */
lean_object *f_components = lean_ctor_get(pCreateInfo, 4);
struct VkComponentMapping components = {
.r = (VkComponentSwizzle)lean_unbox_uint32(lean_ctor_get(f_components, 0)),
.g = (VkComponentSwizzle)lean_unbox_uint32(lean_ctor_get(f_components, 1)),
.b = (VkComponentSwizzle)lean_unbox_uint32(lean_ctor_get(f_components, 2)),
.a = (VkComponentSwizzle)lean_unbox_uint32(lean_ctor_get(f_components, 3)),
};
/* Nested VkImageSubresourceRange: one boxed enum plus four u32 scalars. */
lean_object *f_range = lean_ctor_get(pCreateInfo, 5);
uint8_t *range_scalars = (uint8_t*)(lean_ctor_obj_cptr(f_range) + lean_ctor_num_objs(f_range));
struct VkImageSubresourceRange range = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(f_range, 0)),
.baseMipLevel = (uint32_t)*(uint32_t*)(range_scalars + 0),
.levelCount = (uint32_t)*(uint32_t*)(range_scalars + 4),
.baseArrayLayer = (uint32_t)*(uint32_t*)(range_scalars + 8),
.layerCount = (uint32_t)*(uint32_t*)(range_scalars + 12),
};
struct VkImageViewCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = 0,
.flags = (VkImageViewCreateFlags)(VkImageViewCreateFlagBits)lean_unbox_uint32(f_flags),
.image = (VkImage)lean_unbox_uint64(f_image),
.viewType = (VkImageViewType)lean_unbox_uint32(f_viewType),
.format = (VkFormat)lean_unbox_uint32(f_format),
.components = components,
.subresourceRange = range,
};
VkImageView view;
VkResult ret = vkCreateImageView((VkDevice)lean_unbox_uint64(device), &info, NULL, &view);
/* Pack (ret, view) into a two-field Lean constructor. */
lean_object *pair = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)view));
return lean_io_result_mk_ok(pair);
}
/* Glue: destroy a VkImageView handle; yields Unit in IO. */
LEAN_EXPORT lean_obj_res glue_vkDestroyImageView(b_lean_obj_arg device, b_lean_obj_arg imageView, b_lean_obj_arg w) {
VkDevice dev = (VkDevice)lean_unbox_uint64(device);
VkImageView view = (VkImageView)lean_unbox_uint64(imageView);
vkDestroyImageView(dev, view, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
/* Glue: wrap a Lean ByteArray of SPIR-V in a VkShaderModuleCreateInfo,
 * call vkCreateShaderModule, and return (VkResult, VkShaderModule).
 * NOTE(review): codeSize is taken as the ByteArray's byte length; Vulkan
 * requires it to be a multiple of 4 — presumably ensured by the caller. */
LEAN_EXPORT lean_obj_res glue_vkCreateShaderModule(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
/* The SPIR-V payload is borrowed directly from the Lean scalar array. */
lean_object *code_obj = lean_ctor_get(pCreateInfo, 0);
/* flags lives in pCreateInfo's trailing scalar area. */
uint8_t *scalars = (uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo));
struct VkShaderModuleCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
.pNext = 0,
.flags = (VkShaderModuleCreateFlags)*(uint32_t*)(scalars + 0),
.codeSize = lean_sarray_size(code_obj),
.pCode = (void*)lean_sarray_cptr(code_obj),
};
VkShaderModule module;
VkResult ret = vkCreateShaderModule((VkDevice)lean_unbox_uint64(device), &info, NULL, &module);
/* Pack (ret, module) into a two-field Lean constructor. */
lean_object *pair = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)module));
return lean_io_result_mk_ok(pair);
}
/* Glue: destroy a VkShaderModule handle; yields Unit in IO. */
LEAN_EXPORT lean_obj_res glue_vkDestroyShaderModule(b_lean_obj_arg device, b_lean_obj_arg shaderModule, b_lean_obj_arg w) {
VkDevice dev = (VkDevice)lean_unbox_uint64(device);
VkShaderModule mod = (VkShaderModule)lean_unbox_uint64(shaderModule);
vkDestroyShaderModule(dev, mod, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
/* Glue: unmarshal a Lean VkPipelineCacheCreateInfo (flags + initial-data
 * ByteArray), call vkCreatePipelineCache, and return the pair
 * (VkResult, VkPipelineCache) inside IO. */
LEAN_EXPORT lean_obj_res glue_vkCreatePipelineCache(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
lean_object *field_flags = lean_ctor_get(pCreateInfo, 0);
/* The initial-data payload is borrowed directly from the Lean scalar array. */
lean_object *data_obj = lean_ctor_get(pCreateInfo, 1);
struct VkPipelineCacheCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineCacheCreateFlags)(VkPipelineCacheCreateFlagBits)lean_unbox_uint32(field_flags),
.initialDataSize = lean_sarray_size(data_obj),
.pInitialData = lean_sarray_cptr(data_obj),
};
VkPipelineCache cache;
VkResult ret = vkCreatePipelineCache((VkDevice)lean_unbox_uint64(device), &info, NULL, &cache);
/* Pack (ret, cache) into a two-field Lean constructor. */
lean_object *pair = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)cache));
return lean_io_result_mk_ok(pair);
}
/* Glue: destroy a VkPipelineCache handle; yields Unit in IO. */
LEAN_EXPORT lean_obj_res glue_vkDestroyPipelineCache(b_lean_obj_arg device, b_lean_obj_arg pipelineCache, b_lean_obj_arg w) {
VkDevice dev = (VkDevice)lean_unbox_uint64(device);
VkPipelineCache cache = (VkPipelineCache)lean_unbox_uint64(pipelineCache);
vkDestroyPipelineCache(dev, cache, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
/* Glue: copy a Lean Array of pipeline-cache handles into a C array, call
 * vkMergePipelineCaches, and return the boxed VkResult inside IO.
 * Fix: the temporary handle array allocated with calloc was leaked on every
 * call; it is now freed once the Vulkan call has consumed it. */
LEAN_EXPORT lean_obj_res glue_vkMergePipelineCaches(b_lean_obj_arg device, b_lean_obj_arg dstCache, b_lean_obj_arg pSrcCaches, b_lean_obj_arg w) {
size_t len_pSrcCaches = lean_array_size(pSrcCaches);
VkPipelineCache* um_pSrcCaches = calloc(len_pSrcCaches, sizeof(VkPipelineCache));
for (size_t i = 0; i < len_pSrcCaches; ++i) {
lean_object *i_pSrcCaches = lean_array_cptr(pSrcCaches)[i];
um_pSrcCaches[i] = (VkPipelineCache)lean_unbox_uint64(i_pSrcCaches);
}
VkResult out_ret = vkMergePipelineCaches((VkDevice)lean_unbox_uint64(device), (VkPipelineCache)lean_unbox_uint64(dstCache), len_pSrcCaches, um_pSrcCaches);
free(um_pSrcCaches); /* fix: previously leaked */
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
LEAN_EXPORT lean_obj_res glue_vkCreateGraphicsPipelines(b_lean_obj_arg device, b_lean_obj_arg pCreateInfos, b_lean_obj_arg w) {
size_t len_pCreateInfos = lean_array_size(pCreateInfos);
VkGraphicsPipelineCreateInfo* um_pCreateInfos = calloc(len_pCreateInfos, sizeof(VkGraphicsPipelineCreateInfo));
for (size_t i = 0; i < len_pCreateInfos; ++i) {
lean_object *i_pCreateInfos = lean_array_cptr(pCreateInfos)[i];
lean_object *i_pCreateInfos_flags = lean_ctor_get(i_pCreateInfos, 0);
lean_object *i_pCreateInfos_pStages = lean_ctor_get(i_pCreateInfos, 1);
size_t len_i_pCreateInfos_pStages = lean_array_size(i_pCreateInfos_pStages);
VkPipelineShaderStageCreateInfo* um_i_pCreateInfos_pStages = calloc(len_i_pCreateInfos_pStages, sizeof(VkPipelineShaderStageCreateInfo));
for (size_t i = 0; i < len_i_pCreateInfos_pStages; ++i) {
lean_object *i_i_pCreateInfos_pStages = lean_array_cptr(i_pCreateInfos_pStages)[i];
lean_object *i_i_pCreateInfos_pStages_flags = lean_ctor_get(i_i_pCreateInfos_pStages, 0);
lean_object *i_i_pCreateInfos_pStages_stage = lean_ctor_get(i_i_pCreateInfos_pStages, 1);
lean_object *i_i_pCreateInfos_pStages_module = lean_ctor_get(i_i_pCreateInfos_pStages, 2);
lean_object *i_i_pCreateInfos_pStages_pName = lean_ctor_get(i_i_pCreateInfos_pStages, 3);
lean_object *i_i_pCreateInfos_pStages_pSpecializationInfo = lean_ctor_get(i_i_pCreateInfos_pStages, 4);
_Bool is_some_i_i_pCreateInfos_pStages_pSpecializationInfo = !lean_is_scalar(i_i_pCreateInfos_pStages_pSpecializationInfo);
VkSpecializationInfo um_i_i_pCreateInfos_pStages_pSpecializationInfo;
if (is_some_i_i_pCreateInfos_pStages_pSpecializationInfo) {
lean_object *some_i_i_pCreateInfos_pStages_pSpecializationInfo = lean_ctor_get(i_i_pCreateInfos_pStages_pSpecializationInfo, 0);
lean_object *some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries = lean_ctor_get(some_i_i_pCreateInfos_pStages_pSpecializationInfo, 0);
size_t len_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries = lean_array_size(some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries);
VkSpecializationMapEntry* um_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries = calloc(len_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries, sizeof(VkSpecializationMapEntry));
for (size_t i = 0; i < len_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries; ++i) {
lean_object *i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries = lean_array_cptr(some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries)[i];
struct VkSpecializationMapEntry um_i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries = {
.constantID = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries) + lean_ctor_num_objs(i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries)) + 8),
.offset = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries) + lean_ctor_num_objs(i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries)) + 12),
.size = (uint64_t)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries) + lean_ctor_num_objs(i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries)) + 0),
};
um_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries[i] = um_i_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries;
}
lean_object *some_i_i_pCreateInfos_pStages_pSpecializationInfo_pData = lean_ctor_get(some_i_i_pCreateInfos_pStages_pSpecializationInfo, 1);
size_t len_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pData = lean_sarray_size(some_i_i_pCreateInfos_pStages_pSpecializationInfo_pData);
void *um_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pData = lean_sarray_cptr(some_i_i_pCreateInfos_pStages_pSpecializationInfo_pData);
struct VkSpecializationInfo um_some_i_i_pCreateInfos_pStages_pSpecializationInfo = {
.mapEntryCount = len_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries,
.pMapEntries = um_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pMapEntries,
.dataSize = len_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pData,
.pData = um_some_i_i_pCreateInfos_pStages_pSpecializationInfo_pData,
};
um_i_i_pCreateInfos_pStages_pSpecializationInfo = um_some_i_i_pCreateInfos_pStages_pSpecializationInfo;
}
struct VkPipelineShaderStageCreateInfo um_i_i_pCreateInfos_pStages = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineShaderStageCreateFlags)(VkPipelineShaderStageCreateFlagBits)lean_unbox_uint32(i_i_pCreateInfos_pStages_flags),
.stage = (VkShaderStageFlagBits)lean_unbox_uint32(i_i_pCreateInfos_pStages_stage),
.module = (VkShaderModule)lean_unbox_uint64(i_i_pCreateInfos_pStages_module),
.pName = (char*)lean_string_cstr(i_i_pCreateInfos_pStages_pName),
.pSpecializationInfo = (is_some_i_i_pCreateInfos_pStages_pSpecializationInfo ? &um_i_i_pCreateInfos_pStages_pSpecializationInfo : NULL),
};
um_i_pCreateInfos_pStages[i] = um_i_i_pCreateInfos_pStages;
}
lean_object *i_pCreateInfos_pVertexInputState = lean_ctor_get(i_pCreateInfos, 2);
_Bool is_some_i_pCreateInfos_pVertexInputState = !lean_is_scalar(i_pCreateInfos_pVertexInputState);
VkPipelineVertexInputStateCreateInfo um_i_pCreateInfos_pVertexInputState;
if (is_some_i_pCreateInfos_pVertexInputState) {
lean_object *some_i_pCreateInfos_pVertexInputState = lean_ctor_get(i_pCreateInfos_pVertexInputState, 0);
lean_object *some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions = lean_ctor_get(some_i_pCreateInfos_pVertexInputState, 0);
size_t len_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions = lean_array_size(some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions);
VkVertexInputBindingDescription* um_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions = calloc(len_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions, sizeof(VkVertexInputBindingDescription));
for (size_t i = 0; i < len_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions; ++i) {
lean_object *i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions = lean_array_cptr(some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions)[i];
lean_object *i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions_inputRate = lean_ctor_get(i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions, 0);
struct VkVertexInputBindingDescription um_i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions = {
.binding = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions) + lean_ctor_num_objs(i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions)) + 0),
.stride = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions) + lean_ctor_num_objs(i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions)) + 4),
.inputRate = (VkVertexInputRate)lean_unbox_uint32(i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions_inputRate),
};
um_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions[i] = um_i_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions;
}
lean_object *some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions = lean_ctor_get(some_i_pCreateInfos_pVertexInputState, 1);
size_t len_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions = lean_array_size(some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions);
VkVertexInputAttributeDescription* um_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions = calloc(len_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions, sizeof(VkVertexInputAttributeDescription));
for (size_t i = 0; i < len_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions; ++i) {
lean_object *i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions = lean_array_cptr(some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions)[i];
lean_object *i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions_format = lean_ctor_get(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions, 0);
struct VkVertexInputAttributeDescription um_i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions = {
.location = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions) + lean_ctor_num_objs(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions)) + 0),
.binding = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions) + lean_ctor_num_objs(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions)) + 4),
.format = (VkFormat)lean_unbox_uint32(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions_format),
.offset = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions) + lean_ctor_num_objs(i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions)) + 8),
};
um_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions[i] = um_i_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions;
}
struct VkPipelineVertexInputStateCreateInfo um_some_i_pCreateInfos_pVertexInputState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineVertexInputStateCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pVertexInputState) + lean_ctor_num_objs(some_i_pCreateInfos_pVertexInputState)) + 0),
.vertexBindingDescriptionCount = len_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions,
.pVertexBindingDescriptions = um_some_i_pCreateInfos_pVertexInputState_pVertexBindingDescriptions,
.vertexAttributeDescriptionCount = len_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions,
.pVertexAttributeDescriptions = um_some_i_pCreateInfos_pVertexInputState_pVertexAttributeDescriptions,
};
um_i_pCreateInfos_pVertexInputState = um_some_i_pCreateInfos_pVertexInputState;
}
lean_object *i_pCreateInfos_pInputAssemblyState = lean_ctor_get(i_pCreateInfos, 3);
_Bool is_some_i_pCreateInfos_pInputAssemblyState = !lean_is_scalar(i_pCreateInfos_pInputAssemblyState);
VkPipelineInputAssemblyStateCreateInfo um_i_pCreateInfos_pInputAssemblyState;
if (is_some_i_pCreateInfos_pInputAssemblyState) {
lean_object *some_i_pCreateInfos_pInputAssemblyState = lean_ctor_get(i_pCreateInfos_pInputAssemblyState, 0);
lean_object *some_i_pCreateInfos_pInputAssemblyState_topology = lean_ctor_get(some_i_pCreateInfos_pInputAssemblyState, 0);
struct VkPipelineInputAssemblyStateCreateInfo um_some_i_pCreateInfos_pInputAssemblyState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineInputAssemblyStateCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pInputAssemblyState) + lean_ctor_num_objs(some_i_pCreateInfos_pInputAssemblyState)) + 0),
.topology = (VkPrimitiveTopology)lean_unbox_uint32(some_i_pCreateInfos_pInputAssemblyState_topology),
.primitiveRestartEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pInputAssemblyState) + lean_ctor_num_objs(some_i_pCreateInfos_pInputAssemblyState)) + 4),
};
um_i_pCreateInfos_pInputAssemblyState = um_some_i_pCreateInfos_pInputAssemblyState;
}
lean_object *i_pCreateInfos_pTessellationState = lean_ctor_get(i_pCreateInfos, 4);
_Bool is_some_i_pCreateInfos_pTessellationState = !lean_is_scalar(i_pCreateInfos_pTessellationState);
VkPipelineTessellationStateCreateInfo um_i_pCreateInfos_pTessellationState;
if (is_some_i_pCreateInfos_pTessellationState) {
lean_object *some_i_pCreateInfos_pTessellationState = lean_ctor_get(i_pCreateInfos_pTessellationState, 0);
struct VkPipelineTessellationStateCreateInfo um_some_i_pCreateInfos_pTessellationState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineTessellationStateCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pTessellationState) + lean_ctor_num_objs(some_i_pCreateInfos_pTessellationState)) + 0),
.patchControlPoints = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pTessellationState) + lean_ctor_num_objs(some_i_pCreateInfos_pTessellationState)) + 4),
};
um_i_pCreateInfos_pTessellationState = um_some_i_pCreateInfos_pTessellationState;
}
lean_object *i_pCreateInfos_pViewportState = lean_ctor_get(i_pCreateInfos, 5);
_Bool is_some_i_pCreateInfos_pViewportState = !lean_is_scalar(i_pCreateInfos_pViewportState);
VkPipelineViewportStateCreateInfo um_i_pCreateInfos_pViewportState;
if (is_some_i_pCreateInfos_pViewportState) {
lean_object *some_i_pCreateInfos_pViewportState = lean_ctor_get(i_pCreateInfos_pViewportState, 0);
lean_object *some_i_pCreateInfos_pViewportState_pViewports = lean_ctor_get(some_i_pCreateInfos_pViewportState, 0);
size_t len_some_i_pCreateInfos_pViewportState_pViewports = lean_array_size(some_i_pCreateInfos_pViewportState_pViewports);
VkViewport* um_some_i_pCreateInfos_pViewportState_pViewports = calloc(len_some_i_pCreateInfos_pViewportState_pViewports, sizeof(VkViewport));
for (size_t i = 0; i < len_some_i_pCreateInfos_pViewportState_pViewports; ++i) {
lean_object *i_some_i_pCreateInfos_pViewportState_pViewports = lean_array_cptr(some_i_pCreateInfos_pViewportState_pViewports)[i];
struct VkViewport um_i_some_i_pCreateInfos_pViewportState_pViewports = {
.x = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pViewports) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pViewports)) + 0),
.y = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pViewports) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pViewports)) + 8),
.width = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pViewports) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pViewports)) + 16),
.height = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pViewports) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pViewports)) + 24),
.minDepth = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pViewports) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pViewports)) + 32),
.maxDepth = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pViewports) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pViewports)) + 40),
};
um_some_i_pCreateInfos_pViewportState_pViewports[i] = um_i_some_i_pCreateInfos_pViewportState_pViewports;
}
lean_object *some_i_pCreateInfos_pViewportState_pScissors = lean_ctor_get(some_i_pCreateInfos_pViewportState, 1);
size_t len_some_i_pCreateInfos_pViewportState_pScissors = lean_array_size(some_i_pCreateInfos_pViewportState_pScissors);
VkRect2D* um_some_i_pCreateInfos_pViewportState_pScissors = calloc(len_some_i_pCreateInfos_pViewportState_pScissors, sizeof(VkRect2D));
for (size_t i = 0; i < len_some_i_pCreateInfos_pViewportState_pScissors; ++i) {
lean_object *i_some_i_pCreateInfos_pViewportState_pScissors = lean_array_cptr(some_i_pCreateInfos_pViewportState_pScissors)[i];
lean_object *i_some_i_pCreateInfos_pViewportState_pScissors_offset = lean_ctor_get(i_some_i_pCreateInfos_pViewportState_pScissors, 0);
struct VkOffset2D um_i_some_i_pCreateInfos_pViewportState_pScissors_offset = {
.x = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pScissors_offset) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pScissors_offset)) + 0),
.y = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pScissors_offset) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pScissors_offset)) + 4),
};
lean_object *i_some_i_pCreateInfos_pViewportState_pScissors_extent = lean_ctor_get(i_some_i_pCreateInfos_pViewportState_pScissors, 1);
struct VkExtent2D um_i_some_i_pCreateInfos_pViewportState_pScissors_extent = {
.width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pScissors_extent) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pScissors_extent)) + 0),
.height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pViewportState_pScissors_extent) + lean_ctor_num_objs(i_some_i_pCreateInfos_pViewportState_pScissors_extent)) + 4),
};
struct VkRect2D um_i_some_i_pCreateInfos_pViewportState_pScissors = {
.offset = um_i_some_i_pCreateInfos_pViewportState_pScissors_offset,
.extent = um_i_some_i_pCreateInfos_pViewportState_pScissors_extent,
};
um_some_i_pCreateInfos_pViewportState_pScissors[i] = um_i_some_i_pCreateInfos_pViewportState_pScissors;
}
struct VkPipelineViewportStateCreateInfo um_some_i_pCreateInfos_pViewportState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineViewportStateCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pViewportState) + lean_ctor_num_objs(some_i_pCreateInfos_pViewportState)) + 0),
.viewportCount = len_some_i_pCreateInfos_pViewportState_pViewports,
.pViewports = um_some_i_pCreateInfos_pViewportState_pViewports,
.scissorCount = len_some_i_pCreateInfos_pViewportState_pScissors,
.pScissors = um_some_i_pCreateInfos_pViewportState_pScissors,
};
um_i_pCreateInfos_pViewportState = um_some_i_pCreateInfos_pViewportState;
}
lean_object *i_pCreateInfos_pRasterizationState = lean_ctor_get(i_pCreateInfos, 6);
_Bool is_some_i_pCreateInfos_pRasterizationState = !lean_is_scalar(i_pCreateInfos_pRasterizationState);
VkPipelineRasterizationStateCreateInfo um_i_pCreateInfos_pRasterizationState;
if (is_some_i_pCreateInfos_pRasterizationState) {
lean_object *some_i_pCreateInfos_pRasterizationState = lean_ctor_get(i_pCreateInfos_pRasterizationState, 0);
lean_object *some_i_pCreateInfos_pRasterizationState_polygonMode = lean_ctor_get(some_i_pCreateInfos_pRasterizationState, 0);
lean_object *some_i_pCreateInfos_pRasterizationState_cullMode = lean_ctor_get(some_i_pCreateInfos_pRasterizationState, 1);
lean_object *some_i_pCreateInfos_pRasterizationState_frontFace = lean_ctor_get(some_i_pCreateInfos_pRasterizationState, 2);
struct VkPipelineRasterizationStateCreateInfo um_some_i_pCreateInfos_pRasterizationState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineRasterizationStateCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 32),
.depthClampEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 36),
.rasterizerDiscardEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 40),
.polygonMode = (VkPolygonMode)lean_unbox_uint32(some_i_pCreateInfos_pRasterizationState_polygonMode),
.cullMode = (VkCullModeFlags)(VkCullModeFlagBits)lean_unbox_uint32(some_i_pCreateInfos_pRasterizationState_cullMode),
.frontFace = (VkFrontFace)lean_unbox_uint32(some_i_pCreateInfos_pRasterizationState_frontFace),
.depthBiasEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 44),
.depthBiasConstantFactor = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 0),
.depthBiasClamp = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 8),
.depthBiasSlopeFactor = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 16),
.lineWidth = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pRasterizationState) + lean_ctor_num_objs(some_i_pCreateInfos_pRasterizationState)) + 24),
};
um_i_pCreateInfos_pRasterizationState = um_some_i_pCreateInfos_pRasterizationState;
}
lean_object *i_pCreateInfos_pMultisampleState = lean_ctor_get(i_pCreateInfos, 7);
_Bool is_some_i_pCreateInfos_pMultisampleState = !lean_is_scalar(i_pCreateInfos_pMultisampleState);
VkPipelineMultisampleStateCreateInfo um_i_pCreateInfos_pMultisampleState;
if (is_some_i_pCreateInfos_pMultisampleState) {
lean_object *some_i_pCreateInfos_pMultisampleState = lean_ctor_get(i_pCreateInfos_pMultisampleState, 0);
lean_object *some_i_pCreateInfos_pMultisampleState_rasterizationSamples = lean_ctor_get(some_i_pCreateInfos_pMultisampleState, 0);
struct VkPipelineMultisampleStateCreateInfo um_some_i_pCreateInfos_pMultisampleState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineMultisampleStateCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pMultisampleState) + lean_ctor_num_objs(some_i_pCreateInfos_pMultisampleState)) + 8),
.rasterizationSamples = (VkSampleCountFlagBits)lean_unbox_uint32(some_i_pCreateInfos_pMultisampleState_rasterizationSamples),
.sampleShadingEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pMultisampleState) + lean_ctor_num_objs(some_i_pCreateInfos_pMultisampleState)) + 12),
.minSampleShading = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pMultisampleState) + lean_ctor_num_objs(some_i_pCreateInfos_pMultisampleState)) + 0),
.pSampleMask = NULL,
.alphaToCoverageEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pMultisampleState) + lean_ctor_num_objs(some_i_pCreateInfos_pMultisampleState)) + 16),
.alphaToOneEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pMultisampleState) + lean_ctor_num_objs(some_i_pCreateInfos_pMultisampleState)) + 20),
};
um_i_pCreateInfos_pMultisampleState = um_some_i_pCreateInfos_pMultisampleState;
}
lean_object *i_pCreateInfos_pDepthStencilState = lean_ctor_get(i_pCreateInfos, 8);
_Bool is_some_i_pCreateInfos_pDepthStencilState = !lean_is_scalar(i_pCreateInfos_pDepthStencilState);
VkPipelineDepthStencilStateCreateInfo um_i_pCreateInfos_pDepthStencilState;
if (is_some_i_pCreateInfos_pDepthStencilState) {
lean_object *some_i_pCreateInfos_pDepthStencilState = lean_ctor_get(i_pCreateInfos_pDepthStencilState, 0);
lean_object *some_i_pCreateInfos_pDepthStencilState_flags = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState, 0);
lean_object *some_i_pCreateInfos_pDepthStencilState_depthCompareOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState, 1);
lean_object *some_i_pCreateInfos_pDepthStencilState_front = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState, 2);
lean_object *some_i_pCreateInfos_pDepthStencilState_front_failOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_front, 0);
lean_object *some_i_pCreateInfos_pDepthStencilState_front_passOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_front, 1);
lean_object *some_i_pCreateInfos_pDepthStencilState_front_depthFailOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_front, 2);
lean_object *some_i_pCreateInfos_pDepthStencilState_front_compareOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_front, 3);
struct VkStencilOpState um_some_i_pCreateInfos_pDepthStencilState_front = {
.failOp = (VkStencilOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_front_failOp),
.passOp = (VkStencilOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_front_passOp),
.depthFailOp = (VkStencilOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_front_depthFailOp),
.compareOp = (VkCompareOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_front_compareOp),
.compareMask = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState_front) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState_front)) + 0),
.writeMask = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState_front) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState_front)) + 4),
.reference = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState_front) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState_front)) + 8),
};
lean_object *some_i_pCreateInfos_pDepthStencilState_back = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState, 3);
lean_object *some_i_pCreateInfos_pDepthStencilState_back_failOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_back, 0);
lean_object *some_i_pCreateInfos_pDepthStencilState_back_passOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_back, 1);
lean_object *some_i_pCreateInfos_pDepthStencilState_back_depthFailOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_back, 2);
lean_object *some_i_pCreateInfos_pDepthStencilState_back_compareOp = lean_ctor_get(some_i_pCreateInfos_pDepthStencilState_back, 3);
struct VkStencilOpState um_some_i_pCreateInfos_pDepthStencilState_back = {
.failOp = (VkStencilOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_back_failOp),
.passOp = (VkStencilOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_back_passOp),
.depthFailOp = (VkStencilOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_back_depthFailOp),
.compareOp = (VkCompareOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_back_compareOp),
.compareMask = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState_back) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState_back)) + 0),
.writeMask = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState_back) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState_back)) + 4),
.reference = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState_back) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState_back)) + 8),
};
struct VkPipelineDepthStencilStateCreateInfo um_some_i_pCreateInfos_pDepthStencilState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineDepthStencilStateCreateFlags)(VkPipelineDepthStencilStateCreateFlagBits)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_flags),
.depthTestEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState)) + 16),
.depthWriteEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState)) + 20),
.depthCompareOp = (VkCompareOp)lean_unbox_uint32(some_i_pCreateInfos_pDepthStencilState_depthCompareOp),
.depthBoundsTestEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState)) + 24),
.stencilTestEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState)) + 28),
.front = um_some_i_pCreateInfos_pDepthStencilState_front,
.back = um_some_i_pCreateInfos_pDepthStencilState_back,
.minDepthBounds = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState)) + 0),
.maxDepthBounds = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDepthStencilState) + lean_ctor_num_objs(some_i_pCreateInfos_pDepthStencilState)) + 8),
};
um_i_pCreateInfos_pDepthStencilState = um_some_i_pCreateInfos_pDepthStencilState;
}
lean_object *i_pCreateInfos_pColorBlendState = lean_ctor_get(i_pCreateInfos, 9);
_Bool is_some_i_pCreateInfos_pColorBlendState = !lean_is_scalar(i_pCreateInfos_pColorBlendState);
VkPipelineColorBlendStateCreateInfo um_i_pCreateInfos_pColorBlendState;
if (is_some_i_pCreateInfos_pColorBlendState) {
lean_object *some_i_pCreateInfos_pColorBlendState = lean_ctor_get(i_pCreateInfos_pColorBlendState, 0);
lean_object *some_i_pCreateInfos_pColorBlendState_flags = lean_ctor_get(some_i_pCreateInfos_pColorBlendState, 0);
lean_object *some_i_pCreateInfos_pColorBlendState_logicOp = lean_ctor_get(some_i_pCreateInfos_pColorBlendState, 1);
lean_object *some_i_pCreateInfos_pColorBlendState_pAttachments = lean_ctor_get(some_i_pCreateInfos_pColorBlendState, 2);
size_t len_some_i_pCreateInfos_pColorBlendState_pAttachments = lean_array_size(some_i_pCreateInfos_pColorBlendState_pAttachments);
VkPipelineColorBlendAttachmentState* um_some_i_pCreateInfos_pColorBlendState_pAttachments = calloc(len_some_i_pCreateInfos_pColorBlendState_pAttachments, sizeof(VkPipelineColorBlendAttachmentState));
for (size_t i = 0; i < len_some_i_pCreateInfos_pColorBlendState_pAttachments; ++i) {
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments = lean_array_cptr(some_i_pCreateInfos_pColorBlendState_pAttachments)[i];
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments_srcColorBlendFactor = lean_ctor_get(i_some_i_pCreateInfos_pColorBlendState_pAttachments, 0);
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments_dstColorBlendFactor = lean_ctor_get(i_some_i_pCreateInfos_pColorBlendState_pAttachments, 1);
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments_colorBlendOp = lean_ctor_get(i_some_i_pCreateInfos_pColorBlendState_pAttachments, 2);
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments_srcAlphaBlendFactor = lean_ctor_get(i_some_i_pCreateInfos_pColorBlendState_pAttachments, 3);
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments_dstAlphaBlendFactor = lean_ctor_get(i_some_i_pCreateInfos_pColorBlendState_pAttachments, 4);
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments_alphaBlendOp = lean_ctor_get(i_some_i_pCreateInfos_pColorBlendState_pAttachments, 5);
lean_object *i_some_i_pCreateInfos_pColorBlendState_pAttachments_colorWriteMask = lean_ctor_get(i_some_i_pCreateInfos_pColorBlendState_pAttachments, 6);
struct VkPipelineColorBlendAttachmentState um_i_some_i_pCreateInfos_pColorBlendState_pAttachments = {
.blendEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_some_i_pCreateInfos_pColorBlendState_pAttachments) + lean_ctor_num_objs(i_some_i_pCreateInfos_pColorBlendState_pAttachments)) + 0),
.srcColorBlendFactor = (VkBlendFactor)lean_unbox_uint32(i_some_i_pCreateInfos_pColorBlendState_pAttachments_srcColorBlendFactor),
.dstColorBlendFactor = (VkBlendFactor)lean_unbox_uint32(i_some_i_pCreateInfos_pColorBlendState_pAttachments_dstColorBlendFactor),
.colorBlendOp = (VkBlendOp)lean_unbox_uint32(i_some_i_pCreateInfos_pColorBlendState_pAttachments_colorBlendOp),
.srcAlphaBlendFactor = (VkBlendFactor)lean_unbox_uint32(i_some_i_pCreateInfos_pColorBlendState_pAttachments_srcAlphaBlendFactor),
.dstAlphaBlendFactor = (VkBlendFactor)lean_unbox_uint32(i_some_i_pCreateInfos_pColorBlendState_pAttachments_dstAlphaBlendFactor),
.alphaBlendOp = (VkBlendOp)lean_unbox_uint32(i_some_i_pCreateInfos_pColorBlendState_pAttachments_alphaBlendOp),
.colorWriteMask = (VkColorComponentFlags)(VkColorComponentFlagBits)lean_unbox_uint32(i_some_i_pCreateInfos_pColorBlendState_pAttachments_colorWriteMask),
};
um_some_i_pCreateInfos_pColorBlendState_pAttachments[i] = um_i_some_i_pCreateInfos_pColorBlendState_pAttachments;
}
lean_object *some_i_pCreateInfos_pColorBlendState_blendConstants = lean_ctor_get(some_i_pCreateInfos_pColorBlendState, 3);
size_t len_some_i_pCreateInfos_pColorBlendState_blendConstants = lean_sarray_size(some_i_pCreateInfos_pColorBlendState_blendConstants);
float* um_some_i_pCreateInfos_pColorBlendState_blendConstants = calloc(len_some_i_pCreateInfos_pColorBlendState_blendConstants, sizeof(float));
for (size_t i = 0; i < len_some_i_pCreateInfos_pColorBlendState_blendConstants; ++i) {
um_some_i_pCreateInfos_pColorBlendState_blendConstants[i] = lean_float_array_uget(some_i_pCreateInfos_pColorBlendState_blendConstants, i);
}
if (len_some_i_pCreateInfos_pColorBlendState_blendConstants != 4) abort();
struct VkPipelineColorBlendStateCreateInfo um_some_i_pCreateInfos_pColorBlendState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineColorBlendStateCreateFlags)(VkPipelineColorBlendStateCreateFlagBits)lean_unbox_uint32(some_i_pCreateInfos_pColorBlendState_flags),
.logicOpEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pColorBlendState) + lean_ctor_num_objs(some_i_pCreateInfos_pColorBlendState)) + 0),
.logicOp = (VkLogicOp)lean_unbox_uint32(some_i_pCreateInfos_pColorBlendState_logicOp),
.attachmentCount = len_some_i_pCreateInfos_pColorBlendState_pAttachments,
.pAttachments = um_some_i_pCreateInfos_pColorBlendState_pAttachments,
.blendConstants = {um_some_i_pCreateInfos_pColorBlendState_blendConstants[0],um_some_i_pCreateInfos_pColorBlendState_blendConstants[1],um_some_i_pCreateInfos_pColorBlendState_blendConstants[2],um_some_i_pCreateInfos_pColorBlendState_blendConstants[3],},
};
um_i_pCreateInfos_pColorBlendState = um_some_i_pCreateInfos_pColorBlendState;
}
lean_object *i_pCreateInfos_pDynamicState = lean_ctor_get(i_pCreateInfos, 10);
_Bool is_some_i_pCreateInfos_pDynamicState = !lean_is_scalar(i_pCreateInfos_pDynamicState);
VkPipelineDynamicStateCreateInfo um_i_pCreateInfos_pDynamicState;
if (is_some_i_pCreateInfos_pDynamicState) {
lean_object *some_i_pCreateInfos_pDynamicState = lean_ctor_get(i_pCreateInfos_pDynamicState, 0);
lean_object *some_i_pCreateInfos_pDynamicState_pDynamicStates = lean_ctor_get(some_i_pCreateInfos_pDynamicState, 0);
size_t len_some_i_pCreateInfos_pDynamicState_pDynamicStates = lean_array_size(some_i_pCreateInfos_pDynamicState_pDynamicStates);
VkDynamicState* um_some_i_pCreateInfos_pDynamicState_pDynamicStates = calloc(len_some_i_pCreateInfos_pDynamicState_pDynamicStates, sizeof(VkDynamicState));
for (size_t i = 0; i < len_some_i_pCreateInfos_pDynamicState_pDynamicStates; ++i) {
lean_object *i_some_i_pCreateInfos_pDynamicState_pDynamicStates = lean_array_cptr(some_i_pCreateInfos_pDynamicState_pDynamicStates)[i];
um_some_i_pCreateInfos_pDynamicState_pDynamicStates[i] = (VkDynamicState)lean_unbox_uint32(i_some_i_pCreateInfos_pDynamicState_pDynamicStates);
}
struct VkPipelineDynamicStateCreateInfo um_some_i_pCreateInfos_pDynamicState = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineDynamicStateCreateFlags)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfos_pDynamicState) + lean_ctor_num_objs(some_i_pCreateInfos_pDynamicState)) + 0),
.dynamicStateCount = len_some_i_pCreateInfos_pDynamicState_pDynamicStates,
.pDynamicStates = um_some_i_pCreateInfos_pDynamicState_pDynamicStates,
};
um_i_pCreateInfos_pDynamicState = um_some_i_pCreateInfos_pDynamicState;
}
lean_object *i_pCreateInfos_layout = lean_ctor_get(i_pCreateInfos, 11);
lean_object *i_pCreateInfos_renderPass = lean_ctor_get(i_pCreateInfos, 12);
struct VkGraphicsPipelineCreateInfo um_i_pCreateInfos = {
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = 0,
.flags = (VkPipelineCreateFlags)(VkPipelineCreateFlagBits)lean_unbox_uint32(i_pCreateInfos_flags),
.stageCount = len_i_pCreateInfos_pStages,
.pStages = um_i_pCreateInfos_pStages,
.pVertexInputState = (is_some_i_pCreateInfos_pVertexInputState ? &um_i_pCreateInfos_pVertexInputState : NULL),
.pInputAssemblyState = (is_some_i_pCreateInfos_pInputAssemblyState ? &um_i_pCreateInfos_pInputAssemblyState : NULL),
.pTessellationState = (is_some_i_pCreateInfos_pTessellationState ? &um_i_pCreateInfos_pTessellationState : NULL),
.pViewportState = (is_some_i_pCreateInfos_pViewportState ? &um_i_pCreateInfos_pViewportState : NULL),
.pRasterizationState = (is_some_i_pCreateInfos_pRasterizationState ? &um_i_pCreateInfos_pRasterizationState : NULL),
.pMultisampleState = (is_some_i_pCreateInfos_pMultisampleState ? &um_i_pCreateInfos_pMultisampleState : NULL),
.pDepthStencilState = (is_some_i_pCreateInfos_pDepthStencilState ? &um_i_pCreateInfos_pDepthStencilState : NULL),
.pColorBlendState = (is_some_i_pCreateInfos_pColorBlendState ? &um_i_pCreateInfos_pColorBlendState : NULL),
.pDynamicState = (is_some_i_pCreateInfos_pDynamicState ? &um_i_pCreateInfos_pDynamicState : NULL),
.layout = (VkPipelineLayout)lean_unbox_uint64(i_pCreateInfos_layout),
.renderPass = (VkRenderPass)lean_unbox_uint64(i_pCreateInfos_renderPass),
.subpass = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfos) + lean_ctor_num_objs(i_pCreateInfos)) + 0),
.basePipelineHandle = VK_NULL_HANDLE,
.basePipelineIndex = -1,
};
um_pCreateInfos[i] = um_i_pCreateInfos;
}
VkPipeline* out_pPipelines;
out_pPipelines = calloc(len_pCreateInfos, sizeof(VkPipeline));
uint32_t len_out_pPipelines = len_pCreateInfos;
VkResult out_ret = vkCreateGraphicsPipelines((VkDevice)lean_unbox_uint64(device), VK_NULL_HANDLE, len_pCreateInfos, um_pCreateInfos, NULL, out_pPipelines);
lean_object *m_out_pPipelines = lean_alloc_array(len_out_pPipelines, len_out_pPipelines);
for (size_t i = 0; i < len_out_pPipelines; ++i) {
VkPipeline i_out_pPipelines = out_pPipelines[i];
lean_array_cptr(m_out_pPipelines)[i] = lean_box_uint64((uint64_t)i_out_pPipelines);
}
lean_object *temp, *tuple = m_out_pPipelines;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
LEAN_EXPORT lean_obj_res glue_vkCreateComputePipelines(b_lean_obj_arg device, b_lean_obj_arg pipelineCache, b_lean_obj_arg pCreateInfos, b_lean_obj_arg w) {
  // Unmarshal a Lean array of compute-pipeline create infos, call
  // vkCreateComputePipelines, and return (VkResult, Array Pipeline).
  size_t len_pCreateInfos = lean_array_size(pCreateInfos);
  VkComputePipelineCreateInfo* um_pCreateInfos = calloc(len_pCreateInfos, sizeof(VkComputePipelineCreateInfo));
  for (size_t i = 0; i < len_pCreateInfos; ++i) {
    lean_object *i_pCreateInfos = lean_array_cptr(pCreateInfos)[i];
    lean_object *i_pCreateInfos_flags = lean_ctor_get(i_pCreateInfos, 0);
    lean_object *i_pCreateInfos_stage = lean_ctor_get(i_pCreateInfos, 1);
    lean_object *i_pCreateInfos_stage_flags = lean_ctor_get(i_pCreateInfos_stage, 0);
    lean_object *i_pCreateInfos_stage_stage = lean_ctor_get(i_pCreateInfos_stage, 1);
    lean_object *i_pCreateInfos_stage_module = lean_ctor_get(i_pCreateInfos_stage, 2);
    lean_object *i_pCreateInfos_stage_pName = lean_ctor_get(i_pCreateInfos_stage, 3);
    lean_object *i_pCreateInfos_stage_pSpecializationInfo = lean_ctor_get(i_pCreateInfos_stage, 4);
    _Bool is_some_i_pCreateInfos_stage_pSpecializationInfo = !lean_is_scalar(i_pCreateInfos_stage_pSpecializationInfo);
    // BUGFIX: the specialization info must live on the heap. The original kept
    // it in a loop-local stack variable whose address was stored into
    // um_pCreateInfos[i] and then dereferenced after the loop had exited
    // (inside vkCreateComputePipelines) — undefined behavior.
    VkSpecializationInfo *um_i_pCreateInfos_stage_pSpecializationInfo = NULL;
    if (is_some_i_pCreateInfos_stage_pSpecializationInfo) {
      lean_object *some_spec = lean_ctor_get(i_pCreateInfos_stage_pSpecializationInfo, 0);
      lean_object *spec_pMapEntries = lean_ctor_get(some_spec, 0);
      size_t len_map_entries = lean_array_size(spec_pMapEntries);
      VkSpecializationMapEntry* um_map_entries = calloc(len_map_entries, sizeof(VkSpecializationMapEntry));
      for (size_t j = 0; j < len_map_entries; ++j) {
        lean_object *entry = lean_array_cptr(spec_pMapEntries)[j];
        // Unboxed scalar area of the map-entry ctor: u64 size at +0,
        // u32 constantID at +8, u32 offset at +12 (same offsets as the
        // original generated code).
        uint8_t *scalars = (uint8_t*)(lean_ctor_obj_cptr(entry) + lean_ctor_num_objs(entry));
        um_map_entries[j] = (VkSpecializationMapEntry){
          .constantID = *(uint32_t*)(scalars + 8),
          .offset = *(uint32_t*)(scalars + 12),
          .size = *(uint64_t*)(scalars + 0),
        };
      }
      lean_object *spec_pData = lean_ctor_get(some_spec, 1);
      um_i_pCreateInfos_stage_pSpecializationInfo = malloc(sizeof(VkSpecializationInfo));
      *um_i_pCreateInfos_stage_pSpecializationInfo = (VkSpecializationInfo){
        .mapEntryCount = len_map_entries,
        .pMapEntries = um_map_entries,
        .dataSize = lean_sarray_size(spec_pData),
        // pData borrows the Lean byte array's buffer; valid while pCreateInfos
        // is borrowed, so it must not be freed below.
        .pData = lean_sarray_cptr(spec_pData),
      };
    }
    struct VkPipelineShaderStageCreateInfo um_i_pCreateInfos_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .pNext = 0,
      .flags = (VkPipelineShaderStageCreateFlags)(VkPipelineShaderStageCreateFlagBits)lean_unbox_uint32(i_pCreateInfos_stage_flags),
      .stage = (VkShaderStageFlagBits)lean_unbox_uint32(i_pCreateInfos_stage_stage),
      .module = (VkShaderModule)lean_unbox_uint64(i_pCreateInfos_stage_module),
      .pName = (char*)lean_string_cstr(i_pCreateInfos_stage_pName),
      .pSpecializationInfo = um_i_pCreateInfos_stage_pSpecializationInfo,
    };
    lean_object *i_pCreateInfos_layout = lean_ctor_get(i_pCreateInfos, 2);
    lean_object *i_pCreateInfos_basePipelineHandle = lean_ctor_get(i_pCreateInfos, 3);
    struct VkComputePipelineCreateInfo um_i_pCreateInfos = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .pNext = 0,
      .flags = (VkPipelineCreateFlags)(VkPipelineCreateFlagBits)lean_unbox_uint32(i_pCreateInfos_flags),
      .stage = um_i_pCreateInfos_stage,
      .layout = (VkPipelineLayout)lean_unbox_uint64(i_pCreateInfos_layout),
      .basePipelineHandle = (VkPipeline)lean_unbox_uint64(i_pCreateInfos_basePipelineHandle),
      .basePipelineIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfos) + lean_ctor_num_objs(i_pCreateInfos)) + 0),
    };
    um_pCreateInfos[i] = um_i_pCreateInfos;
  }
  // BUGFIX: the original passed out_pPipelines to Vulkan uninitialized; it
  // must hold one handle per create info (mirrors glue_vkCreateGraphicsPipelines).
  VkPipeline* out_pPipelines = calloc(len_pCreateInfos, sizeof(VkPipeline));
  uint32_t len_out_pPipelines = len_pCreateInfos;
  VkResult out_ret = vkCreateComputePipelines((VkDevice)lean_unbox_uint64(device), (VkPipelineCache)lean_unbox_uint64(pipelineCache), len_pCreateInfos, um_pCreateInfos, NULL, out_pPipelines);
  // Release all unmarshalling temporaries now that Vulkan has consumed them
  // (the original leaked every calloc in this function).
  for (size_t i = 0; i < len_pCreateInfos; ++i) {
    const VkSpecializationInfo *si = um_pCreateInfos[i].stage.pSpecializationInfo;
    if (si) {
      free((void*)si->pMapEntries);
      free((void*)si);
    }
  }
  free(um_pCreateInfos);
  // Marshal the resulting handles back into a Lean array.
  lean_object *m_out_pPipelines = lean_alloc_array(len_out_pPipelines, len_out_pPipelines);
  for (size_t i = 0; i < len_out_pPipelines; ++i) {
    lean_array_cptr(m_out_pPipelines)[i] = lean_box_uint64((uint64_t)out_pPipelines[i]);
  }
  free(out_pPipelines);
  // Return the pair (VkResult, Array Pipeline).
  lean_object *tuple = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(tuple, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(tuple, 1, m_out_pPipelines);
  return lean_io_result_mk_ok(tuple);
}
LEAN_EXPORT lean_obj_res glue_vkDestroyPipeline(b_lean_obj_arg device, b_lean_obj_arg pipeline, b_lean_obj_arg w) {
  // Unbox the two handles and forward to Vulkan; returns IO Unit.
  VkDevice dev = (VkDevice)lean_unbox_uint64(device);
  VkPipeline pl = (VkPipeline)lean_unbox_uint64(pipeline);
  vkDestroyPipeline(dev, pl, NULL);
  return lean_io_result_mk_ok(lean_box(0));
}
LEAN_EXPORT lean_obj_res glue_vkCreatePipelineLayout(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
  // Unmarshal the Lean create-info record, call vkCreatePipelineLayout, and
  // return (VkResult, PipelineLayout).
  lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
  lean_object *pCreateInfo_pSetLayouts = lean_ctor_get(pCreateInfo, 1);
  size_t len_pCreateInfo_pSetLayouts = lean_array_size(pCreateInfo_pSetLayouts);
  VkDescriptorSetLayout* um_pCreateInfo_pSetLayouts = calloc(len_pCreateInfo_pSetLayouts, sizeof(VkDescriptorSetLayout));
  for (size_t i = 0; i < len_pCreateInfo_pSetLayouts; ++i) {
    lean_object *i_pCreateInfo_pSetLayouts = lean_array_cptr(pCreateInfo_pSetLayouts)[i];
    um_pCreateInfo_pSetLayouts[i] = (VkDescriptorSetLayout)lean_unbox_uint64(i_pCreateInfo_pSetLayouts);
  }
  lean_object *pCreateInfo_pPushConstantRanges = lean_ctor_get(pCreateInfo, 2);
  size_t len_pCreateInfo_pPushConstantRanges = lean_array_size(pCreateInfo_pPushConstantRanges);
  VkPushConstantRange* um_pCreateInfo_pPushConstantRanges = calloc(len_pCreateInfo_pPushConstantRanges, sizeof(VkPushConstantRange));
  for (size_t i = 0; i < len_pCreateInfo_pPushConstantRanges; ++i) {
    lean_object *i_pCreateInfo_pPushConstantRanges = lean_array_cptr(pCreateInfo_pPushConstantRanges)[i];
    lean_object *i_pCreateInfo_pPushConstantRanges_stageFlags = lean_ctor_get(i_pCreateInfo_pPushConstantRanges, 0);
    // Unboxed scalar area of the range ctor: u32 offset at +0, u32 size at +4.
    struct VkPushConstantRange um_i_pCreateInfo_pPushConstantRanges = {
      .stageFlags = (VkShaderStageFlags)(VkShaderStageFlagBits)lean_unbox_uint32(i_pCreateInfo_pPushConstantRanges_stageFlags),
      .offset = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfo_pPushConstantRanges) + lean_ctor_num_objs(i_pCreateInfo_pPushConstantRanges)) + 0),
      .size = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfo_pPushConstantRanges) + lean_ctor_num_objs(i_pCreateInfo_pPushConstantRanges)) + 4),
    };
    um_pCreateInfo_pPushConstantRanges[i] = um_i_pCreateInfo_pPushConstantRanges;
  }
  struct VkPipelineLayoutCreateInfo um_pCreateInfo = {
    .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
    .pNext = 0,
    .flags = (VkPipelineLayoutCreateFlags)(VkPipelineLayoutCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
    .setLayoutCount = len_pCreateInfo_pSetLayouts,
    .pSetLayouts = um_pCreateInfo_pSetLayouts,
    .pushConstantRangeCount = len_pCreateInfo_pPushConstantRanges,
    .pPushConstantRanges = um_pCreateInfo_pPushConstantRanges,
  };
  VkPipelineLayout out_pPipelineLayout;
  VkResult out_ret = vkCreatePipelineLayout((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pPipelineLayout);
  // BUGFIX: free the unmarshalled arrays; the original leaked both callocs.
  free(um_pCreateInfo_pSetLayouts);
  free(um_pCreateInfo_pPushConstantRanges);
  lean_object *temp, *tuple = lean_box_uint64((uint64_t)out_pPipelineLayout);
  temp = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(temp, 1, tuple);
  tuple = temp;
  return lean_io_result_mk_ok(tuple);
}
LEAN_EXPORT lean_obj_res glue_vkDestroyPipelineLayout(b_lean_obj_arg device, b_lean_obj_arg pipelineLayout, b_lean_obj_arg w) {
  // Unbox the two handles and forward to Vulkan; returns IO Unit.
  VkDevice dev = (VkDevice)lean_unbox_uint64(device);
  VkPipelineLayout layout = (VkPipelineLayout)lean_unbox_uint64(pipelineLayout);
  vkDestroyPipelineLayout(dev, layout, NULL);
  return lean_io_result_mk_ok(lean_box(0));
}
LEAN_EXPORT lean_obj_res glue_vkCreateSampler(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
  // Unmarshal the Lean VkSamplerCreateInfo record and create a sampler,
  // returning (VkResult, Sampler). Boxed enum fields occupy object slots
  // 0..8; unboxed scalars sit in the trailing scalar area of the ctor:
  // the four doubles at offsets 0/8/16/24, then the four u32 VkBool32
  // fields at offsets 32/36/40 (same offsets as the generated original).
  uint8_t *scalars = (uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo));
  VkSamplerCreateInfo info;
  info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
  info.pNext = 0;
  info.flags = (VkSamplerCreateFlags)(VkSamplerCreateFlagBits)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 0));
  info.magFilter = (VkFilter)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 1));
  info.minFilter = (VkFilter)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 2));
  info.mipmapMode = (VkSamplerMipmapMode)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 3));
  info.addressModeU = (VkSamplerAddressMode)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 4));
  info.addressModeV = (VkSamplerAddressMode)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 5));
  info.addressModeW = (VkSamplerAddressMode)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 6));
  info.mipLodBias = (float)*(double*)(scalars + 0);
  info.anisotropyEnable = (VkBool32)*(uint32_t*)(scalars + 32);
  info.maxAnisotropy = (float)*(double*)(scalars + 8);
  info.compareEnable = (VkBool32)*(uint32_t*)(scalars + 36);
  info.compareOp = (VkCompareOp)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 7));
  info.minLod = (float)*(double*)(scalars + 16);
  info.maxLod = (float)*(double*)(scalars + 24);
  info.borderColor = (VkBorderColor)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 8));
  info.unnormalizedCoordinates = (VkBool32)*(uint32_t*)(scalars + 40);
  VkSampler out_pSampler;
  VkResult out_ret = vkCreateSampler((VkDevice)lean_unbox_uint64(device), &info, NULL, &out_pSampler);
  // Pack the (result, handle) pair for the Lean caller.
  lean_object *pair = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)out_pSampler));
  return lean_io_result_mk_ok(pair);
}
LEAN_EXPORT lean_obj_res glue_vkDestroySampler(b_lean_obj_arg device, b_lean_obj_arg sampler, b_lean_obj_arg w) {
  // Unbox the two handles and forward to Vulkan; returns IO Unit.
  VkDevice dev = (VkDevice)lean_unbox_uint64(device);
  VkSampler smp = (VkSampler)lean_unbox_uint64(sampler);
  vkDestroySampler(dev, smp, NULL);
  return lean_io_result_mk_ok(lean_box(0));
}
LEAN_EXPORT lean_obj_res glue_vkCreateDescriptorSetLayout(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
  // Unmarshal the Lean create-info record (flags + array of bindings, each
  // with an array of immutable samplers), call vkCreateDescriptorSetLayout,
  // and return (VkResult, DescriptorSetLayout).
  lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
  lean_object *pCreateInfo_pBindings = lean_ctor_get(pCreateInfo, 1);
  size_t len_pCreateInfo_pBindings = lean_array_size(pCreateInfo_pBindings);
  VkDescriptorSetLayoutBinding* um_pCreateInfo_pBindings = calloc(len_pCreateInfo_pBindings, sizeof(VkDescriptorSetLayoutBinding));
  for (size_t i = 0; i < len_pCreateInfo_pBindings; ++i) {
    lean_object *i_pCreateInfo_pBindings = lean_array_cptr(pCreateInfo_pBindings)[i];
    lean_object *i_pCreateInfo_pBindings_descriptorType = lean_ctor_get(i_pCreateInfo_pBindings, 0);
    lean_object *i_pCreateInfo_pBindings_stageFlags = lean_ctor_get(i_pCreateInfo_pBindings, 1);
    lean_object *i_pCreateInfo_pBindings_pImmutableSamplers = lean_ctor_get(i_pCreateInfo_pBindings, 2);
    size_t len_i_pCreateInfo_pBindings_pImmutableSamplers = lean_array_size(i_pCreateInfo_pBindings_pImmutableSamplers);
    VkSampler* um_i_pCreateInfo_pBindings_pImmutableSamplers = calloc(len_i_pCreateInfo_pBindings_pImmutableSamplers, sizeof(VkSampler));
    // Inner loop index renamed from the original's shadowing `i` to `j` for
    // clarity; behavior is unchanged.
    for (size_t j = 0; j < len_i_pCreateInfo_pBindings_pImmutableSamplers; ++j) {
      lean_object *sampler_obj = lean_array_cptr(i_pCreateInfo_pBindings_pImmutableSamplers)[j];
      um_i_pCreateInfo_pBindings_pImmutableSamplers[j] = (VkSampler)lean_unbox_uint64(sampler_obj);
    }
    struct VkDescriptorSetLayoutBinding um_i_pCreateInfo_pBindings = {
      // u32 `binding` lives at offset 0 of the ctor's unboxed scalar area.
      .binding = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfo_pBindings) + lean_ctor_num_objs(i_pCreateInfo_pBindings)) + 0),
      .descriptorType = (VkDescriptorType)lean_unbox_uint32(i_pCreateInfo_pBindings_descriptorType),
      .descriptorCount = len_i_pCreateInfo_pBindings_pImmutableSamplers,
      .stageFlags = (VkShaderStageFlags)(VkShaderStageFlagBits)lean_unbox_uint32(i_pCreateInfo_pBindings_stageFlags),
      .pImmutableSamplers = um_i_pCreateInfo_pBindings_pImmutableSamplers,
    };
    um_pCreateInfo_pBindings[i] = um_i_pCreateInfo_pBindings;
  }
  struct VkDescriptorSetLayoutCreateInfo um_pCreateInfo = {
    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
    .pNext = 0,
    .flags = (VkDescriptorSetLayoutCreateFlags)(VkDescriptorSetLayoutCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
    .bindingCount = len_pCreateInfo_pBindings,
    .pBindings = um_pCreateInfo_pBindings,
  };
  VkDescriptorSetLayout out_pSetLayout;
  VkResult out_ret = vkCreateDescriptorSetLayout((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pSetLayout);
  // BUGFIX: free the unmarshalled arrays (the original leaked the bindings
  // array and every nested immutable-sampler array).
  for (size_t i = 0; i < len_pCreateInfo_pBindings; ++i) {
    free((void*)um_pCreateInfo_pBindings[i].pImmutableSamplers);
  }
  free(um_pCreateInfo_pBindings);
  lean_object *temp, *tuple = lean_box_uint64((uint64_t)out_pSetLayout);
  temp = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(temp, 1, tuple);
  tuple = temp;
  return lean_io_result_mk_ok(tuple);
}
LEAN_EXPORT lean_obj_res glue_vkDestroyDescriptorSetLayout(b_lean_obj_arg device, b_lean_obj_arg descriptorSetLayout, b_lean_obj_arg w) {
  // Unbox the two handles and forward to Vulkan; returns IO Unit.
  VkDevice dev = (VkDevice)lean_unbox_uint64(device);
  VkDescriptorSetLayout layout = (VkDescriptorSetLayout)lean_unbox_uint64(descriptorSetLayout);
  vkDestroyDescriptorSetLayout(dev, layout, NULL);
  return lean_io_result_mk_ok(lean_box(0));
}
LEAN_EXPORT lean_obj_res glue_vkCreateDescriptorPool(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
  // Unmarshal the Lean create-info record, call vkCreateDescriptorPool, and
  // return (VkResult, DescriptorPool).
  lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
  lean_object *pCreateInfo_pPoolSizes = lean_ctor_get(pCreateInfo, 1);
  size_t len_pCreateInfo_pPoolSizes = lean_array_size(pCreateInfo_pPoolSizes);
  VkDescriptorPoolSize* um_pCreateInfo_pPoolSizes = calloc(len_pCreateInfo_pPoolSizes, sizeof(VkDescriptorPoolSize));
  for (size_t i = 0; i < len_pCreateInfo_pPoolSizes; ++i) {
    lean_object *i_pCreateInfo_pPoolSizes = lean_array_cptr(pCreateInfo_pPoolSizes)[i];
    lean_object *i_pCreateInfo_pPoolSizes_type = lean_ctor_get(i_pCreateInfo_pPoolSizes, 0);
    // u32 `descriptorCount` lives at offset 0 of the ctor's scalar area.
    struct VkDescriptorPoolSize um_i_pCreateInfo_pPoolSizes = {
      .type = (VkDescriptorType)lean_unbox_uint32(i_pCreateInfo_pPoolSizes_type),
      .descriptorCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfo_pPoolSizes) + lean_ctor_num_objs(i_pCreateInfo_pPoolSizes)) + 0),
    };
    um_pCreateInfo_pPoolSizes[i] = um_i_pCreateInfo_pPoolSizes;
  }
  struct VkDescriptorPoolCreateInfo um_pCreateInfo = {
    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
    .pNext = 0,
    .flags = (VkDescriptorPoolCreateFlags)(VkDescriptorPoolCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
    .maxSets = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 0),
    .poolSizeCount = len_pCreateInfo_pPoolSizes,
    .pPoolSizes = um_pCreateInfo_pPoolSizes,
  };
  VkDescriptorPool out_pDescriptorPool;
  VkResult out_ret = vkCreateDescriptorPool((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pDescriptorPool);
  // BUGFIX: free the unmarshalled pool-size array; the original leaked it.
  free(um_pCreateInfo_pPoolSizes);
  lean_object *temp, *tuple = lean_box_uint64((uint64_t)out_pDescriptorPool);
  temp = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(temp, 1, tuple);
  tuple = temp;
  return lean_io_result_mk_ok(tuple);
}
// Lean FFI wrapper for vkDestroyDescriptorPool; handles are Lean-boxed uint64s.
LEAN_EXPORT lean_obj_res glue_vkDestroyDescriptorPool(b_lean_obj_arg device, b_lean_obj_arg descriptorPool, b_lean_obj_arg w) {
VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
VkDescriptorPool c_pool = (VkDescriptorPool)lean_unbox_uint64(descriptorPool);
vkDestroyDescriptorPool(c_device, c_pool, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
// Lean FFI wrapper for vkResetDescriptorPool; flags arrive as a raw C value.
// Returns IO VkResult (boxed uint64).
LEAN_EXPORT lean_obj_res glue_vkResetDescriptorPool(b_lean_obj_arg device, b_lean_obj_arg descriptorPool, VkDescriptorPoolResetFlags flags, b_lean_obj_arg w) {
VkResult out_ret = vkResetDescriptorPool((VkDevice)lean_unbox_uint64(device), (VkDescriptorPool)lean_unbox_uint64(descriptorPool), flags);
// Fix: dropped the unused 'temp' local the generator emitted.
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Lean FFI wrapper for vkAllocateDescriptorSets.
// pAllocateInfo: field 0 = descriptorPool handle, field 1 = array of set-layout
// handles; descriptorSetCount mirrors the array length.
// Returns IO (VkResult x Array VkDescriptorSet).
LEAN_EXPORT lean_obj_res glue_vkAllocateDescriptorSets(b_lean_obj_arg device, b_lean_obj_arg pAllocateInfo, b_lean_obj_arg w) {
lean_object *pAllocateInfo_descriptorPool = lean_ctor_get(pAllocateInfo, 0);
lean_object *pAllocateInfo_pSetLayouts = lean_ctor_get(pAllocateInfo, 1);
size_t len_pAllocateInfo_pSetLayouts = lean_array_size(pAllocateInfo_pSetLayouts);
VkDescriptorSetLayout* um_pAllocateInfo_pSetLayouts = calloc(len_pAllocateInfo_pSetLayouts, sizeof(VkDescriptorSetLayout));
for (size_t i = 0; i < len_pAllocateInfo_pSetLayouts; ++i) {
lean_object *i_pAllocateInfo_pSetLayouts = lean_array_cptr(pAllocateInfo_pSetLayouts)[i];
um_pAllocateInfo_pSetLayouts[i] = (VkDescriptorSetLayout)lean_unbox_uint64(i_pAllocateInfo_pSetLayouts);
}
struct VkDescriptorSetAllocateInfo um_pAllocateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = 0,
.descriptorPool = (VkDescriptorPool)lean_unbox_uint64(pAllocateInfo_descriptorPool),
.descriptorSetCount = len_pAllocateInfo_pSetLayouts,
.pSetLayouts = um_pAllocateInfo_pSetLayouts,
};
VkDescriptorSet* out_pDescriptorSets;
uint32_t len_out_pDescriptorSets = um_pAllocateInfo.descriptorSetCount;
// Fix: out_pDescriptorSets was previously passed to Vulkan uninitialized
// (undefined behavior). Allocate the output array, matching what
// glue_vkAllocateCommandBuffers already does for vkAllocateCommandBuffers.
out_pDescriptorSets = calloc(len_out_pDescriptorSets, sizeof(VkDescriptorSet));
VkResult out_ret = vkAllocateDescriptorSets((VkDevice)lean_unbox_uint64(device), &um_pAllocateInfo, out_pDescriptorSets);
// Fix: the marshalled layout array was previously leaked.
free(um_pAllocateInfo_pSetLayouts);
lean_object *m_out_pDescriptorSets = lean_alloc_array(len_out_pDescriptorSets, len_out_pDescriptorSets);
for (size_t i = 0; i < len_out_pDescriptorSets; ++i) {
VkDescriptorSet i_out_pDescriptorSets = out_pDescriptorSets[i];
lean_array_cptr(m_out_pDescriptorSets)[i] = lean_box_uint64((uint64_t)i_out_pDescriptorSets);
}
// Fix: free the temporary C output array once its contents are boxed.
free(out_pDescriptorSets);
lean_object *temp, *tuple = m_out_pDescriptorSets;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Lean FFI wrapper for vkFreeDescriptorSets.
// pDescriptorSets is a Lean array of boxed VkDescriptorSet handles; it is
// unmarshalled into a temporary C array for the call. Returns IO VkResult.
LEAN_EXPORT lean_obj_res glue_vkFreeDescriptorSets(b_lean_obj_arg device, b_lean_obj_arg descriptorPool, b_lean_obj_arg pDescriptorSets, b_lean_obj_arg w) {
size_t len_pDescriptorSets = lean_array_size(pDescriptorSets);
VkDescriptorSet* um_pDescriptorSets = calloc(len_pDescriptorSets, sizeof(VkDescriptorSet));
for (size_t i = 0; i < len_pDescriptorSets; ++i) {
lean_object *i_pDescriptorSets = lean_array_cptr(pDescriptorSets)[i];
um_pDescriptorSets[i] = (VkDescriptorSet)lean_unbox_uint64(i_pDescriptorSets);
}
VkResult out_ret = vkFreeDescriptorSets((VkDevice)lean_unbox_uint64(device), (VkDescriptorPool)lean_unbox_uint64(descriptorPool), len_pDescriptorSets, um_pDescriptorSets);
// Fix: the temporary handle array was previously leaked; also dropped the
// unused 'temp' local.
free(um_pDescriptorSets);
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Lean FFI wrapper for vkUpdateDescriptorSets.
// Unmarshals two Lean arrays — descriptor writes and descriptor copies — into
// C arrays of VkWriteDescriptorSet / VkCopyDescriptorSet, then issues the call.
// Scalar fields (dstBinding, dstArrayElement, ...) are read from each ctor's
// scalar area at fixed byte offsets; boxed fields via lean_ctor_get.
// NOTE(review): every calloc below (um_pDescriptorWrites, the per-write
// pImageInfo/pBufferInfo/pTexelBufferView arrays, um_pDescriptorCopies) is
// leaked — nothing is freed after vkUpdateDescriptorSets returns.
LEAN_EXPORT lean_obj_res glue_vkUpdateDescriptorSets(b_lean_obj_arg device, b_lean_obj_arg pDescriptorWrites, b_lean_obj_arg pDescriptorCopies, b_lean_obj_arg w) {
size_t len_pDescriptorWrites = lean_array_size(pDescriptorWrites);
VkWriteDescriptorSet* um_pDescriptorWrites = calloc(len_pDescriptorWrites, sizeof(VkWriteDescriptorSet));
for (size_t i = 0; i < len_pDescriptorWrites; ++i) {
lean_object *i_pDescriptorWrites = lean_array_cptr(pDescriptorWrites)[i];
lean_object *i_pDescriptorWrites_dstSet = lean_ctor_get(i_pDescriptorWrites, 0);
lean_object *i_pDescriptorWrites_descriptorType = lean_ctor_get(i_pDescriptorWrites, 1);
lean_object *i_pDescriptorWrites_pImageInfo = lean_ctor_get(i_pDescriptorWrites, 2);
size_t len_i_pDescriptorWrites_pImageInfo = lean_array_size(i_pDescriptorWrites_pImageInfo);
VkDescriptorImageInfo* um_i_pDescriptorWrites_pImageInfo = calloc(len_i_pDescriptorWrites_pImageInfo, sizeof(VkDescriptorImageInfo));
// Inner loop index 'i' intentionally shadows the outer write index.
for (size_t i = 0; i < len_i_pDescriptorWrites_pImageInfo; ++i) {
lean_object *i_i_pDescriptorWrites_pImageInfo = lean_array_cptr(i_pDescriptorWrites_pImageInfo)[i];
lean_object *i_i_pDescriptorWrites_pImageInfo_sampler = lean_ctor_get(i_i_pDescriptorWrites_pImageInfo, 0);
lean_object *i_i_pDescriptorWrites_pImageInfo_imageView = lean_ctor_get(i_i_pDescriptorWrites_pImageInfo, 1);
lean_object *i_i_pDescriptorWrites_pImageInfo_imageLayout = lean_ctor_get(i_i_pDescriptorWrites_pImageInfo, 2);
struct VkDescriptorImageInfo um_i_i_pDescriptorWrites_pImageInfo = {
.sampler = (VkSampler)lean_unbox_uint64(i_i_pDescriptorWrites_pImageInfo_sampler),
.imageView = (VkImageView)lean_unbox_uint64(i_i_pDescriptorWrites_pImageInfo_imageView),
.imageLayout = (VkImageLayout)lean_unbox_uint32(i_i_pDescriptorWrites_pImageInfo_imageLayout),
};
um_i_pDescriptorWrites_pImageInfo[i] = um_i_i_pDescriptorWrites_pImageInfo;
}
lean_object *i_pDescriptorWrites_pBufferInfo = lean_ctor_get(i_pDescriptorWrites, 3);
size_t len_i_pDescriptorWrites_pBufferInfo = lean_array_size(i_pDescriptorWrites_pBufferInfo);
VkDescriptorBufferInfo* um_i_pDescriptorWrites_pBufferInfo = calloc(len_i_pDescriptorWrites_pBufferInfo, sizeof(VkDescriptorBufferInfo));
for (size_t i = 0; i < len_i_pDescriptorWrites_pBufferInfo; ++i) {
lean_object *i_i_pDescriptorWrites_pBufferInfo = lean_array_cptr(i_pDescriptorWrites_pBufferInfo)[i];
lean_object *i_i_pDescriptorWrites_pBufferInfo_buffer = lean_ctor_get(i_i_pDescriptorWrites_pBufferInfo, 0);
struct VkDescriptorBufferInfo um_i_i_pDescriptorWrites_pBufferInfo = {
.buffer = (VkBuffer)lean_unbox_uint64(i_i_pDescriptorWrites_pBufferInfo_buffer),
// offset/range are two consecutive uint64s in the ctor's scalar area.
.offset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_pDescriptorWrites_pBufferInfo) + lean_ctor_num_objs(i_i_pDescriptorWrites_pBufferInfo)) + 0),
.range = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_pDescriptorWrites_pBufferInfo) + lean_ctor_num_objs(i_i_pDescriptorWrites_pBufferInfo)) + 8),
};
um_i_pDescriptorWrites_pBufferInfo[i] = um_i_i_pDescriptorWrites_pBufferInfo;
}
lean_object *i_pDescriptorWrites_pTexelBufferView = lean_ctor_get(i_pDescriptorWrites, 4);
size_t len_i_pDescriptorWrites_pTexelBufferView = lean_array_size(i_pDescriptorWrites_pTexelBufferView);
VkBufferView* um_i_pDescriptorWrites_pTexelBufferView = calloc(len_i_pDescriptorWrites_pTexelBufferView, sizeof(VkBufferView));
for (size_t i = 0; i < len_i_pDescriptorWrites_pTexelBufferView; ++i) {
lean_object *i_i_pDescriptorWrites_pTexelBufferView = lean_array_cptr(i_pDescriptorWrites_pTexelBufferView)[i];
um_i_pDescriptorWrites_pTexelBufferView[i] = (VkBufferView)lean_unbox_uint64(i_i_pDescriptorWrites_pTexelBufferView);
}
struct VkWriteDescriptorSet um_i_pDescriptorWrites = {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = 0,
.dstSet = (VkDescriptorSet)lean_unbox_uint64(i_pDescriptorWrites_dstSet),
.dstBinding = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pDescriptorWrites) + lean_ctor_num_objs(i_pDescriptorWrites)) + 0),
.dstArrayElement = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pDescriptorWrites) + lean_ctor_num_objs(i_pDescriptorWrites)) + 4),
// NOTE(review): descriptorCount is taken from the pImageInfo length only.
// For buffer/texel-buffer descriptor types Vulkan reads descriptorCount
// elements of pBufferInfo/pTexelBufferView instead — confirm the Lean side
// always sizes all three arrays consistently.
.descriptorCount = len_i_pDescriptorWrites_pImageInfo,
.descriptorType = (VkDescriptorType)lean_unbox_uint32(i_pDescriptorWrites_descriptorType),
.pImageInfo = um_i_pDescriptorWrites_pImageInfo,
.pBufferInfo = um_i_pDescriptorWrites_pBufferInfo,
.pTexelBufferView = um_i_pDescriptorWrites_pTexelBufferView,
};
um_pDescriptorWrites[i] = um_i_pDescriptorWrites;
}
size_t len_pDescriptorCopies = lean_array_size(pDescriptorCopies);
VkCopyDescriptorSet* um_pDescriptorCopies = calloc(len_pDescriptorCopies, sizeof(VkCopyDescriptorSet));
for (size_t i = 0; i < len_pDescriptorCopies; ++i) {
lean_object *i_pDescriptorCopies = lean_array_cptr(pDescriptorCopies)[i];
lean_object *i_pDescriptorCopies_srcSet = lean_ctor_get(i_pDescriptorCopies, 0);
lean_object *i_pDescriptorCopies_dstSet = lean_ctor_get(i_pDescriptorCopies, 1);
struct VkCopyDescriptorSet um_i_pDescriptorCopies = {
.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET,
.pNext = 0,
.srcSet = (VkDescriptorSet)lean_unbox_uint64(i_pDescriptorCopies_srcSet),
// Five consecutive uint32 scalars: srcBinding, srcArrayElement,
// dstBinding, dstArrayElement, descriptorCount.
.srcBinding = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pDescriptorCopies) + lean_ctor_num_objs(i_pDescriptorCopies)) + 0),
.srcArrayElement = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pDescriptorCopies) + lean_ctor_num_objs(i_pDescriptorCopies)) + 4),
.dstSet = (VkDescriptorSet)lean_unbox_uint64(i_pDescriptorCopies_dstSet),
.dstBinding = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pDescriptorCopies) + lean_ctor_num_objs(i_pDescriptorCopies)) + 8),
.dstArrayElement = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pDescriptorCopies) + lean_ctor_num_objs(i_pDescriptorCopies)) + 12),
.descriptorCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pDescriptorCopies) + lean_ctor_num_objs(i_pDescriptorCopies)) + 16),
};
um_pDescriptorCopies[i] = um_i_pDescriptorCopies;
}
vkUpdateDescriptorSets((VkDevice)lean_unbox_uint64(device), len_pDescriptorWrites, um_pDescriptorWrites, len_pDescriptorCopies, um_pDescriptorCopies);
return lean_io_result_mk_ok(lean_box(0));
}
// Lean FFI wrapper for vkCreateFramebuffer.
// pCreateInfo: field 0 = flags, field 1 = renderPass handle, field 2 = array of
// VkImageView handles; width/height/layers are three uint32 scalars.
// Returns IO (VkResult x VkFramebuffer).
LEAN_EXPORT lean_obj_res glue_vkCreateFramebuffer(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
lean_object *pCreateInfo_renderPass = lean_ctor_get(pCreateInfo, 1);
lean_object *pCreateInfo_pAttachments = lean_ctor_get(pCreateInfo, 2);
size_t len_pCreateInfo_pAttachments = lean_array_size(pCreateInfo_pAttachments);
VkImageView* um_pCreateInfo_pAttachments = calloc(len_pCreateInfo_pAttachments, sizeof(VkImageView));
for (size_t i = 0; i < len_pCreateInfo_pAttachments; ++i) {
lean_object *i_pCreateInfo_pAttachments = lean_array_cptr(pCreateInfo_pAttachments)[i];
um_pCreateInfo_pAttachments[i] = (VkImageView)lean_unbox_uint64(i_pCreateInfo_pAttachments);
}
struct VkFramebufferCreateInfo um_pCreateInfo = {
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.pNext = 0,
.flags = (VkFramebufferCreateFlags)(VkFramebufferCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
.renderPass = (VkRenderPass)lean_unbox_uint64(pCreateInfo_renderPass),
.attachmentCount = len_pCreateInfo_pAttachments,
.pAttachments = um_pCreateInfo_pAttachments,
.width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 0),
.height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 4),
.layers = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 8),
};
VkFramebuffer out_pFramebuffer;
VkResult out_ret = vkCreateFramebuffer((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pFramebuffer);
// Fix: the marshalled attachment array was previously leaked.
free(um_pCreateInfo_pAttachments);
lean_object *temp, *tuple = lean_box_uint64((uint64_t)out_pFramebuffer);
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Lean FFI wrapper for vkDestroyFramebuffer; handles are Lean-boxed uint64s.
LEAN_EXPORT lean_obj_res glue_vkDestroyFramebuffer(b_lean_obj_arg device, b_lean_obj_arg framebuffer, b_lean_obj_arg w) {
VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
VkFramebuffer c_framebuffer = (VkFramebuffer)lean_unbox_uint64(framebuffer);
vkDestroyFramebuffer(c_device, c_framebuffer, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
// Lean FFI wrapper for vkCreateRenderPass.
// pCreateInfo: field 0 = flags, field 1 = attachments array, field 2 =
// subpasses array, field 3 = dependencies array. Each subpass carries
// input/color attachment-reference arrays, an Option depth-stencil reference
// (Option is encoded as: scalar => none, ctor => some), and preserve indices.
// Returns IO (VkResult x VkRenderPass).
LEAN_EXPORT lean_obj_res glue_vkCreateRenderPass(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
lean_object *pCreateInfo_pAttachments = lean_ctor_get(pCreateInfo, 1);
size_t len_pCreateInfo_pAttachments = lean_array_size(pCreateInfo_pAttachments);
VkAttachmentDescription* um_pCreateInfo_pAttachments = calloc(len_pCreateInfo_pAttachments, sizeof(VkAttachmentDescription));
for (size_t i = 0; i < len_pCreateInfo_pAttachments; ++i) {
lean_object *i_pCreateInfo_pAttachments = lean_array_cptr(pCreateInfo_pAttachments)[i];
lean_object *i_pCreateInfo_pAttachments_flags = lean_ctor_get(i_pCreateInfo_pAttachments, 0);
lean_object *i_pCreateInfo_pAttachments_format = lean_ctor_get(i_pCreateInfo_pAttachments, 1);
lean_object *i_pCreateInfo_pAttachments_samples = lean_ctor_get(i_pCreateInfo_pAttachments, 2);
lean_object *i_pCreateInfo_pAttachments_loadOp = lean_ctor_get(i_pCreateInfo_pAttachments, 3);
lean_object *i_pCreateInfo_pAttachments_storeOp = lean_ctor_get(i_pCreateInfo_pAttachments, 4);
lean_object *i_pCreateInfo_pAttachments_stencilLoadOp = lean_ctor_get(i_pCreateInfo_pAttachments, 5);
lean_object *i_pCreateInfo_pAttachments_stencilStoreOp = lean_ctor_get(i_pCreateInfo_pAttachments, 6);
lean_object *i_pCreateInfo_pAttachments_initialLayout = lean_ctor_get(i_pCreateInfo_pAttachments, 7);
lean_object *i_pCreateInfo_pAttachments_finalLayout = lean_ctor_get(i_pCreateInfo_pAttachments, 8);
struct VkAttachmentDescription um_i_pCreateInfo_pAttachments = {
.flags = (VkAttachmentDescriptionFlags)(VkAttachmentDescriptionFlagBits)lean_unbox_uint32(i_pCreateInfo_pAttachments_flags),
.format = (VkFormat)lean_unbox_uint32(i_pCreateInfo_pAttachments_format),
.samples = (VkSampleCountFlagBits)lean_unbox_uint32(i_pCreateInfo_pAttachments_samples),
.loadOp = (VkAttachmentLoadOp)lean_unbox_uint32(i_pCreateInfo_pAttachments_loadOp),
.storeOp = (VkAttachmentStoreOp)lean_unbox_uint32(i_pCreateInfo_pAttachments_storeOp),
.stencilLoadOp = (VkAttachmentLoadOp)lean_unbox_uint32(i_pCreateInfo_pAttachments_stencilLoadOp),
.stencilStoreOp = (VkAttachmentStoreOp)lean_unbox_uint32(i_pCreateInfo_pAttachments_stencilStoreOp),
.initialLayout = (VkImageLayout)lean_unbox_uint32(i_pCreateInfo_pAttachments_initialLayout),
.finalLayout = (VkImageLayout)lean_unbox_uint32(i_pCreateInfo_pAttachments_finalLayout),
};
um_pCreateInfo_pAttachments[i] = um_i_pCreateInfo_pAttachments;
}
lean_object *pCreateInfo_pSubpasses = lean_ctor_get(pCreateInfo, 2);
size_t len_pCreateInfo_pSubpasses = lean_array_size(pCreateInfo_pSubpasses);
VkSubpassDescription* um_pCreateInfo_pSubpasses = calloc(len_pCreateInfo_pSubpasses, sizeof(VkSubpassDescription));
for (size_t i = 0; i < len_pCreateInfo_pSubpasses; ++i) {
lean_object *i_pCreateInfo_pSubpasses = lean_array_cptr(pCreateInfo_pSubpasses)[i];
lean_object *i_pCreateInfo_pSubpasses_flags = lean_ctor_get(i_pCreateInfo_pSubpasses, 0);
lean_object *i_pCreateInfo_pSubpasses_pipelineBindPoint = lean_ctor_get(i_pCreateInfo_pSubpasses, 1);
lean_object *i_pCreateInfo_pSubpasses_pInputAttachments = lean_ctor_get(i_pCreateInfo_pSubpasses, 2);
size_t len_i_pCreateInfo_pSubpasses_pInputAttachments = lean_array_size(i_pCreateInfo_pSubpasses_pInputAttachments);
VkAttachmentReference* um_i_pCreateInfo_pSubpasses_pInputAttachments = calloc(len_i_pCreateInfo_pSubpasses_pInputAttachments, sizeof(VkAttachmentReference));
for (size_t i = 0; i < len_i_pCreateInfo_pSubpasses_pInputAttachments; ++i) {
lean_object *i_i_pCreateInfo_pSubpasses_pInputAttachments = lean_array_cptr(i_pCreateInfo_pSubpasses_pInputAttachments)[i];
lean_object *i_i_pCreateInfo_pSubpasses_pInputAttachments_layout = lean_ctor_get(i_i_pCreateInfo_pSubpasses_pInputAttachments, 0);
struct VkAttachmentReference um_i_i_pCreateInfo_pSubpasses_pInputAttachments = {
.attachment = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_pCreateInfo_pSubpasses_pInputAttachments) + lean_ctor_num_objs(i_i_pCreateInfo_pSubpasses_pInputAttachments)) + 0),
.layout = (VkImageLayout)lean_unbox_uint32(i_i_pCreateInfo_pSubpasses_pInputAttachments_layout),
};
um_i_pCreateInfo_pSubpasses_pInputAttachments[i] = um_i_i_pCreateInfo_pSubpasses_pInputAttachments;
}
lean_object *i_pCreateInfo_pSubpasses_pColorAttachments = lean_ctor_get(i_pCreateInfo_pSubpasses, 3);
size_t len_i_pCreateInfo_pSubpasses_pColorAttachments = lean_array_size(i_pCreateInfo_pSubpasses_pColorAttachments);
VkAttachmentReference* um_i_pCreateInfo_pSubpasses_pColorAttachments = calloc(len_i_pCreateInfo_pSubpasses_pColorAttachments, sizeof(VkAttachmentReference));
for (size_t i = 0; i < len_i_pCreateInfo_pSubpasses_pColorAttachments; ++i) {
lean_object *i_i_pCreateInfo_pSubpasses_pColorAttachments = lean_array_cptr(i_pCreateInfo_pSubpasses_pColorAttachments)[i];
lean_object *i_i_pCreateInfo_pSubpasses_pColorAttachments_layout = lean_ctor_get(i_i_pCreateInfo_pSubpasses_pColorAttachments, 0);
struct VkAttachmentReference um_i_i_pCreateInfo_pSubpasses_pColorAttachments = {
.attachment = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_i_pCreateInfo_pSubpasses_pColorAttachments) + lean_ctor_num_objs(i_i_pCreateInfo_pSubpasses_pColorAttachments)) + 0),
.layout = (VkImageLayout)lean_unbox_uint32(i_i_pCreateInfo_pSubpasses_pColorAttachments_layout),
};
um_i_pCreateInfo_pSubpasses_pColorAttachments[i] = um_i_i_pCreateInfo_pSubpasses_pColorAttachments;
}
lean_object *i_pCreateInfo_pSubpasses_pDepthStencilAttachment = lean_ctor_get(i_pCreateInfo_pSubpasses, 4);
_Bool is_some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment = !lean_is_scalar(i_pCreateInfo_pSubpasses_pDepthStencilAttachment);
// Fix: the depth-stencil reference was previously a loop-body local whose
// address was stored in um_pCreateInfo_pSubpasses[i] and dereferenced by
// vkCreateRenderPass AFTER the local's lifetime ended (dangling pointer /
// undefined behavior). Heap-allocate it instead; freed after the call.
VkAttachmentReference *um_i_pCreateInfo_pSubpasses_pDepthStencilAttachment = NULL;
if (is_some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment) {
lean_object *some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment = lean_ctor_get(i_pCreateInfo_pSubpasses_pDepthStencilAttachment, 0);
lean_object *some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment_layout = lean_ctor_get(some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment, 0);
um_i_pCreateInfo_pSubpasses_pDepthStencilAttachment = malloc(sizeof(VkAttachmentReference));
um_i_pCreateInfo_pSubpasses_pDepthStencilAttachment->attachment = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment) + lean_ctor_num_objs(some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment)) + 0);
um_i_pCreateInfo_pSubpasses_pDepthStencilAttachment->layout = (VkImageLayout)lean_unbox_uint32(some_i_pCreateInfo_pSubpasses_pDepthStencilAttachment_layout);
}
lean_object *i_pCreateInfo_pSubpasses_pPreserveAttachments = lean_ctor_get(i_pCreateInfo_pSubpasses, 5);
size_t len_i_pCreateInfo_pSubpasses_pPreserveAttachments = lean_array_size(i_pCreateInfo_pSubpasses_pPreserveAttachments);
uint32_t* um_i_pCreateInfo_pSubpasses_pPreserveAttachments = calloc(len_i_pCreateInfo_pSubpasses_pPreserveAttachments, sizeof(uint32_t));
for (size_t i = 0; i < len_i_pCreateInfo_pSubpasses_pPreserveAttachments; ++i) {
lean_object *i_i_pCreateInfo_pSubpasses_pPreserveAttachments = lean_array_cptr(i_pCreateInfo_pSubpasses_pPreserveAttachments)[i];
um_i_pCreateInfo_pSubpasses_pPreserveAttachments[i] = lean_unbox_uint32(i_i_pCreateInfo_pSubpasses_pPreserveAttachments);
}
struct VkSubpassDescription um_i_pCreateInfo_pSubpasses = {
.flags = (VkSubpassDescriptionFlags)(VkSubpassDescriptionFlagBits)lean_unbox_uint32(i_pCreateInfo_pSubpasses_flags),
.pipelineBindPoint = (VkPipelineBindPoint)lean_unbox_uint32(i_pCreateInfo_pSubpasses_pipelineBindPoint),
.inputAttachmentCount = len_i_pCreateInfo_pSubpasses_pInputAttachments,
.pInputAttachments = um_i_pCreateInfo_pSubpasses_pInputAttachments,
.colorAttachmentCount = len_i_pCreateInfo_pSubpasses_pColorAttachments,
.pColorAttachments = um_i_pCreateInfo_pSubpasses_pColorAttachments,
.pResolveAttachments = NULL,
.pDepthStencilAttachment = um_i_pCreateInfo_pSubpasses_pDepthStencilAttachment,
.preserveAttachmentCount = len_i_pCreateInfo_pSubpasses_pPreserveAttachments,
.pPreserveAttachments = um_i_pCreateInfo_pSubpasses_pPreserveAttachments,
};
um_pCreateInfo_pSubpasses[i] = um_i_pCreateInfo_pSubpasses;
}
lean_object *pCreateInfo_pDependencies = lean_ctor_get(pCreateInfo, 3);
size_t len_pCreateInfo_pDependencies = lean_array_size(pCreateInfo_pDependencies);
VkSubpassDependency* um_pCreateInfo_pDependencies = calloc(len_pCreateInfo_pDependencies, sizeof(VkSubpassDependency));
for (size_t i = 0; i < len_pCreateInfo_pDependencies; ++i) {
lean_object *i_pCreateInfo_pDependencies = lean_array_cptr(pCreateInfo_pDependencies)[i];
lean_object *i_pCreateInfo_pDependencies_srcStageMask = lean_ctor_get(i_pCreateInfo_pDependencies, 0);
lean_object *i_pCreateInfo_pDependencies_dstStageMask = lean_ctor_get(i_pCreateInfo_pDependencies, 1);
lean_object *i_pCreateInfo_pDependencies_srcAccessMask = lean_ctor_get(i_pCreateInfo_pDependencies, 2);
lean_object *i_pCreateInfo_pDependencies_dstAccessMask = lean_ctor_get(i_pCreateInfo_pDependencies, 3);
lean_object *i_pCreateInfo_pDependencies_dependencyFlags = lean_ctor_get(i_pCreateInfo_pDependencies, 4);
struct VkSubpassDependency um_i_pCreateInfo_pDependencies = {
.srcSubpass = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfo_pDependencies) + lean_ctor_num_objs(i_pCreateInfo_pDependencies)) + 0),
.dstSubpass = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pCreateInfo_pDependencies) + lean_ctor_num_objs(i_pCreateInfo_pDependencies)) + 4),
.srcStageMask = (VkPipelineStageFlags)(VkPipelineStageFlagBits)lean_unbox_uint32(i_pCreateInfo_pDependencies_srcStageMask),
.dstStageMask = (VkPipelineStageFlags)(VkPipelineStageFlagBits)lean_unbox_uint32(i_pCreateInfo_pDependencies_dstStageMask),
.srcAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pCreateInfo_pDependencies_srcAccessMask),
.dstAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pCreateInfo_pDependencies_dstAccessMask),
.dependencyFlags = (VkDependencyFlags)(VkDependencyFlagBits)lean_unbox_uint32(i_pCreateInfo_pDependencies_dependencyFlags),
};
um_pCreateInfo_pDependencies[i] = um_i_pCreateInfo_pDependencies;
}
struct VkRenderPassCreateInfo um_pCreateInfo = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.pNext = 0,
.flags = (VkRenderPassCreateFlags)(VkRenderPassCreateFlagBits)lean_unbox_uint32(pCreateInfo_flags),
.attachmentCount = len_pCreateInfo_pAttachments,
.pAttachments = um_pCreateInfo_pAttachments,
.subpassCount = len_pCreateInfo_pSubpasses,
.pSubpasses = um_pCreateInfo_pSubpasses,
.dependencyCount = len_pCreateInfo_pDependencies,
.pDependencies = um_pCreateInfo_pDependencies,
};
VkRenderPass out_pRenderPass;
VkResult out_ret = vkCreateRenderPass((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pRenderPass);
// Fix: release all marshalling buffers (previously leaked).
for (size_t i = 0; i < len_pCreateInfo_pSubpasses; ++i) {
free((void*)um_pCreateInfo_pSubpasses[i].pInputAttachments);
free((void*)um_pCreateInfo_pSubpasses[i].pColorAttachments);
free((void*)um_pCreateInfo_pSubpasses[i].pDepthStencilAttachment);
free((void*)um_pCreateInfo_pSubpasses[i].pPreserveAttachments);
}
free(um_pCreateInfo_pSubpasses);
free(um_pCreateInfo_pAttachments);
free(um_pCreateInfo_pDependencies);
lean_object *temp, *tuple = lean_box_uint64((uint64_t)out_pRenderPass);
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Lean FFI wrapper for vkDestroyRenderPass; handles are Lean-boxed uint64s.
LEAN_EXPORT lean_obj_res glue_vkDestroyRenderPass(b_lean_obj_arg device, b_lean_obj_arg renderPass, b_lean_obj_arg w) {
VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
VkRenderPass c_renderPass = (VkRenderPass)lean_unbox_uint64(renderPass);
vkDestroyRenderPass(c_device, c_renderPass, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
// Lean FFI wrapper for vkGetRenderAreaGranularity.
// Marshals the returned VkExtent2D into a Lean ctor with no boxed fields and
// an 8-byte scalar area holding width (offset 0) and height (offset 4).
LEAN_EXPORT lean_obj_res glue_vkGetRenderAreaGranularity(b_lean_obj_arg device, b_lean_obj_arg renderPass, b_lean_obj_arg w) {
VkExtent2D out_pGranularity;
vkGetRenderAreaGranularity((VkDevice)lean_unbox_uint64(device), (VkRenderPass)lean_unbox_uint64(renderPass), &out_pGranularity);
lean_object *m_out_pGranularity = lean_alloc_ctor(0, 0, 8);
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pGranularity) + 0) = out_pGranularity.width;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pGranularity) + 4) = out_pGranularity.height;
// Fix: dropped the unused 'temp' local the generator emitted.
return lean_io_result_mk_ok(m_out_pGranularity);
}
// Lean FFI wrapper for vkCreateCommandPool.
// pCreateInfo: field 0 = flags (boxed uint32); queueFamilyIndex is the first
// uint32 in the ctor's scalar area. Returns IO (VkResult x VkCommandPool).
LEAN_EXPORT lean_obj_res glue_vkCreateCommandPool(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
uint8_t *scalar_base = (uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo));
struct VkCommandPoolCreateInfo createInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = 0,
.flags = (VkCommandPoolCreateFlags)(VkCommandPoolCreateFlagBits)lean_unbox_uint32(lean_ctor_get(pCreateInfo, 0)),
.queueFamilyIndex = *(uint32_t*)(scalar_base + 0),
};
VkCommandPool pool;
VkResult res = vkCreateCommandPool((VkDevice)lean_unbox_uint64(device), &createInfo, NULL, &pool);
// Build the Lean pair (res, pool).
lean_object *pair = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)res));
lean_ctor_set(pair, 1, lean_box_uint64((uint64_t)pool));
return lean_io_result_mk_ok(pair);
}
// Lean FFI wrapper for vkDestroyCommandPool; handles are Lean-boxed uint64s.
LEAN_EXPORT lean_obj_res glue_vkDestroyCommandPool(b_lean_obj_arg device, b_lean_obj_arg commandPool, b_lean_obj_arg w) {
VkDevice c_device = (VkDevice)lean_unbox_uint64(device);
VkCommandPool c_pool = (VkCommandPool)lean_unbox_uint64(commandPool);
vkDestroyCommandPool(c_device, c_pool, NULL);
return lean_io_result_mk_ok(lean_box(0));
}
// Lean FFI wrapper for vkResetCommandPool; flags arrive as a raw C value.
// Returns IO VkResult (boxed uint64).
LEAN_EXPORT lean_obj_res glue_vkResetCommandPool(b_lean_obj_arg device, b_lean_obj_arg commandPool, VkCommandPoolResetFlags flags, b_lean_obj_arg w) {
VkResult out_ret = vkResetCommandPool((VkDevice)lean_unbox_uint64(device), (VkCommandPool)lean_unbox_uint64(commandPool), flags);
// Fix: dropped the unused 'temp' local the generator emitted.
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Lean FFI wrapper for vkAllocateCommandBuffers.
// pAllocateInfo: field 0 = commandPool, field 1 = level; commandBufferCount is
// the first uint32 scalar. Returns IO (VkResult x Array VkCommandBuffer).
LEAN_EXPORT lean_obj_res glue_vkAllocateCommandBuffers(b_lean_obj_arg device, b_lean_obj_arg pAllocateInfo, b_lean_obj_arg w) {
lean_object *pAllocateInfo_commandPool = lean_ctor_get(pAllocateInfo, 0);
lean_object *pAllocateInfo_level = lean_ctor_get(pAllocateInfo, 1);
struct VkCommandBufferAllocateInfo um_pAllocateInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.pNext = 0,
.commandPool = (VkCommandPool)lean_unbox_uint64(pAllocateInfo_commandPool),
.level = (VkCommandBufferLevel)lean_unbox_uint32(pAllocateInfo_level),
.commandBufferCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pAllocateInfo) + lean_ctor_num_objs(pAllocateInfo)) + 0),
};
VkCommandBuffer* out_pCommandBuffers;
uint32_t len_out_pCommandBuffers = um_pAllocateInfo.commandBufferCount;
out_pCommandBuffers = calloc(len_out_pCommandBuffers, sizeof(VkCommandBuffer));
VkResult out_ret = vkAllocateCommandBuffers((VkDevice)lean_unbox_uint64(device), &um_pAllocateInfo, out_pCommandBuffers);
lean_object *m_out_pCommandBuffers = lean_alloc_array(len_out_pCommandBuffers, len_out_pCommandBuffers);
for (size_t i = 0; i < len_out_pCommandBuffers; ++i) {
VkCommandBuffer i_out_pCommandBuffers = out_pCommandBuffers[i];
lean_array_cptr(m_out_pCommandBuffers)[i] = lean_box_uint64((uint64_t)i_out_pCommandBuffers);
}
// Fix: the temporary C output array was previously leaked.
free(out_pCommandBuffers);
lean_object *temp, *tuple = m_out_pCommandBuffers;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
// Lean FFI wrapper for vkFreeCommandBuffers.
// pCommandBuffers is a Lean array of boxed VkCommandBuffer handles,
// unmarshalled into a temporary C array for the call.
LEAN_EXPORT lean_obj_res glue_vkFreeCommandBuffers(b_lean_obj_arg device, b_lean_obj_arg commandPool, b_lean_obj_arg pCommandBuffers, b_lean_obj_arg w) {
size_t len_pCommandBuffers = lean_array_size(pCommandBuffers);
VkCommandBuffer* um_pCommandBuffers = calloc(len_pCommandBuffers, sizeof(VkCommandBuffer));
for (size_t i = 0; i < len_pCommandBuffers; ++i) {
lean_object *i_pCommandBuffers = lean_array_cptr(pCommandBuffers)[i];
um_pCommandBuffers[i] = (VkCommandBuffer)lean_unbox_uint64(i_pCommandBuffers);
}
vkFreeCommandBuffers((VkDevice)lean_unbox_uint64(device), (VkCommandPool)lean_unbox_uint64(commandPool), len_pCommandBuffers, um_pCommandBuffers);
// Fix: the temporary handle array was previously leaked.
free(um_pCommandBuffers);
return lean_io_result_mk_ok(lean_box(0));
}
// Lean FFI wrapper for vkBeginCommandBuffer.
// pBeginInfo: field 0 = flags; field 1 = Option inheritance-info (scalar =>
// none, ctor => some). When present, the inheritance struct is built in a
// function-scope local whose address stays valid for the duration of the call.
// Returns IO VkResult.
LEAN_EXPORT lean_obj_res glue_vkBeginCommandBuffer(b_lean_obj_arg commandBuffer, b_lean_obj_arg pBeginInfo, b_lean_obj_arg w) {
lean_object *pBeginInfo_flags = lean_ctor_get(pBeginInfo, 0);
lean_object *pBeginInfo_pInheritanceInfo = lean_ctor_get(pBeginInfo, 1);
_Bool is_some_pBeginInfo_pInheritanceInfo = !lean_is_scalar(pBeginInfo_pInheritanceInfo);
VkCommandBufferInheritanceInfo um_pBeginInfo_pInheritanceInfo;
if (is_some_pBeginInfo_pInheritanceInfo) {
lean_object *some_pBeginInfo_pInheritanceInfo = lean_ctor_get(pBeginInfo_pInheritanceInfo, 0);
lean_object *some_pBeginInfo_pInheritanceInfo_renderPass = lean_ctor_get(some_pBeginInfo_pInheritanceInfo, 0);
lean_object *some_pBeginInfo_pInheritanceInfo_framebuffer = lean_ctor_get(some_pBeginInfo_pInheritanceInfo, 1);
lean_object *some_pBeginInfo_pInheritanceInfo_queryFlags = lean_ctor_get(some_pBeginInfo_pInheritanceInfo, 2);
lean_object *some_pBeginInfo_pInheritanceInfo_pipelineStatistics = lean_ctor_get(some_pBeginInfo_pInheritanceInfo, 3);
struct VkCommandBufferInheritanceInfo um_some_pBeginInfo_pInheritanceInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
.pNext = 0,
.renderPass = (VkRenderPass)lean_unbox_uint64(some_pBeginInfo_pInheritanceInfo_renderPass),
// subpass and occlusionQueryEnable are consecutive uint32 scalars.
.subpass = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pBeginInfo_pInheritanceInfo) + lean_ctor_num_objs(some_pBeginInfo_pInheritanceInfo)) + 0),
.framebuffer = (VkFramebuffer)lean_unbox_uint64(some_pBeginInfo_pInheritanceInfo_framebuffer),
.occlusionQueryEnable = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(some_pBeginInfo_pInheritanceInfo) + lean_ctor_num_objs(some_pBeginInfo_pInheritanceInfo)) + 4),
.queryFlags = (VkQueryControlFlags)(VkQueryControlFlagBits)lean_unbox_uint32(some_pBeginInfo_pInheritanceInfo_queryFlags),
.pipelineStatistics = (VkQueryPipelineStatisticFlags)(VkQueryPipelineStatisticFlagBits)lean_unbox_uint32(some_pBeginInfo_pInheritanceInfo_pipelineStatistics),
};
um_pBeginInfo_pInheritanceInfo = um_some_pBeginInfo_pInheritanceInfo;
}
struct VkCommandBufferBeginInfo um_pBeginInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = 0,
.flags = (VkCommandBufferUsageFlags)(VkCommandBufferUsageFlagBits)lean_unbox_uint32(pBeginInfo_flags),
.pInheritanceInfo = (is_some_pBeginInfo_pInheritanceInfo ? &um_pBeginInfo_pInheritanceInfo : NULL),
};
VkResult out_ret = vkBeginCommandBuffer((VkCommandBuffer)lean_unbox_uint64(commandBuffer), &um_pBeginInfo);
// Fix: dropped the unused 'temp' local the generator emitted.
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Lean FFI wrapper for vkEndCommandBuffer. Returns IO VkResult (boxed uint64).
LEAN_EXPORT lean_obj_res glue_vkEndCommandBuffer(b_lean_obj_arg commandBuffer, b_lean_obj_arg w) {
VkResult out_ret = vkEndCommandBuffer((VkCommandBuffer)lean_unbox_uint64(commandBuffer));
// Fix: dropped the unused 'temp' local the generator emitted.
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Lean FFI wrapper for vkResetCommandBuffer; flags arrive as a raw C value.
// Returns IO VkResult (boxed uint64).
LEAN_EXPORT lean_obj_res glue_vkResetCommandBuffer(b_lean_obj_arg commandBuffer, VkCommandBufferResetFlags flags, b_lean_obj_arg w) {
VkResult out_ret = vkResetCommandBuffer((VkCommandBuffer)lean_unbox_uint64(commandBuffer), flags);
// Fix: dropped the unused 'temp' local the generator emitted.
return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
// Lean FFI wrapper for vkCmdBindPipeline. The bind point arrives as a raw C
// enum; commandBuffer and pipeline arrive as Lean-boxed 64-bit handles.
LEAN_EXPORT lean_obj_res glue_vkCmdBindPipeline(b_lean_obj_arg commandBuffer, VkPipelineBindPoint pipelineBindPoint, b_lean_obj_arg pipeline, b_lean_obj_arg w) {
VkCommandBuffer c_commandBuffer = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
VkPipeline c_pipeline = (VkPipeline)lean_unbox_uint64(pipeline);
vkCmdBindPipeline(c_commandBuffer, pipelineBindPoint, c_pipeline);
return lean_io_result_mk_ok(lean_box(0));
}
// Lean FFI wrapper for vkCmdSetViewport.
// Each viewport is a Lean ctor with six Float (double) scalars at offsets
// 0..40, narrowed to the float fields of VkViewport.
LEAN_EXPORT lean_obj_res glue_vkCmdSetViewport(b_lean_obj_arg commandBuffer, uint32_t firstViewport, b_lean_obj_arg pViewports, b_lean_obj_arg w) {
size_t len_pViewports = lean_array_size(pViewports);
VkViewport* um_pViewports = calloc(len_pViewports, sizeof(VkViewport));
for (size_t i = 0; i < len_pViewports; ++i) {
lean_object *i_pViewports = lean_array_cptr(pViewports)[i];
struct VkViewport um_i_pViewports = {
.x = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pViewports) + lean_ctor_num_objs(i_pViewports)) + 0),
.y = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pViewports) + lean_ctor_num_objs(i_pViewports)) + 8),
.width = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pViewports) + lean_ctor_num_objs(i_pViewports)) + 16),
.height = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pViewports) + lean_ctor_num_objs(i_pViewports)) + 24),
.minDepth = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pViewports) + lean_ctor_num_objs(i_pViewports)) + 32),
.maxDepth = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pViewports) + lean_ctor_num_objs(i_pViewports)) + 40),
};
um_pViewports[i] = um_i_pViewports;
}
vkCmdSetViewport((VkCommandBuffer)lean_unbox_uint64(commandBuffer), firstViewport, len_pViewports, um_pViewports);
// Fix: the marshalled viewport array was previously leaked.
free(um_pViewports);
return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdSetScissor: unmarshals a Lean array of Rect2D values
// (offset ctor + extent ctor, each with two uint32 scalar fields) into a
// temporary VkRect2D array, records the command, and releases the array.
LEAN_EXPORT lean_obj_res glue_vkCmdSetScissor(b_lean_obj_arg commandBuffer, uint32_t firstScissor, b_lean_obj_arg pScissors, b_lean_obj_arg w) {
    size_t len_pScissors = lean_array_size(pScissors);
    VkRect2D* um_pScissors = calloc(len_pScissors, sizeof(VkRect2D));
    for (size_t i = 0; i < len_pScissors; ++i) {
        lean_object *rect = lean_array_cptr(pScissors)[i];
        lean_object *off = lean_ctor_get(rect, 0);
        uint8_t *off_sc = (uint8_t*)(lean_ctor_obj_cptr(off) + lean_ctor_num_objs(off));
        lean_object *ext = lean_ctor_get(rect, 1);
        uint8_t *ext_sc = (uint8_t*)(lean_ctor_obj_cptr(ext) + lean_ctor_num_objs(ext));
        um_pScissors[i] = (VkRect2D){
            .offset = { .x = *(uint32_t*)(off_sc + 0), .y = *(uint32_t*)(off_sc + 4) },
            .extent = { .width = *(uint32_t*)(ext_sc + 0), .height = *(uint32_t*)(ext_sc + 4) },
        };
    }
    vkCmdSetScissor((VkCommandBuffer)lean_unbox_uint64(commandBuffer), firstScissor, len_pScissors, um_pScissors);
    // Fix for leak in the generated code: Vulkan copies the rects at record time.
    free(um_pScissors);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdSetLineWidth: Lean Float arrives as a C double and is
// narrowed implicitly to Vulkan's float parameter.
LEAN_EXPORT lean_obj_res glue_vkCmdSetLineWidth(b_lean_obj_arg commandBuffer, double lineWidth, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdSetLineWidth(cb, lineWidth);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdSetDepthBias: three Lean Floats arrive as doubles and are
// narrowed implicitly to Vulkan's float parameters.
LEAN_EXPORT lean_obj_res glue_vkCmdSetDepthBias(b_lean_obj_arg commandBuffer, double depthBiasConstantFactor, double depthBiasClamp, double depthBiasSlopeFactor, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdSetDepthBias(cb, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdSetDepthBounds: forwards the min/max depth bounds (doubles,
// implicitly narrowed to float) to the command buffer.
LEAN_EXPORT lean_obj_res glue_vkCmdSetDepthBounds(b_lean_obj_arg commandBuffer, double minDepthBounds, double maxDepthBounds, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdSetDepthBounds(cb, minDepthBounds, maxDepthBounds);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdSetStencilCompareMask: forwards the stencil-face flags and
// compare mask unchanged.
LEAN_EXPORT lean_obj_res glue_vkCmdSetStencilCompareMask(b_lean_obj_arg commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdSetStencilCompareMask(cb, faceMask, compareMask);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdSetStencilWriteMask: forwards the stencil-face flags and
// write mask unchanged.
LEAN_EXPORT lean_obj_res glue_vkCmdSetStencilWriteMask(b_lean_obj_arg commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdSetStencilWriteMask(cb, faceMask, writeMask);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdSetStencilReference: forwards the stencil-face flags and
// reference value unchanged.
LEAN_EXPORT lean_obj_res glue_vkCmdSetStencilReference(b_lean_obj_arg commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdSetStencilReference(cb, faceMask, reference);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdBindDescriptorSets: unmarshals the descriptor-set handle
// array (boxed UInt64s) and the dynamic-offset array (boxed UInt32s) into
// temporary C arrays, records the command, then frees both arrays.
LEAN_EXPORT lean_obj_res glue_vkCmdBindDescriptorSets(b_lean_obj_arg commandBuffer, VkPipelineBindPoint pipelineBindPoint, b_lean_obj_arg layout, uint32_t firstSet, b_lean_obj_arg pDescriptorSets, b_lean_obj_arg pDynamicOffsets, b_lean_obj_arg w) {
    size_t len_pDescriptorSets = lean_array_size(pDescriptorSets);
    VkDescriptorSet* um_pDescriptorSets = calloc(len_pDescriptorSets, sizeof(VkDescriptorSet));
    for (size_t i = 0; i < len_pDescriptorSets; ++i)
        um_pDescriptorSets[i] = (VkDescriptorSet)lean_unbox_uint64(lean_array_cptr(pDescriptorSets)[i]);
    size_t len_pDynamicOffsets = lean_array_size(pDynamicOffsets);
    uint32_t* um_pDynamicOffsets = calloc(len_pDynamicOffsets, sizeof(uint32_t));
    for (size_t i = 0; i < len_pDynamicOffsets; ++i)
        um_pDynamicOffsets[i] = lean_unbox_uint32(lean_array_cptr(pDynamicOffsets)[i]);
    vkCmdBindDescriptorSets((VkCommandBuffer)lean_unbox_uint64(commandBuffer), pipelineBindPoint, (VkPipelineLayout)lean_unbox_uint64(layout), firstSet, len_pDescriptorSets, um_pDescriptorSets, len_pDynamicOffsets, um_pDynamicOffsets);
    // Fix for leaks in the generated code: both arrays are copied at record time.
    free(um_pDynamicOffsets);
    free(um_pDescriptorSets);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdBindIndexBuffer: binds `buffer` at `offset` with the given
// index type; handles arrive boxed as Lean UInt64s.
LEAN_EXPORT lean_obj_res glue_vkCmdBindIndexBuffer(b_lean_obj_arg commandBuffer, b_lean_obj_arg buffer, VkDeviceSize offset, VkIndexType indexType, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    VkBuffer buf = (VkBuffer)lean_unbox_uint64(buffer);
    vkCmdBindIndexBuffer(cb, buf, offset, indexType);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdBindVertexBuffers: unmarshals parallel Lean arrays of buffer
// handles and device-size offsets, records the command, then frees both.
LEAN_EXPORT lean_obj_res glue_vkCmdBindVertexBuffers(b_lean_obj_arg commandBuffer, uint32_t firstBinding, b_lean_obj_arg pBuffers, b_lean_obj_arg pOffsets, b_lean_obj_arg w) {
    size_t len_pBuffers = lean_array_size(pBuffers);
    size_t len_pOffsets = lean_array_size(pOffsets);
    // vkCmdBindVertexBuffers reads `bindingCount` entries from BOTH arrays; a
    // shorter pOffsets would be read out of bounds.  Mirror the generator's
    // abort() convention for shape mismatches (see the ClearColorValue cases).
    if (len_pOffsets != len_pBuffers) abort();
    VkBuffer* um_pBuffers = calloc(len_pBuffers, sizeof(VkBuffer));
    VkDeviceSize* um_pOffsets = calloc(len_pBuffers, sizeof(VkDeviceSize));
    for (size_t i = 0; i < len_pBuffers; ++i) {
        um_pBuffers[i] = (VkBuffer)lean_unbox_uint64(lean_array_cptr(pBuffers)[i]);
        um_pOffsets[i] = (VkDeviceSize)lean_unbox_uint64(lean_array_cptr(pOffsets)[i]);
    }
    vkCmdBindVertexBuffers((VkCommandBuffer)lean_unbox_uint64(commandBuffer), firstBinding, len_pBuffers, um_pBuffers, um_pOffsets);
    // Fix for leaks in the generated code: both arrays are copied at record time.
    free(um_pOffsets);
    free(um_pBuffers);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdDraw: forwards the four draw parameters unchanged.
LEAN_EXPORT lean_obj_res glue_vkCmdDraw(b_lean_obj_arg commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdDraw(cb, vertexCount, instanceCount, firstVertex, firstInstance);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdDrawIndexed.
// NOTE(review): Vulkan's `vertexOffset` parameter is int32_t but arrives here
// as uint32_t; the implicit conversion preserves the bit pattern on two's-
// complement targets — confirm the generator intends this for negative offsets.
LEAN_EXPORT lean_obj_res glue_vkCmdDrawIndexed(b_lean_obj_arg commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, uint32_t vertexOffset, uint32_t firstInstance, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdDrawIndexed(cb, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdDrawIndirect: draws from parameters stored in `buffer` at
// `offset`; handles arrive boxed as Lean UInt64s.
LEAN_EXPORT lean_obj_res glue_vkCmdDrawIndirect(b_lean_obj_arg commandBuffer, b_lean_obj_arg buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    VkBuffer buf = (VkBuffer)lean_unbox_uint64(buffer);
    vkCmdDrawIndirect(cb, buf, offset, drawCount, stride);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdDrawIndexedIndirect: indexed variant of the indirect draw;
// handles arrive boxed as Lean UInt64s.
LEAN_EXPORT lean_obj_res glue_vkCmdDrawIndexedIndirect(b_lean_obj_arg commandBuffer, b_lean_obj_arg buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    VkBuffer buf = (VkBuffer)lean_unbox_uint64(buffer);
    vkCmdDrawIndexedIndirect(cb, buf, offset, drawCount, stride);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdDispatch: forwards the three workgroup counts unchanged.
LEAN_EXPORT lean_obj_res glue_vkCmdDispatch(b_lean_obj_arg commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    vkCmdDispatch(cb, groupCountX, groupCountY, groupCountZ);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdDispatchIndirect: dispatches using parameters stored in
// `buffer` at `offset`; handles arrive boxed as Lean UInt64s.
LEAN_EXPORT lean_obj_res glue_vkCmdDispatchIndirect(b_lean_obj_arg commandBuffer, b_lean_obj_arg buffer, VkDeviceSize offset, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    VkBuffer buf = (VkBuffer)lean_unbox_uint64(buffer);
    vkCmdDispatchIndirect(cb, buf, offset);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdCopyBuffer: unmarshals a Lean array of BufferCopy regions
// (three UInt64 scalar fields per ctor), records the copy, then frees the
// scratch array.
LEAN_EXPORT lean_obj_res glue_vkCmdCopyBuffer(b_lean_obj_arg commandBuffer, b_lean_obj_arg srcBuffer, b_lean_obj_arg dstBuffer, b_lean_obj_arg pRegions, b_lean_obj_arg w) {
    size_t len_pRegions = lean_array_size(pRegions);
    VkBufferCopy* um_pRegions = calloc(len_pRegions, sizeof(VkBufferCopy));
    for (size_t i = 0; i < len_pRegions; ++i) {
        lean_object *r = lean_array_cptr(pRegions)[i];
        // Scalar fields live after the object pointers in the ctor; byte offsets.
        uint8_t *sc = (uint8_t*)(lean_ctor_obj_cptr(r) + lean_ctor_num_objs(r));
        um_pRegions[i] = (VkBufferCopy){
            .srcOffset = *(uint64_t*)(sc + 0),
            .dstOffset = *(uint64_t*)(sc + 8),
            .size = *(uint64_t*)(sc + 16),
        };
    }
    vkCmdCopyBuffer((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkBuffer)lean_unbox_uint64(srcBuffer), (VkBuffer)lean_unbox_uint64(dstBuffer), len_pRegions, um_pRegions);
    // Fix for leak in the generated code: regions are copied at record time.
    free(um_pRegions);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdCopyImage: unmarshals a Lean array of ImageCopy regions.
// Each region ctor holds five sub-objects — src subresource, src offset,
// dst subresource, dst offset, extent — whose scalar fields live after the
// object pointers in each ctor.  Frees the scratch array after recording.
LEAN_EXPORT lean_obj_res glue_vkCmdCopyImage(b_lean_obj_arg commandBuffer, b_lean_obj_arg srcImage, VkImageLayout srcImageLayout, b_lean_obj_arg dstImage, VkImageLayout dstImageLayout, b_lean_obj_arg pRegions, b_lean_obj_arg w) {
    size_t len_pRegions = lean_array_size(pRegions);
    VkImageCopy* um_pRegions = calloc(len_pRegions, sizeof(VkImageCopy));
    for (size_t i = 0; i < len_pRegions; ++i) {
        lean_object *r = lean_array_cptr(pRegions)[i];
        lean_object *ssub = lean_ctor_get(r, 0);
        uint8_t *ssub_sc = (uint8_t*)(lean_ctor_obj_cptr(ssub) + lean_ctor_num_objs(ssub));
        um_pRegions[i].srcSubresource = (VkImageSubresourceLayers){
            .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(ssub, 0)),
            .mipLevel = *(uint32_t*)(ssub_sc + 0),
            .baseArrayLayer = *(uint32_t*)(ssub_sc + 4),
            .layerCount = *(uint32_t*)(ssub_sc + 8),
        };
        lean_object *soff = lean_ctor_get(r, 1);
        uint8_t *soff_sc = (uint8_t*)(lean_ctor_obj_cptr(soff) + lean_ctor_num_objs(soff));
        um_pRegions[i].srcOffset = (VkOffset3D){
            .x = *(uint32_t*)(soff_sc + 0),
            .y = *(uint32_t*)(soff_sc + 4),
            .z = *(uint32_t*)(soff_sc + 8),
        };
        lean_object *dsub = lean_ctor_get(r, 2);
        uint8_t *dsub_sc = (uint8_t*)(lean_ctor_obj_cptr(dsub) + lean_ctor_num_objs(dsub));
        um_pRegions[i].dstSubresource = (VkImageSubresourceLayers){
            .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(dsub, 0)),
            .mipLevel = *(uint32_t*)(dsub_sc + 0),
            .baseArrayLayer = *(uint32_t*)(dsub_sc + 4),
            .layerCount = *(uint32_t*)(dsub_sc + 8),
        };
        lean_object *doff = lean_ctor_get(r, 3);
        uint8_t *doff_sc = (uint8_t*)(lean_ctor_obj_cptr(doff) + lean_ctor_num_objs(doff));
        um_pRegions[i].dstOffset = (VkOffset3D){
            .x = *(uint32_t*)(doff_sc + 0),
            .y = *(uint32_t*)(doff_sc + 4),
            .z = *(uint32_t*)(doff_sc + 8),
        };
        lean_object *ext = lean_ctor_get(r, 4);
        uint8_t *ext_sc = (uint8_t*)(lean_ctor_obj_cptr(ext) + lean_ctor_num_objs(ext));
        um_pRegions[i].extent = (VkExtent3D){
            .width = *(uint32_t*)(ext_sc + 0),
            .height = *(uint32_t*)(ext_sc + 4),
            .depth = *(uint32_t*)(ext_sc + 8),
        };
    }
    vkCmdCopyImage((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkImage)lean_unbox_uint64(srcImage), srcImageLayout, (VkImage)lean_unbox_uint64(dstImage), dstImageLayout, len_pRegions, um_pRegions);
    // Fix for leak in the generated code: regions are copied at record time.
    free(um_pRegions);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdBlitImage: unmarshals a Lean array of ImageBlit regions and
// records the blit with the requested filter.  Frees the scratch array.
LEAN_EXPORT lean_obj_res glue_vkCmdBlitImage(b_lean_obj_arg commandBuffer, b_lean_obj_arg srcImage, VkImageLayout srcImageLayout, b_lean_obj_arg dstImage, VkImageLayout dstImageLayout, b_lean_obj_arg pRegions, VkFilter filter, b_lean_obj_arg w) {
    size_t len_pRegions = lean_array_size(pRegions);
    VkImageBlit* um_pRegions = calloc(len_pRegions, sizeof(VkImageBlit));
    for (size_t i = 0; i < len_pRegions; ++i) {
        lean_object *r = lean_array_cptr(pRegions)[i];
        lean_object *ssub = lean_ctor_get(r, 0);
        uint8_t *ssub_sc = (uint8_t*)(lean_ctor_obj_cptr(ssub) + lean_ctor_num_objs(ssub));
        um_pRegions[i].srcSubresource = (VkImageSubresourceLayers){
            .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(ssub, 0)),
            .mipLevel = *(uint32_t*)(ssub_sc + 0),
            .baseArrayLayer = *(uint32_t*)(ssub_sc + 4),
            .layerCount = *(uint32_t*)(ssub_sc + 8),
        };
        // VkImageBlit.srcOffsets/dstOffsets are VkOffset3D[2] arrays, but the
        // generated Lean type carries only ONE offset, and the original
        // `.srcOffsets = <struct>` initializer is not valid C (array member
        // initialized from a struct).  Store the single marshaled offset as
        // corner [0]; corner [1] stays zero from calloc.
        // FIXME(review): the binding generator should marshal both corners,
        // otherwise every blit region is degenerate — confirm upstream.
        lean_object *soff = lean_ctor_get(r, 1);
        uint8_t *soff_sc = (uint8_t*)(lean_ctor_obj_cptr(soff) + lean_ctor_num_objs(soff));
        um_pRegions[i].srcOffsets[0] = (VkOffset3D){
            .x = *(uint32_t*)(soff_sc + 0),
            .y = *(uint32_t*)(soff_sc + 4),
            .z = *(uint32_t*)(soff_sc + 8),
        };
        lean_object *dsub = lean_ctor_get(r, 2);
        uint8_t *dsub_sc = (uint8_t*)(lean_ctor_obj_cptr(dsub) + lean_ctor_num_objs(dsub));
        um_pRegions[i].dstSubresource = (VkImageSubresourceLayers){
            .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(dsub, 0)),
            .mipLevel = *(uint32_t*)(dsub_sc + 0),
            .baseArrayLayer = *(uint32_t*)(dsub_sc + 4),
            .layerCount = *(uint32_t*)(dsub_sc + 8),
        };
        lean_object *doff = lean_ctor_get(r, 3);
        uint8_t *doff_sc = (uint8_t*)(lean_ctor_obj_cptr(doff) + lean_ctor_num_objs(doff));
        um_pRegions[i].dstOffsets[0] = (VkOffset3D){
            .x = *(uint32_t*)(doff_sc + 0),
            .y = *(uint32_t*)(doff_sc + 4),
            .z = *(uint32_t*)(doff_sc + 8),
        };
    }
    vkCmdBlitImage((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkImage)lean_unbox_uint64(srcImage), srcImageLayout, (VkImage)lean_unbox_uint64(dstImage), dstImageLayout, len_pRegions, um_pRegions, filter);
    // Fix for leak in the generated code: regions are copied at record time.
    free(um_pRegions);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdCopyBufferToImage: unmarshals a Lean array of BufferImageCopy
// regions (three scalar fields on the region ctor itself, plus subresource,
// offset, and extent sub-objects), records the copy, and frees the scratch.
LEAN_EXPORT lean_obj_res glue_vkCmdCopyBufferToImage(b_lean_obj_arg commandBuffer, b_lean_obj_arg srcBuffer, b_lean_obj_arg dstImage, VkImageLayout dstImageLayout, b_lean_obj_arg pRegions, b_lean_obj_arg w) {
    size_t len_pRegions = lean_array_size(pRegions);
    VkBufferImageCopy* um_pRegions = calloc(len_pRegions, sizeof(VkBufferImageCopy));
    for (size_t i = 0; i < len_pRegions; ++i) {
        lean_object *r = lean_array_cptr(pRegions)[i];
        uint8_t *r_sc = (uint8_t*)(lean_ctor_obj_cptr(r) + lean_ctor_num_objs(r));
        lean_object *sub = lean_ctor_get(r, 0);
        uint8_t *sub_sc = (uint8_t*)(lean_ctor_obj_cptr(sub) + lean_ctor_num_objs(sub));
        lean_object *off = lean_ctor_get(r, 1);
        uint8_t *off_sc = (uint8_t*)(lean_ctor_obj_cptr(off) + lean_ctor_num_objs(off));
        lean_object *ext = lean_ctor_get(r, 2);
        uint8_t *ext_sc = (uint8_t*)(lean_ctor_obj_cptr(ext) + lean_ctor_num_objs(ext));
        um_pRegions[i] = (VkBufferImageCopy){
            .bufferOffset = *(uint64_t*)(r_sc + 0),
            .bufferRowLength = *(uint32_t*)(r_sc + 8),
            .bufferImageHeight = *(uint32_t*)(r_sc + 12),
            .imageSubresource = {
                .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(sub, 0)),
                .mipLevel = *(uint32_t*)(sub_sc + 0),
                .baseArrayLayer = *(uint32_t*)(sub_sc + 4),
                .layerCount = *(uint32_t*)(sub_sc + 8),
            },
            .imageOffset = {
                .x = *(uint32_t*)(off_sc + 0),
                .y = *(uint32_t*)(off_sc + 4),
                .z = *(uint32_t*)(off_sc + 8),
            },
            .imageExtent = {
                .width = *(uint32_t*)(ext_sc + 0),
                .height = *(uint32_t*)(ext_sc + 4),
                .depth = *(uint32_t*)(ext_sc + 8),
            },
        };
    }
    vkCmdCopyBufferToImage((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkBuffer)lean_unbox_uint64(srcBuffer), (VkImage)lean_unbox_uint64(dstImage), dstImageLayout, len_pRegions, um_pRegions);
    // Fix for leak in the generated code: regions are copied at record time.
    free(um_pRegions);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdCopyImageToBuffer: mirror of CopyBufferToImage — unmarshals
// the same BufferImageCopy region layout, records the copy, frees the scratch.
LEAN_EXPORT lean_obj_res glue_vkCmdCopyImageToBuffer(b_lean_obj_arg commandBuffer, b_lean_obj_arg srcImage, VkImageLayout srcImageLayout, b_lean_obj_arg dstBuffer, b_lean_obj_arg pRegions, b_lean_obj_arg w) {
    size_t len_pRegions = lean_array_size(pRegions);
    VkBufferImageCopy* um_pRegions = calloc(len_pRegions, sizeof(VkBufferImageCopy));
    for (size_t i = 0; i < len_pRegions; ++i) {
        lean_object *r = lean_array_cptr(pRegions)[i];
        uint8_t *r_sc = (uint8_t*)(lean_ctor_obj_cptr(r) + lean_ctor_num_objs(r));
        lean_object *sub = lean_ctor_get(r, 0);
        uint8_t *sub_sc = (uint8_t*)(lean_ctor_obj_cptr(sub) + lean_ctor_num_objs(sub));
        lean_object *off = lean_ctor_get(r, 1);
        uint8_t *off_sc = (uint8_t*)(lean_ctor_obj_cptr(off) + lean_ctor_num_objs(off));
        lean_object *ext = lean_ctor_get(r, 2);
        uint8_t *ext_sc = (uint8_t*)(lean_ctor_obj_cptr(ext) + lean_ctor_num_objs(ext));
        um_pRegions[i] = (VkBufferImageCopy){
            .bufferOffset = *(uint64_t*)(r_sc + 0),
            .bufferRowLength = *(uint32_t*)(r_sc + 8),
            .bufferImageHeight = *(uint32_t*)(r_sc + 12),
            .imageSubresource = {
                .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(sub, 0)),
                .mipLevel = *(uint32_t*)(sub_sc + 0),
                .baseArrayLayer = *(uint32_t*)(sub_sc + 4),
                .layerCount = *(uint32_t*)(sub_sc + 8),
            },
            .imageOffset = {
                .x = *(uint32_t*)(off_sc + 0),
                .y = *(uint32_t*)(off_sc + 4),
                .z = *(uint32_t*)(off_sc + 8),
            },
            .imageExtent = {
                .width = *(uint32_t*)(ext_sc + 0),
                .height = *(uint32_t*)(ext_sc + 4),
                .depth = *(uint32_t*)(ext_sc + 8),
            },
        };
    }
    vkCmdCopyImageToBuffer((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkImage)lean_unbox_uint64(srcImage), srcImageLayout, (VkBuffer)lean_unbox_uint64(dstBuffer), len_pRegions, um_pRegions);
    // Fix for leak in the generated code: regions are copied at record time.
    free(um_pRegions);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdUpdateBuffer: pData is a Lean ByteArray whose backing store
// is handed to Vulkan directly (per spec, the bytes are copied into the
// command buffer at record time, so borrowing is safe).
LEAN_EXPORT lean_obj_res glue_vkCmdUpdateBuffer(b_lean_obj_arg commandBuffer, b_lean_obj_arg dstBuffer, VkDeviceSize dstOffset, b_lean_obj_arg pData, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    VkBuffer dst = (VkBuffer)lean_unbox_uint64(dstBuffer);
    vkCmdUpdateBuffer(cb, dst, dstOffset, lean_sarray_size(pData), lean_sarray_cptr(pData));
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdFillBuffer: fills `size` bytes at `dstOffset` with the
// repeated 32-bit `data` value.
LEAN_EXPORT lean_obj_res glue_vkCmdFillBuffer(b_lean_obj_arg commandBuffer, b_lean_obj_arg dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data, b_lean_obj_arg w) {
    VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
    VkBuffer dst = (VkBuffer)lean_unbox_uint64(dstBuffer);
    vkCmdFillBuffer(cb, dst, dstOffset, size, data);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdClearColorImage.  pColor is a Lean inductive with three
// constructors — float32 (a FloatArray), int32, and uint32 (boxed arrays) —
// each of which must hold exactly 4 components; pRanges is an array of
// subresource-range ctors.  The original heap-allocated (and leaked) a
// 4-element scratch array per color case and leaked um_pRanges; this version
// uses the union directly and frees the ranges after recording.
LEAN_EXPORT lean_obj_res glue_vkCmdClearColorImage(b_lean_obj_arg commandBuffer, b_lean_obj_arg image, VkImageLayout imageLayout, b_lean_obj_arg pColor, b_lean_obj_arg pRanges, b_lean_obj_arg w) {
    union VkClearColorValue um_pColor;
    switch (lean_ptr_tag(pColor)) {
    case 0: { // float32: FloatArray of 4 doubles, narrowed to float
        lean_object *arr = lean_ctor_get(pColor, 0);
        if (lean_sarray_size(arr) != 4) abort();
        for (size_t i = 0; i < 4; ++i)
            um_pColor.float32[i] = (float)lean_float_array_uget(arr, i);
    } break;
    case 1: { // int32: bit pattern carried through the unsigned unbox
        lean_object *arr = lean_ctor_get(pColor, 0);
        if (lean_array_size(arr) != 4) abort();
        for (size_t i = 0; i < 4; ++i)
            um_pColor.int32[i] = (int32_t)lean_unbox_uint32(lean_array_cptr(arr)[i]);
    } break;
    case 2: { // uint32
        lean_object *arr = lean_ctor_get(pColor, 0);
        if (lean_array_size(arr) != 4) abort();
        for (size_t i = 0; i < 4; ++i)
            um_pColor.uint32[i] = lean_unbox_uint32(lean_array_cptr(arr)[i]);
    } break;
    default:
        // Unreachable for a well-typed Lean value; previously fell through
        // with um_pColor uninitialized.
        abort();
    }
    size_t len_pRanges = lean_array_size(pRanges);
    VkImageSubresourceRange* um_pRanges = calloc(len_pRanges, sizeof(VkImageSubresourceRange));
    for (size_t i = 0; i < len_pRanges; ++i) {
        lean_object *rg = lean_array_cptr(pRanges)[i];
        uint8_t *sc = (uint8_t*)(lean_ctor_obj_cptr(rg) + lean_ctor_num_objs(rg));
        um_pRanges[i] = (VkImageSubresourceRange){
            .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(rg, 0)),
            .baseMipLevel = *(uint32_t*)(sc + 0),
            .levelCount = *(uint32_t*)(sc + 4),
            .baseArrayLayer = *(uint32_t*)(sc + 8),
            .layerCount = *(uint32_t*)(sc + 12),
        };
    }
    vkCmdClearColorImage((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkImage)lean_unbox_uint64(image), imageLayout, &um_pColor, len_pRanges, um_pRanges);
    // Fix for leak in the generated code: ranges are copied at record time.
    free(um_pRanges);
    return lean_io_result_mk_ok(lean_box(0));
}
// Glue for vkCmdClearDepthStencilImage: pDepthStencil is a ctor holding a
// Float (double at scalar offset 0, narrowed to float) and a UInt32 stencil
// value at offset 8; pRanges is unmarshaled like in ClearColorImage and the
// scratch array is freed after recording (leaked in the generated original).
LEAN_EXPORT lean_obj_res glue_vkCmdClearDepthStencilImage(b_lean_obj_arg commandBuffer, b_lean_obj_arg image, VkImageLayout imageLayout, b_lean_obj_arg pDepthStencil, b_lean_obj_arg pRanges, b_lean_obj_arg w) {
    uint8_t *ds_sc = (uint8_t*)(lean_ctor_obj_cptr(pDepthStencil) + lean_ctor_num_objs(pDepthStencil));
    struct VkClearDepthStencilValue um_pDepthStencil = {
        .depth = (float)*(double*)(ds_sc + 0),
        .stencil = *(uint32_t*)(ds_sc + 8),
    };
    size_t len_pRanges = lean_array_size(pRanges);
    VkImageSubresourceRange* um_pRanges = calloc(len_pRanges, sizeof(VkImageSubresourceRange));
    for (size_t i = 0; i < len_pRanges; ++i) {
        lean_object *rg = lean_array_cptr(pRanges)[i];
        uint8_t *sc = (uint8_t*)(lean_ctor_obj_cptr(rg) + lean_ctor_num_objs(rg));
        um_pRanges[i] = (VkImageSubresourceRange){
            .aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(lean_ctor_get(rg, 0)),
            .baseMipLevel = *(uint32_t*)(sc + 0),
            .levelCount = *(uint32_t*)(sc + 4),
            .baseArrayLayer = *(uint32_t*)(sc + 8),
            .layerCount = *(uint32_t*)(sc + 12),
        };
    }
    vkCmdClearDepthStencilImage((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkImage)lean_unbox_uint64(image), imageLayout, &um_pDepthStencil, len_pRanges, um_pRanges);
    // Fix for leak in the generated code: ranges are copied at record time.
    free(um_pRanges);
    return lean_io_result_mk_ok(lean_box(0));
}
LEAN_EXPORT lean_obj_res glue_vkCmdClearAttachments(b_lean_obj_arg commandBuffer, b_lean_obj_arg pAttachments, b_lean_obj_arg pRects, b_lean_obj_arg w) {
size_t len_pAttachments = lean_array_size(pAttachments);
VkClearAttachment* um_pAttachments = calloc(len_pAttachments, sizeof(VkClearAttachment));
for (size_t i = 0; i < len_pAttachments; ++i) {
lean_object *i_pAttachments = lean_array_cptr(pAttachments)[i];
lean_object *i_pAttachments_aspectMask = lean_ctor_get(i_pAttachments, 0);
lean_object *i_pAttachments_clearValue = lean_ctor_get(i_pAttachments, 1);
union VkClearValue um_i_pAttachments_clearValue;
switch (lean_ptr_tag(i_pAttachments_clearValue)) {
case 0: {
lean_object *i_pAttachments_clearValue_color = lean_ctor_get(i_pAttachments_clearValue, 0);
union VkClearColorValue um_i_pAttachments_clearValue_color;
switch (lean_ptr_tag(i_pAttachments_clearValue_color)) {
case 0: {
lean_object *i_pAttachments_clearValue_color_float32 = lean_ctor_get(i_pAttachments_clearValue_color, 0);
size_t len_i_pAttachments_clearValue_color_float32 = lean_sarray_size(i_pAttachments_clearValue_color_float32);
float* um_i_pAttachments_clearValue_color_float32 = calloc(len_i_pAttachments_clearValue_color_float32, sizeof(float));
for (size_t i = 0; i < len_i_pAttachments_clearValue_color_float32; ++i) {
um_i_pAttachments_clearValue_color_float32[i] = lean_float_array_uget(i_pAttachments_clearValue_color_float32, i);
}
if (len_i_pAttachments_clearValue_color_float32 != 4) abort();
um_i_pAttachments_clearValue_color = (union VkClearColorValue){ .float32 = {um_i_pAttachments_clearValue_color_float32[0],um_i_pAttachments_clearValue_color_float32[1],um_i_pAttachments_clearValue_color_float32[2],um_i_pAttachments_clearValue_color_float32[3],} };
} break;
case 1: {
lean_object *i_pAttachments_clearValue_color_int32 = lean_ctor_get(i_pAttachments_clearValue_color, 0);
size_t len_i_pAttachments_clearValue_color_int32 = lean_array_size(i_pAttachments_clearValue_color_int32);
uint32_t* um_i_pAttachments_clearValue_color_int32 = calloc(len_i_pAttachments_clearValue_color_int32, sizeof(uint32_t));
for (size_t i = 0; i < len_i_pAttachments_clearValue_color_int32; ++i) {
lean_object *i_i_pAttachments_clearValue_color_int32 = lean_array_cptr(i_pAttachments_clearValue_color_int32)[i];
um_i_pAttachments_clearValue_color_int32[i] = lean_unbox_uint32(i_i_pAttachments_clearValue_color_int32);
}
if (len_i_pAttachments_clearValue_color_int32 != 4) abort();
um_i_pAttachments_clearValue_color = (union VkClearColorValue){ .int32 = {um_i_pAttachments_clearValue_color_int32[0],um_i_pAttachments_clearValue_color_int32[1],um_i_pAttachments_clearValue_color_int32[2],um_i_pAttachments_clearValue_color_int32[3],} };
} break;
case 2: {
lean_object *i_pAttachments_clearValue_color_uint32 = lean_ctor_get(i_pAttachments_clearValue_color, 0);
size_t len_i_pAttachments_clearValue_color_uint32 = lean_array_size(i_pAttachments_clearValue_color_uint32);
uint32_t* um_i_pAttachments_clearValue_color_uint32 = calloc(len_i_pAttachments_clearValue_color_uint32, sizeof(uint32_t));
for (size_t i = 0; i < len_i_pAttachments_clearValue_color_uint32; ++i) {
lean_object *i_i_pAttachments_clearValue_color_uint32 = lean_array_cptr(i_pAttachments_clearValue_color_uint32)[i];
um_i_pAttachments_clearValue_color_uint32[i] = lean_unbox_uint32(i_i_pAttachments_clearValue_color_uint32);
}
if (len_i_pAttachments_clearValue_color_uint32 != 4) abort();
um_i_pAttachments_clearValue_color = (union VkClearColorValue){ .uint32 = {um_i_pAttachments_clearValue_color_uint32[0],um_i_pAttachments_clearValue_color_uint32[1],um_i_pAttachments_clearValue_color_uint32[2],um_i_pAttachments_clearValue_color_uint32[3],} };
} break;
}
um_i_pAttachments_clearValue = (union VkClearValue){ .color = um_i_pAttachments_clearValue_color };
} break;
case 1: {
lean_object *i_pAttachments_clearValue_depthStencil = lean_ctor_get(i_pAttachments_clearValue, 0);
struct VkClearDepthStencilValue um_i_pAttachments_clearValue_depthStencil = {
.depth = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pAttachments_clearValue_depthStencil) + lean_ctor_num_objs(i_pAttachments_clearValue_depthStencil)) + 0),
.stencil = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pAttachments_clearValue_depthStencil) + lean_ctor_num_objs(i_pAttachments_clearValue_depthStencil)) + 8),
};
um_i_pAttachments_clearValue = (union VkClearValue){ .depthStencil = um_i_pAttachments_clearValue_depthStencil };
} break;
}
struct VkClearAttachment um_i_pAttachments = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(i_pAttachments_aspectMask),
.colorAttachment = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pAttachments) + lean_ctor_num_objs(i_pAttachments)) + 0),
.clearValue = um_i_pAttachments_clearValue,
};
um_pAttachments[i] = um_i_pAttachments;
}
size_t len_pRects = lean_array_size(pRects);
VkClearRect* um_pRects = calloc(len_pRects, sizeof(VkClearRect));
for (size_t i = 0; i < len_pRects; ++i) {
lean_object *i_pRects = lean_array_cptr(pRects)[i];
lean_object *i_pRects_rect = lean_ctor_get(i_pRects, 0);
lean_object *i_pRects_rect_offset = lean_ctor_get(i_pRects_rect, 0);
struct VkOffset2D um_i_pRects_rect_offset = {
.x = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRects_rect_offset) + lean_ctor_num_objs(i_pRects_rect_offset)) + 0),
.y = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRects_rect_offset) + lean_ctor_num_objs(i_pRects_rect_offset)) + 4),
};
lean_object *i_pRects_rect_extent = lean_ctor_get(i_pRects_rect, 1);
struct VkExtent2D um_i_pRects_rect_extent = {
.width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRects_rect_extent) + lean_ctor_num_objs(i_pRects_rect_extent)) + 0),
.height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRects_rect_extent) + lean_ctor_num_objs(i_pRects_rect_extent)) + 4),
};
struct VkRect2D um_i_pRects_rect = {
.offset = um_i_pRects_rect_offset,
.extent = um_i_pRects_rect_extent,
};
struct VkClearRect um_i_pRects = {
.rect = um_i_pRects_rect,
.baseArrayLayer = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRects) + lean_ctor_num_objs(i_pRects)) + 0),
.layerCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRects) + lean_ctor_num_objs(i_pRects)) + 4),
};
um_pRects[i] = um_i_pRects;
}
vkCmdClearAttachments((VkCommandBuffer)lean_unbox_uint64(commandBuffer), len_pAttachments, um_pAttachments, len_pRects, um_pRects);
return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdResolveImage.
 * Unmarshals an array of Lean VkImageResolve structures into a temporary C
 * array, issues the command, and returns IO Unit. Vulkan handles arrive as
 * boxed uint64 values; plain uint32 struct fields are read from the ctor's
 * scalar area at fixed byte offsets (generated layout).
 * Fix: the calloc'd um_pRegions buffer was previously leaked; vkCmdResolveImage
 * consumes pRegions during the call, so it is safe to free immediately after. */
LEAN_EXPORT lean_obj_res glue_vkCmdResolveImage(b_lean_obj_arg commandBuffer, b_lean_obj_arg srcImage, VkImageLayout srcImageLayout, b_lean_obj_arg dstImage, VkImageLayout dstImageLayout, b_lean_obj_arg pRegions, b_lean_obj_arg w) {
size_t len_pRegions = lean_array_size(pRegions);
VkImageResolve* um_pRegions = calloc(len_pRegions, sizeof(VkImageResolve));
for (size_t i = 0; i < len_pRegions; ++i) {
lean_object *i_pRegions = lean_array_cptr(pRegions)[i];
lean_object *i_pRegions_srcSubresource = lean_ctor_get(i_pRegions, 0);
lean_object *i_pRegions_srcSubresource_aspectMask = lean_ctor_get(i_pRegions_srcSubresource, 0);
struct VkImageSubresourceLayers um_i_pRegions_srcSubresource = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(i_pRegions_srcSubresource_aspectMask),
.mipLevel = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_srcSubresource) + lean_ctor_num_objs(i_pRegions_srcSubresource)) + 0),
.baseArrayLayer = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_srcSubresource) + lean_ctor_num_objs(i_pRegions_srcSubresource)) + 4),
.layerCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_srcSubresource) + lean_ctor_num_objs(i_pRegions_srcSubresource)) + 8),
};
lean_object *i_pRegions_srcOffset = lean_ctor_get(i_pRegions, 1);
struct VkOffset3D um_i_pRegions_srcOffset = {
.x = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_srcOffset) + lean_ctor_num_objs(i_pRegions_srcOffset)) + 0),
.y = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_srcOffset) + lean_ctor_num_objs(i_pRegions_srcOffset)) + 4),
.z = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_srcOffset) + lean_ctor_num_objs(i_pRegions_srcOffset)) + 8),
};
lean_object *i_pRegions_dstSubresource = lean_ctor_get(i_pRegions, 2);
lean_object *i_pRegions_dstSubresource_aspectMask = lean_ctor_get(i_pRegions_dstSubresource, 0);
struct VkImageSubresourceLayers um_i_pRegions_dstSubresource = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(i_pRegions_dstSubresource_aspectMask),
.mipLevel = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_dstSubresource) + lean_ctor_num_objs(i_pRegions_dstSubresource)) + 0),
.baseArrayLayer = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_dstSubresource) + lean_ctor_num_objs(i_pRegions_dstSubresource)) + 4),
.layerCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_dstSubresource) + lean_ctor_num_objs(i_pRegions_dstSubresource)) + 8),
};
lean_object *i_pRegions_dstOffset = lean_ctor_get(i_pRegions, 3);
struct VkOffset3D um_i_pRegions_dstOffset = {
.x = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_dstOffset) + lean_ctor_num_objs(i_pRegions_dstOffset)) + 0),
.y = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_dstOffset) + lean_ctor_num_objs(i_pRegions_dstOffset)) + 4),
.z = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_dstOffset) + lean_ctor_num_objs(i_pRegions_dstOffset)) + 8),
};
lean_object *i_pRegions_extent = lean_ctor_get(i_pRegions, 4);
struct VkExtent3D um_i_pRegions_extent = {
.width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_extent) + lean_ctor_num_objs(i_pRegions_extent)) + 0),
.height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_extent) + lean_ctor_num_objs(i_pRegions_extent)) + 4),
.depth = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRegions_extent) + lean_ctor_num_objs(i_pRegions_extent)) + 8),
};
struct VkImageResolve um_i_pRegions = {
.srcSubresource = um_i_pRegions_srcSubresource,
.srcOffset = um_i_pRegions_srcOffset,
.dstSubresource = um_i_pRegions_dstSubresource,
.dstOffset = um_i_pRegions_dstOffset,
.extent = um_i_pRegions_extent,
};
um_pRegions[i] = um_i_pRegions;
}
vkCmdResolveImage((VkCommandBuffer)lean_unbox_uint64(commandBuffer), (VkImage)lean_unbox_uint64(srcImage), srcImageLayout, (VkImage)lean_unbox_uint64(dstImage), dstImageLayout, len_pRegions, um_pRegions);
free(um_pRegions); // fix: temporary unmarshal buffer was leaked
return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdSetEvent: unbox the command buffer and event
 * handles (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdSetEvent(b_lean_obj_arg commandBuffer, b_lean_obj_arg event, VkPipelineStageFlags stageMask, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkEvent ev = (VkEvent)lean_unbox_uint64(event);
  vkCmdSetEvent(cb, ev, stageMask);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdResetEvent: unbox the command buffer and event
 * handles (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdResetEvent(b_lean_obj_arg commandBuffer, b_lean_obj_arg event, VkPipelineStageFlags stageMask, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkEvent ev = (VkEvent)lean_unbox_uint64(event);
  vkCmdResetEvent(cb, ev, stageMask);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdWaitEvents.
 * Unmarshals the event handle array plus three barrier arrays
 * (VkMemoryBarrier / VkBufferMemoryBarrier / VkImageMemoryBarrier) from Lean
 * objects into temporary C arrays, issues the command, and returns IO Unit.
 * sType/pNext are filled in here; scalar struct fields are read from the
 * ctor's scalar area at fixed byte offsets (generated layout).
 * Fix: all four calloc'd unmarshal buffers were previously leaked; Vulkan
 * consumes the pointers during the call, so they are freed right after. */
LEAN_EXPORT lean_obj_res glue_vkCmdWaitEvents(b_lean_obj_arg commandBuffer, b_lean_obj_arg pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, b_lean_obj_arg pMemoryBarriers, b_lean_obj_arg pBufferMemoryBarriers, b_lean_obj_arg pImageMemoryBarriers, b_lean_obj_arg w) {
size_t len_pEvents = lean_array_size(pEvents);
VkEvent* um_pEvents = calloc(len_pEvents, sizeof(VkEvent));
for (size_t i = 0; i < len_pEvents; ++i) {
lean_object *i_pEvents = lean_array_cptr(pEvents)[i];
um_pEvents[i] = (VkEvent)lean_unbox_uint64(i_pEvents);
}
size_t len_pMemoryBarriers = lean_array_size(pMemoryBarriers);
VkMemoryBarrier* um_pMemoryBarriers = calloc(len_pMemoryBarriers, sizeof(VkMemoryBarrier));
for (size_t i = 0; i < len_pMemoryBarriers; ++i) {
lean_object *i_pMemoryBarriers = lean_array_cptr(pMemoryBarriers)[i];
lean_object *i_pMemoryBarriers_srcAccessMask = lean_ctor_get(i_pMemoryBarriers, 0);
lean_object *i_pMemoryBarriers_dstAccessMask = lean_ctor_get(i_pMemoryBarriers, 1);
struct VkMemoryBarrier um_i_pMemoryBarriers = {
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
.pNext = 0,
.srcAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pMemoryBarriers_srcAccessMask),
.dstAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pMemoryBarriers_dstAccessMask),
};
um_pMemoryBarriers[i] = um_i_pMemoryBarriers;
}
size_t len_pBufferMemoryBarriers = lean_array_size(pBufferMemoryBarriers);
VkBufferMemoryBarrier* um_pBufferMemoryBarriers = calloc(len_pBufferMemoryBarriers, sizeof(VkBufferMemoryBarrier));
for (size_t i = 0; i < len_pBufferMemoryBarriers; ++i) {
lean_object *i_pBufferMemoryBarriers = lean_array_cptr(pBufferMemoryBarriers)[i];
lean_object *i_pBufferMemoryBarriers_srcAccessMask = lean_ctor_get(i_pBufferMemoryBarriers, 0);
lean_object *i_pBufferMemoryBarriers_dstAccessMask = lean_ctor_get(i_pBufferMemoryBarriers, 1);
lean_object *i_pBufferMemoryBarriers_buffer = lean_ctor_get(i_pBufferMemoryBarriers, 2);
struct VkBufferMemoryBarrier um_i_pBufferMemoryBarriers = {
.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
.pNext = 0,
.srcAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pBufferMemoryBarriers_srcAccessMask),
.dstAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pBufferMemoryBarriers_dstAccessMask),
.srcQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 16),
.dstQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 20),
.buffer = (VkBuffer)lean_unbox_uint64(i_pBufferMemoryBarriers_buffer),
.offset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 0),
.size = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 8),
};
um_pBufferMemoryBarriers[i] = um_i_pBufferMemoryBarriers;
}
size_t len_pImageMemoryBarriers = lean_array_size(pImageMemoryBarriers);
VkImageMemoryBarrier* um_pImageMemoryBarriers = calloc(len_pImageMemoryBarriers, sizeof(VkImageMemoryBarrier));
for (size_t i = 0; i < len_pImageMemoryBarriers; ++i) {
lean_object *i_pImageMemoryBarriers = lean_array_cptr(pImageMemoryBarriers)[i];
lean_object *i_pImageMemoryBarriers_srcAccessMask = lean_ctor_get(i_pImageMemoryBarriers, 0);
lean_object *i_pImageMemoryBarriers_dstAccessMask = lean_ctor_get(i_pImageMemoryBarriers, 1);
lean_object *i_pImageMemoryBarriers_oldLayout = lean_ctor_get(i_pImageMemoryBarriers, 2);
lean_object *i_pImageMemoryBarriers_newLayout = lean_ctor_get(i_pImageMemoryBarriers, 3);
lean_object *i_pImageMemoryBarriers_image = lean_ctor_get(i_pImageMemoryBarriers, 4);
lean_object *i_pImageMemoryBarriers_subresourceRange = lean_ctor_get(i_pImageMemoryBarriers, 5);
lean_object *i_pImageMemoryBarriers_subresourceRange_aspectMask = lean_ctor_get(i_pImageMemoryBarriers_subresourceRange, 0);
struct VkImageSubresourceRange um_i_pImageMemoryBarriers_subresourceRange = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(i_pImageMemoryBarriers_subresourceRange_aspectMask),
.baseMipLevel = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 0),
.levelCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 4),
.baseArrayLayer = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 8),
.layerCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 12),
};
struct VkImageMemoryBarrier um_i_pImageMemoryBarriers = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = 0,
.srcAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pImageMemoryBarriers_srcAccessMask),
.dstAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pImageMemoryBarriers_dstAccessMask),
.oldLayout = (VkImageLayout)lean_unbox_uint32(i_pImageMemoryBarriers_oldLayout),
.newLayout = (VkImageLayout)lean_unbox_uint32(i_pImageMemoryBarriers_newLayout),
.srcQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers) + lean_ctor_num_objs(i_pImageMemoryBarriers)) + 0),
.dstQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers) + lean_ctor_num_objs(i_pImageMemoryBarriers)) + 4),
.image = (VkImage)lean_unbox_uint64(i_pImageMemoryBarriers_image),
.subresourceRange = um_i_pImageMemoryBarriers_subresourceRange,
};
um_pImageMemoryBarriers[i] = um_i_pImageMemoryBarriers;
}
vkCmdWaitEvents((VkCommandBuffer)lean_unbox_uint64(commandBuffer), len_pEvents, um_pEvents, srcStageMask, dstStageMask, len_pMemoryBarriers, um_pMemoryBarriers, len_pBufferMemoryBarriers, um_pBufferMemoryBarriers, len_pImageMemoryBarriers, um_pImageMemoryBarriers);
// fix: all temporary unmarshal buffers were leaked
free(um_pEvents);
free(um_pMemoryBarriers);
free(um_pBufferMemoryBarriers);
free(um_pImageMemoryBarriers);
return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdPipelineBarrier.
 * Unmarshals the three barrier arrays (VkMemoryBarrier / VkBufferMemoryBarrier
 * / VkImageMemoryBarrier) from Lean objects into temporary C arrays, issues
 * the command, and returns IO Unit. sType/pNext are filled in here; scalar
 * struct fields are read from the ctor's scalar area at fixed byte offsets
 * (generated layout).
 * Fix: all three calloc'd unmarshal buffers were previously leaked; Vulkan
 * consumes the pointers during the call, so they are freed right after. */
LEAN_EXPORT lean_obj_res glue_vkCmdPipelineBarrier(b_lean_obj_arg commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, b_lean_obj_arg pMemoryBarriers, b_lean_obj_arg pBufferMemoryBarriers, b_lean_obj_arg pImageMemoryBarriers, b_lean_obj_arg w) {
size_t len_pMemoryBarriers = lean_array_size(pMemoryBarriers);
VkMemoryBarrier* um_pMemoryBarriers = calloc(len_pMemoryBarriers, sizeof(VkMemoryBarrier));
for (size_t i = 0; i < len_pMemoryBarriers; ++i) {
lean_object *i_pMemoryBarriers = lean_array_cptr(pMemoryBarriers)[i];
lean_object *i_pMemoryBarriers_srcAccessMask = lean_ctor_get(i_pMemoryBarriers, 0);
lean_object *i_pMemoryBarriers_dstAccessMask = lean_ctor_get(i_pMemoryBarriers, 1);
struct VkMemoryBarrier um_i_pMemoryBarriers = {
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
.pNext = 0,
.srcAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pMemoryBarriers_srcAccessMask),
.dstAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pMemoryBarriers_dstAccessMask),
};
um_pMemoryBarriers[i] = um_i_pMemoryBarriers;
}
size_t len_pBufferMemoryBarriers = lean_array_size(pBufferMemoryBarriers);
VkBufferMemoryBarrier* um_pBufferMemoryBarriers = calloc(len_pBufferMemoryBarriers, sizeof(VkBufferMemoryBarrier));
for (size_t i = 0; i < len_pBufferMemoryBarriers; ++i) {
lean_object *i_pBufferMemoryBarriers = lean_array_cptr(pBufferMemoryBarriers)[i];
lean_object *i_pBufferMemoryBarriers_srcAccessMask = lean_ctor_get(i_pBufferMemoryBarriers, 0);
lean_object *i_pBufferMemoryBarriers_dstAccessMask = lean_ctor_get(i_pBufferMemoryBarriers, 1);
lean_object *i_pBufferMemoryBarriers_buffer = lean_ctor_get(i_pBufferMemoryBarriers, 2);
struct VkBufferMemoryBarrier um_i_pBufferMemoryBarriers = {
.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
.pNext = 0,
.srcAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pBufferMemoryBarriers_srcAccessMask),
.dstAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pBufferMemoryBarriers_dstAccessMask),
.srcQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 16),
.dstQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 20),
.buffer = (VkBuffer)lean_unbox_uint64(i_pBufferMemoryBarriers_buffer),
.offset = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 0),
.size = (VkDeviceSize)*(uint64_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pBufferMemoryBarriers) + lean_ctor_num_objs(i_pBufferMemoryBarriers)) + 8),
};
um_pBufferMemoryBarriers[i] = um_i_pBufferMemoryBarriers;
}
size_t len_pImageMemoryBarriers = lean_array_size(pImageMemoryBarriers);
VkImageMemoryBarrier* um_pImageMemoryBarriers = calloc(len_pImageMemoryBarriers, sizeof(VkImageMemoryBarrier));
for (size_t i = 0; i < len_pImageMemoryBarriers; ++i) {
lean_object *i_pImageMemoryBarriers = lean_array_cptr(pImageMemoryBarriers)[i];
lean_object *i_pImageMemoryBarriers_srcAccessMask = lean_ctor_get(i_pImageMemoryBarriers, 0);
lean_object *i_pImageMemoryBarriers_dstAccessMask = lean_ctor_get(i_pImageMemoryBarriers, 1);
lean_object *i_pImageMemoryBarriers_oldLayout = lean_ctor_get(i_pImageMemoryBarriers, 2);
lean_object *i_pImageMemoryBarriers_newLayout = lean_ctor_get(i_pImageMemoryBarriers, 3);
lean_object *i_pImageMemoryBarriers_image = lean_ctor_get(i_pImageMemoryBarriers, 4);
lean_object *i_pImageMemoryBarriers_subresourceRange = lean_ctor_get(i_pImageMemoryBarriers, 5);
lean_object *i_pImageMemoryBarriers_subresourceRange_aspectMask = lean_ctor_get(i_pImageMemoryBarriers_subresourceRange, 0);
struct VkImageSubresourceRange um_i_pImageMemoryBarriers_subresourceRange = {
.aspectMask = (VkImageAspectFlags)(VkImageAspectFlagBits)lean_unbox_uint32(i_pImageMemoryBarriers_subresourceRange_aspectMask),
.baseMipLevel = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 0),
.levelCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 4),
.baseArrayLayer = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 8),
.layerCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers_subresourceRange) + lean_ctor_num_objs(i_pImageMemoryBarriers_subresourceRange)) + 12),
};
struct VkImageMemoryBarrier um_i_pImageMemoryBarriers = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = 0,
.srcAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pImageMemoryBarriers_srcAccessMask),
.dstAccessMask = (VkAccessFlags)(VkAccessFlagBits)lean_unbox_uint32(i_pImageMemoryBarriers_dstAccessMask),
.oldLayout = (VkImageLayout)lean_unbox_uint32(i_pImageMemoryBarriers_oldLayout),
.newLayout = (VkImageLayout)lean_unbox_uint32(i_pImageMemoryBarriers_newLayout),
.srcQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers) + lean_ctor_num_objs(i_pImageMemoryBarriers)) + 0),
.dstQueueFamilyIndex = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pImageMemoryBarriers) + lean_ctor_num_objs(i_pImageMemoryBarriers)) + 4),
.image = (VkImage)lean_unbox_uint64(i_pImageMemoryBarriers_image),
.subresourceRange = um_i_pImageMemoryBarriers_subresourceRange,
};
um_pImageMemoryBarriers[i] = um_i_pImageMemoryBarriers;
}
vkCmdPipelineBarrier((VkCommandBuffer)lean_unbox_uint64(commandBuffer), srcStageMask, dstStageMask, dependencyFlags, len_pMemoryBarriers, um_pMemoryBarriers, len_pBufferMemoryBarriers, um_pBufferMemoryBarriers, len_pImageMemoryBarriers, um_pImageMemoryBarriers);
// fix: all temporary unmarshal buffers were leaked
free(um_pMemoryBarriers);
free(um_pBufferMemoryBarriers);
free(um_pImageMemoryBarriers);
return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdBeginQuery: unbox the command buffer and query pool
 * handles (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdBeginQuery(b_lean_obj_arg commandBuffer, b_lean_obj_arg queryPool, uint32_t query, VkQueryControlFlags flags, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkQueryPool pool = (VkQueryPool)lean_unbox_uint64(queryPool);
  vkCmdBeginQuery(cb, pool, query, flags);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdEndQuery: unbox the command buffer and query pool
 * handles (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdEndQuery(b_lean_obj_arg commandBuffer, b_lean_obj_arg queryPool, uint32_t query, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkQueryPool pool = (VkQueryPool)lean_unbox_uint64(queryPool);
  vkCmdEndQuery(cb, pool, query);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdResetQueryPool: unbox the command buffer and query
 * pool handles (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdResetQueryPool(b_lean_obj_arg commandBuffer, b_lean_obj_arg queryPool, uint32_t firstQuery, uint32_t queryCount, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkQueryPool pool = (VkQueryPool)lean_unbox_uint64(queryPool);
  vkCmdResetQueryPool(cb, pool, firstQuery, queryCount);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdWriteTimestamp: unbox the command buffer and query
 * pool handles (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdWriteTimestamp(b_lean_obj_arg commandBuffer, VkPipelineStageFlagBits pipelineStage, b_lean_obj_arg queryPool, uint32_t query, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkQueryPool pool = (VkQueryPool)lean_unbox_uint64(queryPool);
  vkCmdWriteTimestamp(cb, pipelineStage, pool, query);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdCopyQueryPoolResults: unbox the command buffer,
 * query pool, and destination buffer handles (boxed uint64) and forward the
 * call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdCopyQueryPoolResults(b_lean_obj_arg commandBuffer, b_lean_obj_arg queryPool, uint32_t firstQuery, uint32_t queryCount, b_lean_obj_arg dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkQueryPool pool = (VkQueryPool)lean_unbox_uint64(queryPool);
  VkBuffer dst = (VkBuffer)lean_unbox_uint64(dstBuffer);
  vkCmdCopyQueryPoolResults(cb, pool, firstQuery, queryCount, dst, dstOffset, stride, flags);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdPushConstants: pass the Lean scalar array's raw
 * bytes directly as the push-constant payload (no copy needed — Vulkan
 * consumes pValues during the call); returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdPushConstants(b_lean_obj_arg commandBuffer, b_lean_obj_arg layout, VkShaderStageFlags stageFlags, uint32_t offset, b_lean_obj_arg pValues, b_lean_obj_arg w) {
  size_t byte_count = lean_sarray_size(pValues);
  void *payload = lean_sarray_cptr(pValues);
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  VkPipelineLayout pl = (VkPipelineLayout)lean_unbox_uint64(layout);
  vkCmdPushConstants(cb, pl, stageFlags, offset, byte_count, payload);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdBeginRenderPass.
 * Unmarshals a Lean VkRenderPassBeginInfo (render pass / framebuffer handles,
 * render area, and a clear-value array) into a C struct and issues the
 * command; returns IO Unit. VkClearValue is a union: ptr tag 0 selects a
 * color value (itself a union tagged float32/int32/uint32, each required to
 * hold exactly 4 elements — abort() otherwise), tag 1 a depth/stencil value.
 * Fix: the calloc'd clear-value array and the per-element temporary
 * float32/int32/uint32 component buffers were previously leaked; all are
 * freed once their contents have been copied into the union / once the
 * Vulkan call returns. */
LEAN_EXPORT lean_obj_res glue_vkCmdBeginRenderPass(b_lean_obj_arg commandBuffer, b_lean_obj_arg pRenderPassBegin, VkSubpassContents contents, b_lean_obj_arg w) {
lean_object *pRenderPassBegin_renderPass = lean_ctor_get(pRenderPassBegin, 0);
lean_object *pRenderPassBegin_framebuffer = lean_ctor_get(pRenderPassBegin, 1);
lean_object *pRenderPassBegin_renderArea = lean_ctor_get(pRenderPassBegin, 2);
lean_object *pRenderPassBegin_renderArea_offset = lean_ctor_get(pRenderPassBegin_renderArea, 0);
struct VkOffset2D um_pRenderPassBegin_renderArea_offset = {
.x = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pRenderPassBegin_renderArea_offset) + lean_ctor_num_objs(pRenderPassBegin_renderArea_offset)) + 0),
.y = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pRenderPassBegin_renderArea_offset) + lean_ctor_num_objs(pRenderPassBegin_renderArea_offset)) + 4),
};
lean_object *pRenderPassBegin_renderArea_extent = lean_ctor_get(pRenderPassBegin_renderArea, 1);
struct VkExtent2D um_pRenderPassBegin_renderArea_extent = {
.width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pRenderPassBegin_renderArea_extent) + lean_ctor_num_objs(pRenderPassBegin_renderArea_extent)) + 0),
.height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pRenderPassBegin_renderArea_extent) + lean_ctor_num_objs(pRenderPassBegin_renderArea_extent)) + 4),
};
struct VkRect2D um_pRenderPassBegin_renderArea = {
.offset = um_pRenderPassBegin_renderArea_offset,
.extent = um_pRenderPassBegin_renderArea_extent,
};
lean_object *pRenderPassBegin_pClearValues = lean_ctor_get(pRenderPassBegin, 3);
size_t len_pRenderPassBegin_pClearValues = lean_array_size(pRenderPassBegin_pClearValues);
VkClearValue* um_pRenderPassBegin_pClearValues = calloc(len_pRenderPassBegin_pClearValues, sizeof(VkClearValue));
for (size_t i = 0; i < len_pRenderPassBegin_pClearValues; ++i) {
lean_object *i_pRenderPassBegin_pClearValues = lean_array_cptr(pRenderPassBegin_pClearValues)[i];
union VkClearValue um_i_pRenderPassBegin_pClearValues;
switch (lean_ptr_tag(i_pRenderPassBegin_pClearValues)) {
case 0: {
lean_object *i_pRenderPassBegin_pClearValues_color = lean_ctor_get(i_pRenderPassBegin_pClearValues, 0);
union VkClearColorValue um_i_pRenderPassBegin_pClearValues_color;
switch (lean_ptr_tag(i_pRenderPassBegin_pClearValues_color)) {
case 0: {
lean_object *i_pRenderPassBegin_pClearValues_color_float32 = lean_ctor_get(i_pRenderPassBegin_pClearValues_color, 0);
size_t len_i_pRenderPassBegin_pClearValues_color_float32 = lean_sarray_size(i_pRenderPassBegin_pClearValues_color_float32);
float* um_i_pRenderPassBegin_pClearValues_color_float32 = calloc(len_i_pRenderPassBegin_pClearValues_color_float32, sizeof(float));
for (size_t i = 0; i < len_i_pRenderPassBegin_pClearValues_color_float32; ++i) {
um_i_pRenderPassBegin_pClearValues_color_float32[i] = lean_float_array_uget(i_pRenderPassBegin_pClearValues_color_float32, i);
}
if (len_i_pRenderPassBegin_pClearValues_color_float32 != 4) abort();
um_i_pRenderPassBegin_pClearValues_color = (union VkClearColorValue){ .float32 = {um_i_pRenderPassBegin_pClearValues_color_float32[0],um_i_pRenderPassBegin_pClearValues_color_float32[1],um_i_pRenderPassBegin_pClearValues_color_float32[2],um_i_pRenderPassBegin_pClearValues_color_float32[3],} };
free(um_i_pRenderPassBegin_pClearValues_color_float32); // fix: temporary was leaked each iteration
} break;
case 1: {
lean_object *i_pRenderPassBegin_pClearValues_color_int32 = lean_ctor_get(i_pRenderPassBegin_pClearValues_color, 0);
size_t len_i_pRenderPassBegin_pClearValues_color_int32 = lean_array_size(i_pRenderPassBegin_pClearValues_color_int32);
uint32_t* um_i_pRenderPassBegin_pClearValues_color_int32 = calloc(len_i_pRenderPassBegin_pClearValues_color_int32, sizeof(uint32_t));
for (size_t i = 0; i < len_i_pRenderPassBegin_pClearValues_color_int32; ++i) {
lean_object *i_i_pRenderPassBegin_pClearValues_color_int32 = lean_array_cptr(i_pRenderPassBegin_pClearValues_color_int32)[i];
um_i_pRenderPassBegin_pClearValues_color_int32[i] = lean_unbox_uint32(i_i_pRenderPassBegin_pClearValues_color_int32);
}
if (len_i_pRenderPassBegin_pClearValues_color_int32 != 4) abort();
um_i_pRenderPassBegin_pClearValues_color = (union VkClearColorValue){ .int32 = {um_i_pRenderPassBegin_pClearValues_color_int32[0],um_i_pRenderPassBegin_pClearValues_color_int32[1],um_i_pRenderPassBegin_pClearValues_color_int32[2],um_i_pRenderPassBegin_pClearValues_color_int32[3],} };
free(um_i_pRenderPassBegin_pClearValues_color_int32); // fix: temporary was leaked each iteration
} break;
case 2: {
lean_object *i_pRenderPassBegin_pClearValues_color_uint32 = lean_ctor_get(i_pRenderPassBegin_pClearValues_color, 0);
size_t len_i_pRenderPassBegin_pClearValues_color_uint32 = lean_array_size(i_pRenderPassBegin_pClearValues_color_uint32);
uint32_t* um_i_pRenderPassBegin_pClearValues_color_uint32 = calloc(len_i_pRenderPassBegin_pClearValues_color_uint32, sizeof(uint32_t));
for (size_t i = 0; i < len_i_pRenderPassBegin_pClearValues_color_uint32; ++i) {
lean_object *i_i_pRenderPassBegin_pClearValues_color_uint32 = lean_array_cptr(i_pRenderPassBegin_pClearValues_color_uint32)[i];
um_i_pRenderPassBegin_pClearValues_color_uint32[i] = lean_unbox_uint32(i_i_pRenderPassBegin_pClearValues_color_uint32);
}
if (len_i_pRenderPassBegin_pClearValues_color_uint32 != 4) abort();
um_i_pRenderPassBegin_pClearValues_color = (union VkClearColorValue){ .uint32 = {um_i_pRenderPassBegin_pClearValues_color_uint32[0],um_i_pRenderPassBegin_pClearValues_color_uint32[1],um_i_pRenderPassBegin_pClearValues_color_uint32[2],um_i_pRenderPassBegin_pClearValues_color_uint32[3],} };
free(um_i_pRenderPassBegin_pClearValues_color_uint32); // fix: temporary was leaked each iteration
} break;
}
um_i_pRenderPassBegin_pClearValues = (union VkClearValue){ .color = um_i_pRenderPassBegin_pClearValues_color };
} break;
case 1: {
lean_object *i_pRenderPassBegin_pClearValues_depthStencil = lean_ctor_get(i_pRenderPassBegin_pClearValues, 0);
struct VkClearDepthStencilValue um_i_pRenderPassBegin_pClearValues_depthStencil = {
.depth = (float)*(double*)((uint8_t*)(lean_ctor_obj_cptr(i_pRenderPassBegin_pClearValues_depthStencil) + lean_ctor_num_objs(i_pRenderPassBegin_pClearValues_depthStencil)) + 0),
.stencil = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(i_pRenderPassBegin_pClearValues_depthStencil) + lean_ctor_num_objs(i_pRenderPassBegin_pClearValues_depthStencil)) + 8),
};
um_i_pRenderPassBegin_pClearValues = (union VkClearValue){ .depthStencil = um_i_pRenderPassBegin_pClearValues_depthStencil };
} break;
}
um_pRenderPassBegin_pClearValues[i] = um_i_pRenderPassBegin_pClearValues;
}
struct VkRenderPassBeginInfo um_pRenderPassBegin = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.pNext = 0,
.renderPass = (VkRenderPass)lean_unbox_uint64(pRenderPassBegin_renderPass),
.framebuffer = (VkFramebuffer)lean_unbox_uint64(pRenderPassBegin_framebuffer),
.renderArea = um_pRenderPassBegin_renderArea,
.clearValueCount = len_pRenderPassBegin_pClearValues,
.pClearValues = um_pRenderPassBegin_pClearValues,
};
vkCmdBeginRenderPass((VkCommandBuffer)lean_unbox_uint64(commandBuffer), &um_pRenderPassBegin, contents);
free(um_pRenderPassBegin_pClearValues); // fix: clear-value array was leaked
return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdNextSubpass: unbox the command buffer handle
 * (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdNextSubpass(b_lean_obj_arg commandBuffer, VkSubpassContents contents, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  vkCmdNextSubpass(cb, contents);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdEndRenderPass: unbox the command buffer handle
 * (boxed uint64) and forward the call; returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkCmdEndRenderPass(b_lean_obj_arg commandBuffer, b_lean_obj_arg w) {
  VkCommandBuffer cb = (VkCommandBuffer)lean_unbox_uint64(commandBuffer);
  vkCmdEndRenderPass(cb);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkCmdExecuteCommands.
 * Unboxes a Lean array of secondary command buffer handles (boxed uint64)
 * into a temporary VkCommandBuffer array, issues the command, and returns
 * IO Unit.
 * Fix: the calloc'd handle array was previously leaked; Vulkan consumes
 * pCommandBuffers during the call, so it is freed right after. */
LEAN_EXPORT lean_obj_res glue_vkCmdExecuteCommands(b_lean_obj_arg commandBuffer, b_lean_obj_arg pCommandBuffers, b_lean_obj_arg w) {
size_t len_pCommandBuffers = lean_array_size(pCommandBuffers);
VkCommandBuffer* um_pCommandBuffers = calloc(len_pCommandBuffers, sizeof(VkCommandBuffer));
for (size_t i = 0; i < len_pCommandBuffers; ++i) {
lean_object *i_pCommandBuffers = lean_array_cptr(pCommandBuffers)[i];
um_pCommandBuffers[i] = (VkCommandBuffer)lean_unbox_uint64(i_pCommandBuffers);
}
vkCmdExecuteCommands((VkCommandBuffer)lean_unbox_uint64(commandBuffer), len_pCommandBuffers, um_pCommandBuffers);
free(um_pCommandBuffers); // fix: temporary unmarshal buffer was leaked
return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkDestroySurfaceKHR: unbox the instance and surface
 * handles (boxed uint64) and destroy the surface with the default allocator
 * (pAllocator = NULL); returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkDestroySurfaceKHR(b_lean_obj_arg instance, b_lean_obj_arg surface, b_lean_obj_arg w) {
  VkInstance inst = (VkInstance)lean_unbox_uint64(instance);
  VkSurfaceKHR surf = (VkSurfaceKHR)lean_unbox_uint64(surface);
  vkDestroySurfaceKHR(inst, surf, NULL);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean FFI glue for vkGetPhysicalDeviceSurfaceSupportKHR.
 * Queries presentation support for a queue family and returns the pair
 * (VkResult boxed as uint64, VkBool32 boxed as uint32) wrapped in IO. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceSurfaceSupportKHR(b_lean_obj_arg physicalDevice, uint32_t queueFamilyIndex, b_lean_obj_arg surface, b_lean_obj_arg w) {
  VkBool32 supported;
  VkResult ret = vkGetPhysicalDeviceSurfaceSupportKHR(
      (VkPhysicalDevice)lean_unbox_uint64(physicalDevice),
      queueFamilyIndex,
      (VkSurfaceKHR)lean_unbox_uint64(surface),
      &supported);
  // Build the (result, supported) pair: ctor with 2 object fields.
  lean_object *pair = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)ret));
  lean_ctor_set(pair, 1, lean_box_uint32((uint32_t)supported));
  return lean_io_result_mk_ok(pair);
}
/* Lean FFI glue for vkGetPhysicalDeviceSurfaceCapabilitiesKHR.
 * Queries the surface capabilities and marshals VkSurfaceCapabilitiesKHR into
 * a Lean ctor, returning the pair (VkResult boxed as uint64, capabilities)
 * wrapped in IO. The capabilities ctor has 7 object fields (three boxed
 * VkExtent2D values and four boxed uint32 flag/enum fields) plus 12 bytes of
 * scalar storage for minImageCount / maxImageCount / maxImageArrayLayers.
 * NOTE(review): the field order and scalar byte offsets below must match the
 * layout the Lean-side structure declaration produces — generated code. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(b_lean_obj_arg physicalDevice, b_lean_obj_arg surface, b_lean_obj_arg w) {
VkSurfaceCapabilitiesKHR out_pSurfaceCapabilities;
VkResult out_ret = vkGetPhysicalDeviceSurfaceCapabilitiesKHR((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), (VkSurfaceKHR)lean_unbox_uint64(surface), &out_pSurfaceCapabilities);
/* Capabilities ctor: 7 object fields, 12 scalar bytes (3 x uint32). */
lean_object *m_out_pSurfaceCapabilities = lean_alloc_ctor(0, 7, 12);
/* Field 0: currentExtent as a scalar-only ctor (0 objects, 8 bytes: width @0, height @4). */
VkExtent2D out_pSurfaceCapabilities_currentExtent = out_pSurfaceCapabilities.currentExtent;
lean_object *m_out_pSurfaceCapabilities_currentExtent = lean_alloc_ctor(0, 0, 8);
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities_currentExtent) + 0) = out_pSurfaceCapabilities_currentExtent.width;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities_currentExtent) + 4) = out_pSurfaceCapabilities_currentExtent.height;
lean_ctor_set(m_out_pSurfaceCapabilities, 0, m_out_pSurfaceCapabilities_currentExtent);
/* Field 1: minImageExtent (same 8-byte VkExtent2D encoding). */
VkExtent2D out_pSurfaceCapabilities_minImageExtent = out_pSurfaceCapabilities.minImageExtent;
lean_object *m_out_pSurfaceCapabilities_minImageExtent = lean_alloc_ctor(0, 0, 8);
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities_minImageExtent) + 0) = out_pSurfaceCapabilities_minImageExtent.width;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities_minImageExtent) + 4) = out_pSurfaceCapabilities_minImageExtent.height;
lean_ctor_set(m_out_pSurfaceCapabilities, 1, m_out_pSurfaceCapabilities_minImageExtent);
/* Field 2: maxImageExtent (same 8-byte VkExtent2D encoding). */
VkExtent2D out_pSurfaceCapabilities_maxImageExtent = out_pSurfaceCapabilities.maxImageExtent;
lean_object *m_out_pSurfaceCapabilities_maxImageExtent = lean_alloc_ctor(0, 0, 8);
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities_maxImageExtent) + 0) = out_pSurfaceCapabilities_maxImageExtent.width;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities_maxImageExtent) + 4) = out_pSurfaceCapabilities_maxImageExtent.height;
lean_ctor_set(m_out_pSurfaceCapabilities, 2, m_out_pSurfaceCapabilities_maxImageExtent);
/* Fields 3-6: flag/enum values boxed as uint32. */
VkSurfaceTransformFlagsKHR out_pSurfaceCapabilities_supportedTransforms = out_pSurfaceCapabilities.supportedTransforms;
lean_ctor_set(m_out_pSurfaceCapabilities, 3, lean_box_uint32((uint32_t)out_pSurfaceCapabilities_supportedTransforms));
VkSurfaceTransformFlagBitsKHR out_pSurfaceCapabilities_currentTransform = out_pSurfaceCapabilities.currentTransform;
lean_ctor_set(m_out_pSurfaceCapabilities, 4, lean_box_uint32((uint32_t)out_pSurfaceCapabilities_currentTransform));
VkCompositeAlphaFlagsKHR out_pSurfaceCapabilities_supportedCompositeAlpha = out_pSurfaceCapabilities.supportedCompositeAlpha;
lean_ctor_set(m_out_pSurfaceCapabilities, 5, lean_box_uint32((uint32_t)out_pSurfaceCapabilities_supportedCompositeAlpha));
VkImageUsageFlags out_pSurfaceCapabilities_supportedUsageFlags = out_pSurfaceCapabilities.supportedUsageFlags;
lean_ctor_set(m_out_pSurfaceCapabilities, 6, lean_box_uint32((uint32_t)out_pSurfaceCapabilities_supportedUsageFlags));
/* Scalar area: minImageCount @0, maxImageCount @4, maxImageArrayLayers @8. */
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities) + 0) = out_pSurfaceCapabilities.minImageCount;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities) + 4) = out_pSurfaceCapabilities.maxImageCount;
*(uint32_t*)(lean_ctor_scalar_cptr(m_out_pSurfaceCapabilities) + 8) = out_pSurfaceCapabilities.maxImageArrayLayers;
/* Wrap as (VkResult, capabilities) pair. */
lean_object *temp, *tuple = m_out_pSurfaceCapabilities;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
/* Lean FFI glue for vkGetPhysicalDeviceSurfaceFormatsKHR.
 * Standard Vulkan two-call enumeration: the first call (result ignored —
 * NOTE(review): a failure there would leave the count unset; the generator
 * relies on the second call's result) obtains the element count, the second
 * fills a temporary buffer, which is then marshalled into a Lean array of
 * (format, colorSpace) ctors. Returns the pair (VkResult boxed as uint64,
 * formats array) wrapped in IO.
 * Fix: the calloc'd out_pSurfaceFormats buffer was previously leaked; it is
 * freed once its contents are marshalled into Lean objects. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceSurfaceFormatsKHR(b_lean_obj_arg physicalDevice, b_lean_obj_arg surface, b_lean_obj_arg w) {
uint32_t len_out_pSurfaceFormats;
VkSurfaceFormatKHR* out_pSurfaceFormats;
// get length pSurfaceFormatCount of pSurfaceFormats
vkGetPhysicalDeviceSurfaceFormatsKHR((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), (VkSurfaceKHR)lean_unbox_uint64(surface), &len_out_pSurfaceFormats, NULL);
out_pSurfaceFormats = calloc(len_out_pSurfaceFormats, sizeof(VkSurfaceFormatKHR));
VkResult out_ret = vkGetPhysicalDeviceSurfaceFormatsKHR((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), (VkSurfaceKHR)lean_unbox_uint64(surface), &len_out_pSurfaceFormats, out_pSurfaceFormats);
lean_object *m_out_pSurfaceFormats = lean_alloc_array(len_out_pSurfaceFormats, len_out_pSurfaceFormats);
for (size_t i = 0; i < len_out_pSurfaceFormats; ++i) {
VkSurfaceFormatKHR i_out_pSurfaceFormats = out_pSurfaceFormats[i];
// Each element becomes a ctor with 2 boxed uint32 fields: format, colorSpace.
lean_object *m_i_out_pSurfaceFormats = lean_alloc_ctor(0, 2, 0);
VkFormat i_out_pSurfaceFormats_format = i_out_pSurfaceFormats.format;
lean_ctor_set(m_i_out_pSurfaceFormats, 0, lean_box_uint32((uint32_t)i_out_pSurfaceFormats_format));
VkColorSpaceKHR i_out_pSurfaceFormats_colorSpace = i_out_pSurfaceFormats.colorSpace;
lean_ctor_set(m_i_out_pSurfaceFormats, 1, lean_box_uint32((uint32_t)i_out_pSurfaceFormats_colorSpace));
lean_array_cptr(m_out_pSurfaceFormats)[i] = m_i_out_pSurfaceFormats;
}
free(out_pSurfaceFormats); // fix: enumeration buffer was leaked
lean_object *temp, *tuple = m_out_pSurfaceFormats;
temp = lean_alloc_ctor(0, 2, 0);
lean_ctor_set(temp, 0, lean_box_uint64((uint64_t)out_ret));
lean_ctor_set(temp, 1, tuple);
tuple = temp;
return lean_io_result_mk_ok(tuple);
}
/* Lean binding for vkGetPhysicalDeviceSurfacePresentModesKHR.
 * Returns IO (Result × Array PresentModeKHR); modes are boxed as UInt32. */
LEAN_EXPORT lean_obj_res glue_vkGetPhysicalDeviceSurfacePresentModesKHR(b_lean_obj_arg physicalDevice, b_lean_obj_arg surface, b_lean_obj_arg w) {
  uint32_t len_out_pPresentModes;
  VkPresentModeKHR* out_pPresentModes;
  /* Two-call enumeration: query the count, then fill the array. */
  // get length pPresentModeCount of pPresentModes
  vkGetPhysicalDeviceSurfacePresentModesKHR((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), (VkSurfaceKHR)lean_unbox_uint64(surface), &len_out_pPresentModes, NULL);
  out_pPresentModes = calloc(len_out_pPresentModes, sizeof(VkPresentModeKHR));
  VkResult out_ret = vkGetPhysicalDeviceSurfacePresentModesKHR((VkPhysicalDevice)lean_unbox_uint64(physicalDevice), (VkSurfaceKHR)lean_unbox_uint64(surface), &len_out_pPresentModes, out_pPresentModes);
  lean_object *m_out_pPresentModes = lean_alloc_array(len_out_pPresentModes, len_out_pPresentModes);
  for (size_t i = 0; i < len_out_pPresentModes; ++i) {
    lean_array_cptr(m_out_pPresentModes)[i] = lean_box_uint32((uint32_t)out_pPresentModes[i]);
  }
  free(out_pPresentModes); /* fix: temporary marshalling buffer was previously leaked */
  /* Pair up (result, presentModes) and wrap in IO. */
  lean_object *tuple = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(tuple, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(tuple, 1, m_out_pPresentModes);
  return lean_io_result_mk_ok(tuple);
}
/* Lean binding for vkCreateSwapchainKHR.
 * Unmarshals a Lean SwapchainCreateInfoKHR structure into the C struct,
 * creates the swapchain, and returns IO (Result × SwapchainKHR).
 * Scalar fields (minImageCount, imageArrayLayers, clipped) are read from the
 * constructor's scalar area at fixed byte offsets; oldSwapchain is not exposed. */
LEAN_EXPORT lean_obj_res glue_vkCreateSwapchainKHR(b_lean_obj_arg device, b_lean_obj_arg pCreateInfo, b_lean_obj_arg w) {
  lean_object *pCreateInfo_flags = lean_ctor_get(pCreateInfo, 0);
  lean_object *pCreateInfo_surface = lean_ctor_get(pCreateInfo, 1);
  lean_object *pCreateInfo_imageFormat = lean_ctor_get(pCreateInfo, 2);
  lean_object *pCreateInfo_imageColorSpace = lean_ctor_get(pCreateInfo, 3);
  lean_object *pCreateInfo_imageExtent = lean_ctor_get(pCreateInfo, 4);
  struct VkExtent2D um_pCreateInfo_imageExtent = {
    .width = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo_imageExtent) + lean_ctor_num_objs(pCreateInfo_imageExtent)) + 0),
    .height = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo_imageExtent) + lean_ctor_num_objs(pCreateInfo_imageExtent)) + 4),
  };
  lean_object *pCreateInfo_imageUsage = lean_ctor_get(pCreateInfo, 5);
  lean_object *pCreateInfo_imageSharingMode = lean_ctor_get(pCreateInfo, 6);
  /* Copy the Lean UInt32 array of queue family indices into a C array. */
  lean_object *pCreateInfo_pQueueFamilyIndices = lean_ctor_get(pCreateInfo, 7);
  size_t len_pCreateInfo_pQueueFamilyIndices = lean_array_size(pCreateInfo_pQueueFamilyIndices);
  uint32_t* um_pCreateInfo_pQueueFamilyIndices = calloc(len_pCreateInfo_pQueueFamilyIndices, sizeof(uint32_t));
  for (size_t i = 0; i < len_pCreateInfo_pQueueFamilyIndices; ++i) {
    lean_object *i_pCreateInfo_pQueueFamilyIndices = lean_array_cptr(pCreateInfo_pQueueFamilyIndices)[i];
    um_pCreateInfo_pQueueFamilyIndices[i] = lean_unbox_uint32(i_pCreateInfo_pQueueFamilyIndices);
  }
  lean_object *pCreateInfo_preTransform = lean_ctor_get(pCreateInfo, 8);
  lean_object *pCreateInfo_compositeAlpha = lean_ctor_get(pCreateInfo, 9);
  lean_object *pCreateInfo_presentMode = lean_ctor_get(pCreateInfo, 10);
  struct VkSwapchainCreateInfoKHR um_pCreateInfo = {
    .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
    .pNext = 0,
    .flags = (VkSwapchainCreateFlagsKHR)(VkSwapchainCreateFlagBitsKHR)lean_unbox_uint32(pCreateInfo_flags),
    .surface = (VkSurfaceKHR)lean_unbox_uint64(pCreateInfo_surface),
    .minImageCount = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 0),
    .imageFormat = (VkFormat)lean_unbox_uint32(pCreateInfo_imageFormat),
    .imageColorSpace = (VkColorSpaceKHR)lean_unbox_uint32(pCreateInfo_imageColorSpace),
    .imageExtent = um_pCreateInfo_imageExtent,
    .imageArrayLayers = (uint32_t)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 4),
    .imageUsage = (VkImageUsageFlags)(VkImageUsageFlagBits)lean_unbox_uint32(pCreateInfo_imageUsage),
    .imageSharingMode = (VkSharingMode)lean_unbox_uint32(pCreateInfo_imageSharingMode),
    .queueFamilyIndexCount = len_pCreateInfo_pQueueFamilyIndices,
    .pQueueFamilyIndices = um_pCreateInfo_pQueueFamilyIndices,
    .preTransform = (VkSurfaceTransformFlagBitsKHR)lean_unbox_uint32(pCreateInfo_preTransform),
    .compositeAlpha = (VkCompositeAlphaFlagBitsKHR)lean_unbox_uint32(pCreateInfo_compositeAlpha),
    .presentMode = (VkPresentModeKHR)lean_unbox_uint32(pCreateInfo_presentMode),
    .clipped = (VkBool32)*(uint32_t*)((uint8_t*)(lean_ctor_obj_cptr(pCreateInfo) + lean_ctor_num_objs(pCreateInfo)) + 8),
    .oldSwapchain = NULL,
  };
  VkSwapchainKHR out_pSwapchain;
  VkResult out_ret = vkCreateSwapchainKHR((VkDevice)lean_unbox_uint64(device), &um_pCreateInfo, NULL, &out_pSwapchain);
  free(um_pCreateInfo_pQueueFamilyIndices); /* fix: queue-family index buffer was previously leaked */
  /* Pair up (result, swapchain) and wrap in IO. */
  lean_object *tuple = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(tuple, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(tuple, 1, lean_box_uint64((uint64_t)out_pSwapchain));
  return lean_io_result_mk_ok(tuple);
}
/* Lean binding for vkDestroySwapchainKHR: unbox both handles and forward;
 * no allocator callbacks are used. Returns IO Unit. */
LEAN_EXPORT lean_obj_res glue_vkDestroySwapchainKHR(b_lean_obj_arg device, b_lean_obj_arg swapchain, b_lean_obj_arg w) {
  VkDevice dev = (VkDevice)lean_unbox_uint64(device);
  VkSwapchainKHR sc = (VkSwapchainKHR)lean_unbox_uint64(swapchain);
  vkDestroySwapchainKHR(dev, sc, NULL);
  return lean_io_result_mk_ok(lean_box(0));
}
/* Lean binding for vkGetSwapchainImagesKHR.
 * Returns IO (Result × Array Image); image handles are boxed as UInt64. */
LEAN_EXPORT lean_obj_res glue_vkGetSwapchainImagesKHR(b_lean_obj_arg device, b_lean_obj_arg swapchain, b_lean_obj_arg w) {
  uint32_t len_out_pSwapchainImages;
  VkImage* out_pSwapchainImages;
  /* Two-call enumeration: query the count, then fill the array. */
  // get length pSwapchainImageCount of pSwapchainImages
  vkGetSwapchainImagesKHR((VkDevice)lean_unbox_uint64(device), (VkSwapchainKHR)lean_unbox_uint64(swapchain), &len_out_pSwapchainImages, NULL);
  out_pSwapchainImages = calloc(len_out_pSwapchainImages, sizeof(VkImage));
  VkResult out_ret = vkGetSwapchainImagesKHR((VkDevice)lean_unbox_uint64(device), (VkSwapchainKHR)lean_unbox_uint64(swapchain), &len_out_pSwapchainImages, out_pSwapchainImages);
  lean_object *m_out_pSwapchainImages = lean_alloc_array(len_out_pSwapchainImages, len_out_pSwapchainImages);
  for (size_t i = 0; i < len_out_pSwapchainImages; ++i) {
    lean_array_cptr(m_out_pSwapchainImages)[i] = lean_box_uint64((uint64_t)out_pSwapchainImages[i]);
  }
  free(out_pSwapchainImages); /* fix: temporary marshalling buffer was previously leaked */
  /* Pair up (result, images) and wrap in IO. */
  lean_object *tuple = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(tuple, 0, lean_box_uint64((uint64_t)out_ret));
  lean_ctor_set(tuple, 1, m_out_pSwapchainImages);
  return lean_io_result_mk_ok(tuple);
}
/* Lean binding for vkAcquireNextImageKHR.
 * Only a semaphore is exposed to Lean; the fence argument is intentionally
 * VK_NULL_HANDLE. Returns IO (Result × UInt32 image index). */
LEAN_EXPORT lean_obj_res glue_vkAcquireNextImageKHR(b_lean_obj_arg device, b_lean_obj_arg swapchain, uint64_t timeout, b_lean_obj_arg semaphore, b_lean_obj_arg w) {
  uint32_t imageIndex;
  VkResult res = vkAcquireNextImageKHR(
      (VkDevice)lean_unbox_uint64(device),
      (VkSwapchainKHR)lean_unbox_uint64(swapchain),
      timeout,
      (VkSemaphore)lean_unbox_uint64(semaphore),
      VK_NULL_HANDLE,
      &imageIndex);
  /* Pair up (result, imageIndex) and wrap in IO. */
  lean_object *pair = lean_alloc_ctor(0, 2, 0);
  lean_ctor_set(pair, 0, lean_box_uint64((uint64_t)res));
  lean_ctor_set(pair, 1, lean_box_uint32(imageIndex));
  return lean_io_result_mk_ok(pair);
}
/* Lean binding for vkQueuePresentKHR.
 * Unmarshals the wait-semaphore, swapchain, image-index and per-swapchain
 * result arrays from the Lean PresentInfo constructor, presents, and returns
 * IO Result. NOTE: the driver writes per-swapchain results into pResults,
 * but they are currently discarded on return (only the aggregate VkResult
 * is surfaced to Lean). */
LEAN_EXPORT lean_obj_res glue_vkQueuePresentKHR(b_lean_obj_arg queue, b_lean_obj_arg pPresentInfo, b_lean_obj_arg w) {
  lean_object *pPresentInfo_pWaitSemaphores = lean_ctor_get(pPresentInfo, 0);
  size_t len_pPresentInfo_pWaitSemaphores = lean_array_size(pPresentInfo_pWaitSemaphores);
  VkSemaphore* um_pPresentInfo_pWaitSemaphores = calloc(len_pPresentInfo_pWaitSemaphores, sizeof(VkSemaphore));
  for (size_t i = 0; i < len_pPresentInfo_pWaitSemaphores; ++i) {
    lean_object *i_pPresentInfo_pWaitSemaphores = lean_array_cptr(pPresentInfo_pWaitSemaphores)[i];
    um_pPresentInfo_pWaitSemaphores[i] = (VkSemaphore)lean_unbox_uint64(i_pPresentInfo_pWaitSemaphores);
  }
  lean_object *pPresentInfo_pSwapchains = lean_ctor_get(pPresentInfo, 1);
  size_t len_pPresentInfo_pSwapchains = lean_array_size(pPresentInfo_pSwapchains);
  VkSwapchainKHR* um_pPresentInfo_pSwapchains = calloc(len_pPresentInfo_pSwapchains, sizeof(VkSwapchainKHR));
  for (size_t i = 0; i < len_pPresentInfo_pSwapchains; ++i) {
    lean_object *i_pPresentInfo_pSwapchains = lean_array_cptr(pPresentInfo_pSwapchains)[i];
    um_pPresentInfo_pSwapchains[i] = (VkSwapchainKHR)lean_unbox_uint64(i_pPresentInfo_pSwapchains);
  }
  lean_object *pPresentInfo_pImageIndices = lean_ctor_get(pPresentInfo, 2);
  size_t len_pPresentInfo_pImageIndices = lean_array_size(pPresentInfo_pImageIndices);
  uint32_t* um_pPresentInfo_pImageIndices = calloc(len_pPresentInfo_pImageIndices, sizeof(uint32_t));
  for (size_t i = 0; i < len_pPresentInfo_pImageIndices; ++i) {
    lean_object *i_pPresentInfo_pImageIndices = lean_array_cptr(pPresentInfo_pImageIndices)[i];
    um_pPresentInfo_pImageIndices[i] = lean_unbox_uint32(i_pPresentInfo_pImageIndices);
  }
  lean_object *pPresentInfo_pResults = lean_ctor_get(pPresentInfo, 3);
  size_t len_pPresentInfo_pResults = lean_array_size(pPresentInfo_pResults);
  VkResult* um_pPresentInfo_pResults = calloc(len_pPresentInfo_pResults, sizeof(VkResult));
  for (size_t i = 0; i < len_pPresentInfo_pResults; ++i) {
    lean_object *i_pPresentInfo_pResults = lean_array_cptr(pPresentInfo_pResults)[i];
    um_pPresentInfo_pResults[i] = (VkResult)lean_unbox_uint64(i_pPresentInfo_pResults);
  }
  struct VkPresentInfoKHR um_pPresentInfo = {
    .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
    .pNext = 0,
    .waitSemaphoreCount = len_pPresentInfo_pWaitSemaphores,
    .pWaitSemaphores = um_pPresentInfo_pWaitSemaphores,
    .swapchainCount = len_pPresentInfo_pSwapchains,
    .pSwapchains = um_pPresentInfo_pSwapchains,
    .pImageIndices = um_pPresentInfo_pImageIndices,
    .pResults = um_pPresentInfo_pResults,
  };
  VkResult out_ret = vkQueuePresentKHR((VkQueue)lean_unbox_uint64(queue), &um_pPresentInfo);
  /* fix: all four marshalling buffers were previously leaked */
  free(um_pPresentInfo_pWaitSemaphores);
  free(um_pPresentInfo_pSwapchains);
  free(um_pPresentInfo_pImageIndices);
  free(um_pPresentInfo_pResults);
  return lean_io_result_mk_ok(lean_box_uint64((uint64_t)out_ret));
}
-- Lean-side declarations of the generated Vulkan bindings.
set_option autoImplicit false
namespace Vk
-- Fixed-length arrays are represented as plain `Array`s; the `Nat` index is phantom.
abbrev FixedArray α (_ : Nat) := Array α
/-- Bitmask wrapper for `VkInstanceCreateFlags`; the constructor is private so
values can only be built via the bitwise instances below. -/
structure InstanceCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
-- `|||` unions two flag sets; `&&&` tests whether any bit overlaps; `default` is the empty set.
instance : HOr InstanceCreateFlags InstanceCreateFlags InstanceCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd InstanceCreateFlags InstanceCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited InstanceCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkApplicationInfo`; `sType`/`pNext` are supplied by the C glue. -/
structure ApplicationInfo where
applicationName : String
applicationVersion : UInt32 := 0
engineName : String
engineVersion : UInt32 := 0
apiVersion : UInt32 := 0
/-- Mirrors `VkInstanceCreateInfo`; layer/extension counts are derived from
the array lengths by the C glue. -/
structure InstanceCreateInfo where
flags : InstanceCreateFlags := default
applicationInfo : Option (ApplicationInfo) := none
enabledLayerNames : Array (String) := .empty
enabledExtensionNames : Array (String) := .empty
-- `VkResult` codes are carried as raw 64-bit values.
abbrev Result : Type := UInt64
-- Opaque dispatchable handle (`VkInstance`), stored as a boxed UInt64.
opaque Instance : Type := UInt64
@[extern "glue_vkCreateInstance"]
opaque createInstance : (@& InstanceCreateInfo) → IO (Result × Instance)
@[extern "glue_vkDestroyInstance"]
opaque destroyInstance : (@& Instance) → IO Unit
-- Opaque handle (`VkPhysicalDevice`), stored as a boxed UInt64.
opaque PhysicalDevice : Type := UInt64
@[extern "glue_vkEnumeratePhysicalDevices"]
opaque enumeratePhysicalDevices : (@& Instance) → IO (Result × Array (PhysicalDevice))
-- `VkBool32`: 0 = false, nonzero = true, kept as UInt32 to match the C ABI.
abbrev Bool32 : Type := UInt32
/-- Mirrors `VkPhysicalDeviceFeatures`; every field defaults to unsupported (0). -/
structure PhysicalDeviceFeatures where
robustBufferAccess : Bool32 := 0
fullDrawIndexUint32 : Bool32 := 0
imageCubeArray : Bool32 := 0
independentBlend : Bool32 := 0
geometryShader : Bool32 := 0
tessellationShader : Bool32 := 0
sampleRateShading : Bool32 := 0
dualSrcBlend : Bool32 := 0
logicOp : Bool32 := 0
multiDrawIndirect : Bool32 := 0
drawIndirectFirstInstance : Bool32 := 0
depthClamp : Bool32 := 0
depthBiasClamp : Bool32 := 0
fillModeNonSolid : Bool32 := 0
depthBounds : Bool32 := 0
wideLines : Bool32 := 0
largePoints : Bool32 := 0
alphaToOne : Bool32 := 0
multiViewport : Bool32 := 0
samplerAnisotropy : Bool32 := 0
textureCompressionETC2 : Bool32 := 0
textureCompressionASTC_LDR : Bool32 := 0
textureCompressionBC : Bool32 := 0
occlusionQueryPrecise : Bool32 := 0
pipelineStatisticsQuery : Bool32 := 0
vertexPipelineStoresAndAtomics : Bool32 := 0
fragmentStoresAndAtomics : Bool32 := 0
shaderTessellationAndGeometryPointSize : Bool32 := 0
shaderImageGatherExtended : Bool32 := 0
shaderStorageImageExtendedFormats : Bool32 := 0
shaderStorageImageMultisample : Bool32 := 0
shaderStorageImageReadWithoutFormat : Bool32 := 0
shaderStorageImageWriteWithoutFormat : Bool32 := 0
shaderUniformBufferArrayDynamicIndexing : Bool32 := 0
shaderSampledImageArrayDynamicIndexing : Bool32 := 0
shaderStorageBufferArrayDynamicIndexing : Bool32 := 0
shaderStorageImageArrayDynamicIndexing : Bool32 := 0
shaderClipDistance : Bool32 := 0
shaderCullDistance : Bool32 := 0
shaderFloat64 : Bool32 := 0
shaderInt64 : Bool32 := 0
shaderInt16 : Bool32 := 0
shaderResourceResidency : Bool32 := 0
shaderResourceMinLod : Bool32 := 0
sparseBinding : Bool32 := 0
sparseResidencyBuffer : Bool32 := 0
sparseResidencyImage2D : Bool32 := 0
sparseResidencyImage3D : Bool32 := 0
sparseResidency2Samples : Bool32 := 0
sparseResidency4Samples : Bool32 := 0
sparseResidency8Samples : Bool32 := 0
sparseResidency16Samples : Bool32 := 0
sparseResidencyAliased : Bool32 := 0
variableMultisampleRate : Bool32 := 0
inheritedQueries : Bool32 := 0
@[extern "glue_vkGetPhysicalDeviceFeatures"]
opaque getPhysicalDeviceFeatures : (@& PhysicalDevice) → IO PhysicalDeviceFeatures
/-- Enum wrapper for `VkFormat`; the numeric constants below match the
`VK_FORMAT_*` values of the Vulkan specification exactly. -/
structure Format := private mk :: private v : UInt32
deriving DecidableEq
def Format.undefined := mk 0
def Format.r4g4UnormPack8 := mk 1
def Format.r4g4b4a4UnormPack16 := mk 2
def Format.b4g4r4a4UnormPack16 := mk 3
def Format.r5g6b5UnormPack16 := mk 4
def Format.b5g6r5UnormPack16 := mk 5
def Format.r5g5b5a1UnormPack16 := mk 6
def Format.b5g5r5a1UnormPack16 := mk 7
def Format.a1r5g5b5UnormPack16 := mk 8
def Format.r8Unorm := mk 9
def Format.r8Snorm := mk 10
def Format.r8Uscaled := mk 11
def Format.r8Sscaled := mk 12
def Format.r8Uint := mk 13
def Format.r8Sint := mk 14
def Format.r8Srgb := mk 15
def Format.r8g8Unorm := mk 16
def Format.r8g8Snorm := mk 17
def Format.r8g8Uscaled := mk 18
def Format.r8g8Sscaled := mk 19
def Format.r8g8Uint := mk 20
def Format.r8g8Sint := mk 21
def Format.r8g8Srgb := mk 22
def Format.r8g8b8Unorm := mk 23
def Format.r8g8b8Snorm := mk 24
def Format.r8g8b8Uscaled := mk 25
def Format.r8g8b8Sscaled := mk 26
def Format.r8g8b8Uint := mk 27
def Format.r8g8b8Sint := mk 28
def Format.r8g8b8Srgb := mk 29
def Format.b8g8r8Unorm := mk 30
def Format.b8g8r8Snorm := mk 31
def Format.b8g8r8Uscaled := mk 32
def Format.b8g8r8Sscaled := mk 33
def Format.b8g8r8Uint := mk 34
def Format.b8g8r8Sint := mk 35
def Format.b8g8r8Srgb := mk 36
def Format.r8g8b8a8Unorm := mk 37
def Format.r8g8b8a8Snorm := mk 38
def Format.r8g8b8a8Uscaled := mk 39
def Format.r8g8b8a8Sscaled := mk 40
def Format.r8g8b8a8Uint := mk 41
def Format.r8g8b8a8Sint := mk 42
def Format.r8g8b8a8Srgb := mk 43
def Format.b8g8r8a8Unorm := mk 44
def Format.b8g8r8a8Snorm := mk 45
def Format.b8g8r8a8Uscaled := mk 46
def Format.b8g8r8a8Sscaled := mk 47
def Format.b8g8r8a8Uint := mk 48
def Format.b8g8r8a8Sint := mk 49
def Format.b8g8r8a8Srgb := mk 50
def Format.a8b8g8r8UnormPack32 := mk 51
def Format.a8b8g8r8SnormPack32 := mk 52
def Format.a8b8g8r8UscaledPack32 := mk 53
def Format.a8b8g8r8SscaledPack32 := mk 54
def Format.a8b8g8r8UintPack32 := mk 55
def Format.a8b8g8r8SintPack32 := mk 56
def Format.a8b8g8r8SrgbPack32 := mk 57
def Format.a2r10g10b10UnormPack32 := mk 58
def Format.a2r10g10b10SnormPack32 := mk 59
def Format.a2r10g10b10UscaledPack32 := mk 60
def Format.a2r10g10b10SscaledPack32 := mk 61
def Format.a2r10g10b10UintPack32 := mk 62
def Format.a2r10g10b10SintPack32 := mk 63
def Format.a2b10g10r10UnormPack32 := mk 64
def Format.a2b10g10r10SnormPack32 := mk 65
def Format.a2b10g10r10UscaledPack32 := mk 66
def Format.a2b10g10r10SscaledPack32 := mk 67
def Format.a2b10g10r10UintPack32 := mk 68
def Format.a2b10g10r10SintPack32 := mk 69
def Format.r16Unorm := mk 70
def Format.r16Snorm := mk 71
def Format.r16Uscaled := mk 72
def Format.r16Sscaled := mk 73
def Format.r16Uint := mk 74
def Format.r16Sint := mk 75
def Format.r16Sfloat := mk 76
def Format.r16g16Unorm := mk 77
def Format.r16g16Snorm := mk 78
def Format.r16g16Uscaled := mk 79
def Format.r16g16Sscaled := mk 80
def Format.r16g16Uint := mk 81
def Format.r16g16Sint := mk 82
def Format.r16g16Sfloat := mk 83
def Format.r16g16b16Unorm := mk 84
def Format.r16g16b16Snorm := mk 85
def Format.r16g16b16Uscaled := mk 86
def Format.r16g16b16Sscaled := mk 87
def Format.r16g16b16Uint := mk 88
def Format.r16g16b16Sint := mk 89
def Format.r16g16b16Sfloat := mk 90
def Format.r16g16b16a16Unorm := mk 91
def Format.r16g16b16a16Snorm := mk 92
def Format.r16g16b16a16Uscaled := mk 93
def Format.r16g16b16a16Sscaled := mk 94
def Format.r16g16b16a16Uint := mk 95
def Format.r16g16b16a16Sint := mk 96
def Format.r16g16b16a16Sfloat := mk 97
def Format.r32Uint := mk 98
def Format.r32Sint := mk 99
def Format.r32Sfloat := mk 100
def Format.r32g32Uint := mk 101
def Format.r32g32Sint := mk 102
def Format.r32g32Sfloat := mk 103
def Format.r32g32b32Uint := mk 104
def Format.r32g32b32Sint := mk 105
def Format.r32g32b32Sfloat := mk 106
def Format.r32g32b32a32Uint := mk 107
def Format.r32g32b32a32Sint := mk 108
def Format.r32g32b32a32Sfloat := mk 109
def Format.r64Uint := mk 110
def Format.r64Sint := mk 111
def Format.r64Sfloat := mk 112
def Format.r64g64Uint := mk 113
def Format.r64g64Sint := mk 114
def Format.r64g64Sfloat := mk 115
def Format.r64g64b64Uint := mk 116
def Format.r64g64b64Sint := mk 117
def Format.r64g64b64Sfloat := mk 118
def Format.r64g64b64a64Uint := mk 119
def Format.r64g64b64a64Sint := mk 120
def Format.r64g64b64a64Sfloat := mk 121
def Format.b10g11r11UfloatPack32 := mk 122
def Format.e5b9g9r9UfloatPack32 := mk 123
def Format.d16Unorm := mk 124
def Format.x8D24UnormPack32 := mk 125
def Format.d32Sfloat := mk 126
def Format.s8Uint := mk 127
def Format.d16UnormS8Uint := mk 128
def Format.d24UnormS8Uint := mk 129
def Format.d32SfloatS8Uint := mk 130
def Format.bc1RgbUnormBlock := mk 131
def Format.bc1RgbSrgbBlock := mk 132
def Format.bc1RgbaUnormBlock := mk 133
def Format.bc1RgbaSrgbBlock := mk 134
def Format.bc2UnormBlock := mk 135
def Format.bc2SrgbBlock := mk 136
def Format.bc3UnormBlock := mk 137
def Format.bc3SrgbBlock := mk 138
def Format.bc4UnormBlock := mk 139
def Format.bc4SnormBlock := mk 140
def Format.bc5UnormBlock := mk 141
def Format.bc5SnormBlock := mk 142
def Format.bc6hUfloatBlock := mk 143
def Format.bc6hSfloatBlock := mk 144
def Format.bc7UnormBlock := mk 145
def Format.bc7SrgbBlock := mk 146
def Format.etc2R8g8b8UnormBlock := mk 147
def Format.etc2R8g8b8SrgbBlock := mk 148
def Format.etc2R8g8b8a1UnormBlock := mk 149
def Format.etc2R8g8b8a1SrgbBlock := mk 150
def Format.etc2R8g8b8a8UnormBlock := mk 151
def Format.etc2R8g8b8a8SrgbBlock := mk 152
def Format.eacR11UnormBlock := mk 153
def Format.eacR11SnormBlock := mk 154
def Format.eacR11g11UnormBlock := mk 155
def Format.eacR11g11SnormBlock := mk 156
def Format.astc4x4UnormBlock := mk 157
def Format.astc4x4SrgbBlock := mk 158
def Format.astc5x4UnormBlock := mk 159
def Format.astc5x4SrgbBlock := mk 160
def Format.astc5x5UnormBlock := mk 161
def Format.astc5x5SrgbBlock := mk 162
def Format.astc6x5UnormBlock := mk 163
def Format.astc6x5SrgbBlock := mk 164
def Format.astc6x6UnormBlock := mk 165
def Format.astc6x6SrgbBlock := mk 166
def Format.astc8x5UnormBlock := mk 167
def Format.astc8x5SrgbBlock := mk 168
def Format.astc8x6UnormBlock := mk 169
def Format.astc8x6SrgbBlock := mk 170
def Format.astc8x8UnormBlock := mk 171
def Format.astc8x8SrgbBlock := mk 172
def Format.astc10x5UnormBlock := mk 173
def Format.astc10x5SrgbBlock := mk 174
def Format.astc10x6UnormBlock := mk 175
def Format.astc10x6SrgbBlock := mk 176
def Format.astc10x8UnormBlock := mk 177
def Format.astc10x8SrgbBlock := mk 178
def Format.astc10x10UnormBlock := mk 179
def Format.astc10x10SrgbBlock := mk 180
def Format.astc12x10UnormBlock := mk 181
def Format.astc12x10SrgbBlock := mk 182
def Format.astc12x12UnormBlock := mk 183
def Format.astc12x12SrgbBlock := mk 184
/-- Bitmask wrapper for `VkFormatFeatureFlags`; bit values match
`VK_FORMAT_FEATURE_*_BIT` of the Vulkan specification. -/
structure FormatFeatureFlags := private mk :: private v : UInt32
deriving DecidableEq
def FormatFeatureFlags.sampledImage := mk 1
def FormatFeatureFlags.storageImage := mk 2
def FormatFeatureFlags.storageImageAtomic := mk 4
def FormatFeatureFlags.uniformTexelBuffer := mk 8
def FormatFeatureFlags.storageTexelBuffer := mk 16
def FormatFeatureFlags.storageTexelBufferAtomic := mk 32
def FormatFeatureFlags.vertexBuffer := mk 64
def FormatFeatureFlags.colorAttachment := mk 128
def FormatFeatureFlags.colorAttachmentBlend := mk 256
def FormatFeatureFlags.depthStencilAttachment := mk 512
def FormatFeatureFlags.blitSrc := mk 1024
def FormatFeatureFlags.blitDst := mk 2048
def FormatFeatureFlags.sampledImageFilterLinear := mk 4096
-- `|||` unions flag sets; `&&&` tests for overlap; `default` is empty.
instance : HOr FormatFeatureFlags FormatFeatureFlags FormatFeatureFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd FormatFeatureFlags FormatFeatureFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited FormatFeatureFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkFormatProperties`. -/
structure FormatProperties where
linearTilingFeatures : FormatFeatureFlags := default
optimalTilingFeatures : FormatFeatureFlags := default
bufferFeatures : FormatFeatureFlags := default
@[extern "glue_vkGetPhysicalDeviceFormatProperties"]
opaque getPhysicalDeviceFormatProperties : (@& PhysicalDevice) → (@& Format) → IO FormatProperties
/-- Enum wrapper for `VkImageType` (1D/2D/3D). -/
structure ImageType := private mk :: private v : UInt32
deriving DecidableEq
def ImageType._1d := mk 0
def ImageType._2d := mk 1
def ImageType._3d := mk 2
/-- Enum wrapper for `VkImageTiling`. -/
structure ImageTiling := private mk :: private v : UInt32
deriving DecidableEq
def ImageTiling.optimal := mk 0
def ImageTiling.linear := mk 1
/-- Bitmask wrapper for `VkImageUsageFlags`. -/
structure ImageUsageFlags := private mk :: private v : UInt32
deriving DecidableEq
def ImageUsageFlags.transferSrc := mk 1
def ImageUsageFlags.transferDst := mk 2
def ImageUsageFlags.sampled := mk 4
def ImageUsageFlags.storage := mk 8
def ImageUsageFlags.colorAttachment := mk 16
def ImageUsageFlags.depthStencilAttachment := mk 32
def ImageUsageFlags.transientAttachment := mk 64
def ImageUsageFlags.inputAttachment := mk 128
instance : HOr ImageUsageFlags ImageUsageFlags ImageUsageFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd ImageUsageFlags ImageUsageFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited ImageUsageFlags := ⟨⟨0⟩⟩
/-- Bitmask wrapper for `VkImageCreateFlags`. -/
structure ImageCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
def ImageCreateFlags.sparseBinding := mk 1
def ImageCreateFlags.sparseResidency := mk 2
def ImageCreateFlags.sparseAliased := mk 4
def ImageCreateFlags.mutableFormat := mk 8
def ImageCreateFlags.cubeCompatible := mk 16
instance : HOr ImageCreateFlags ImageCreateFlags ImageCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd ImageCreateFlags ImageCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited ImageCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkExtent3D`. -/
structure Extent3D where
width : UInt32 := 0
height : UInt32 := 0
depth : UInt32 := 0
/-- Bitmask wrapper for `VkSampleCountFlags` (powers of two, 1–64 samples). -/
structure SampleCountFlags := private mk :: private v : UInt32
deriving DecidableEq
def SampleCountFlags._1 := mk 1
def SampleCountFlags._2 := mk 2
def SampleCountFlags._4 := mk 4
def SampleCountFlags._8 := mk 8
def SampleCountFlags._16 := mk 16
def SampleCountFlags._32 := mk 32
def SampleCountFlags._64 := mk 64
instance : HOr SampleCountFlags SampleCountFlags SampleCountFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd SampleCountFlags SampleCountFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited SampleCountFlags := ⟨⟨0⟩⟩
-- `VkDeviceSize` is a 64-bit byte count.
abbrev DeviceSize : Type := UInt64
/-- Mirrors `VkImageFormatProperties`. -/
structure ImageFormatProperties where
maxExtent : Extent3D
maxMipLevels : UInt32 := 0
maxArrayLayers : UInt32 := 0
sampleCounts : SampleCountFlags := default
maxResourceSize : DeviceSize := 0
@[extern "glue_vkGetPhysicalDeviceImageFormatProperties"]
opaque getPhysicalDeviceImageFormatProperties : (@& PhysicalDevice) → (@& Format) → (@& ImageType) → (@& ImageTiling) → (@& ImageUsageFlags) → (@& ImageCreateFlags) → IO (Result × ImageFormatProperties)
/-- Enum wrapper for `VkPhysicalDeviceType`. -/
structure PhysicalDeviceType := private mk :: private v : UInt32
deriving DecidableEq
def PhysicalDeviceType.other := mk 0
def PhysicalDeviceType.integratedGpu := mk 1
def PhysicalDeviceType.discreteGpu := mk 2
def PhysicalDeviceType.virtualGpu := mk 3
def PhysicalDeviceType.cpu := mk 4
/-- Mirrors `VkPhysicalDeviceLimits`. C float[2]/uint32[3] members are mapped
to `FloatArray`/`Array UInt32`; pure-`Float` fields have no default because
`Float` literals are not `default`-derivable here. -/
structure PhysicalDeviceLimits where
maxImageDimension1D : UInt32 := 0
maxImageDimension2D : UInt32 := 0
maxImageDimension3D : UInt32 := 0
maxImageDimensionCube : UInt32 := 0
maxImageArrayLayers : UInt32 := 0
maxTexelBufferElements : UInt32 := 0
maxUniformBufferRange : UInt32 := 0
maxStorageBufferRange : UInt32 := 0
maxPushConstantsSize : UInt32 := 0
maxMemoryAllocationCount : UInt32 := 0
maxSamplerAllocationCount : UInt32 := 0
bufferImageGranularity : DeviceSize := 0
sparseAddressSpaceSize : DeviceSize := 0
maxBoundDescriptorSets : UInt32 := 0
maxPerStageDescriptorSamplers : UInt32 := 0
maxPerStageDescriptorUniformBuffers : UInt32 := 0
maxPerStageDescriptorStorageBuffers : UInt32 := 0
maxPerStageDescriptorSampledImages : UInt32 := 0
maxPerStageDescriptorStorageImages : UInt32 := 0
maxPerStageDescriptorInputAttachments : UInt32 := 0
maxPerStageResources : UInt32 := 0
maxDescriptorSetSamplers : UInt32 := 0
maxDescriptorSetUniformBuffers : UInt32 := 0
maxDescriptorSetUniformBuffersDynamic : UInt32 := 0
maxDescriptorSetStorageBuffers : UInt32 := 0
maxDescriptorSetStorageBuffersDynamic : UInt32 := 0
maxDescriptorSetSampledImages : UInt32 := 0
maxDescriptorSetStorageImages : UInt32 := 0
maxDescriptorSetInputAttachments : UInt32 := 0
maxVertexInputAttributes : UInt32 := 0
maxVertexInputBindings : UInt32 := 0
maxVertexInputAttributeOffset : UInt32 := 0
maxVertexInputBindingStride : UInt32 := 0
maxVertexOutputComponents : UInt32 := 0
maxTessellationGenerationLevel : UInt32 := 0
maxTessellationPatchSize : UInt32 := 0
maxTessellationControlPerVertexInputComponents : UInt32 := 0
maxTessellationControlPerVertexOutputComponents : UInt32 := 0
maxTessellationControlPerPatchOutputComponents : UInt32 := 0
maxTessellationControlTotalOutputComponents : UInt32 := 0
maxTessellationEvaluationInputComponents : UInt32 := 0
maxTessellationEvaluationOutputComponents : UInt32 := 0
maxGeometryShaderInvocations : UInt32 := 0
maxGeometryInputComponents : UInt32 := 0
maxGeometryOutputComponents : UInt32 := 0
maxGeometryOutputVertices : UInt32 := 0
maxGeometryTotalOutputComponents : UInt32 := 0
maxFragmentInputComponents : UInt32 := 0
maxFragmentOutputAttachments : UInt32 := 0
maxFragmentDualSrcAttachments : UInt32 := 0
maxFragmentCombinedOutputResources : UInt32 := 0
maxComputeSharedMemorySize : UInt32 := 0
maxComputeWorkGroupCount : Array (UInt32) := .empty
maxComputeWorkGroupInvocations : UInt32 := 0
maxComputeWorkGroupSize : Array (UInt32) := .empty
subPixelPrecisionBits : UInt32 := 0
subTexelPrecisionBits : UInt32 := 0
mipmapPrecisionBits : UInt32 := 0
maxDrawIndexedIndexValue : UInt32 := 0
maxDrawIndirectCount : UInt32 := 0
maxSamplerLodBias : Float
maxSamplerAnisotropy : Float
maxViewports : UInt32 := 0
maxViewportDimensions : Array (UInt32) := .empty
viewportBoundsRange : FloatArray := .empty
viewportSubPixelBits : UInt32 := 0
minMemoryMapAlignment : UInt64 := 0
minTexelBufferOffsetAlignment : DeviceSize := 0
minUniformBufferOffsetAlignment : DeviceSize := 0
minStorageBufferOffsetAlignment : DeviceSize := 0
minTexelOffset : UInt32 := 0
maxTexelOffset : UInt32 := 0
minTexelGatherOffset : UInt32 := 0
maxTexelGatherOffset : UInt32 := 0
minInterpolationOffset : Float
maxInterpolationOffset : Float
subPixelInterpolationOffsetBits : UInt32 := 0
maxFramebufferWidth : UInt32 := 0
maxFramebufferHeight : UInt32 := 0
maxFramebufferLayers : UInt32 := 0
framebufferColorSampleCounts : SampleCountFlags := default
framebufferDepthSampleCounts : SampleCountFlags := default
framebufferStencilSampleCounts : SampleCountFlags := default
framebufferNoAttachmentsSampleCounts : SampleCountFlags := default
maxColorAttachments : UInt32 := 0
sampledImageColorSampleCounts : SampleCountFlags := default
sampledImageIntegerSampleCounts : SampleCountFlags := default
sampledImageDepthSampleCounts : SampleCountFlags := default
sampledImageStencilSampleCounts : SampleCountFlags := default
storageImageSampleCounts : SampleCountFlags := default
maxSampleMaskWords : UInt32 := 0
timestampComputeAndGraphics : Bool32 := 0
timestampPeriod : Float
maxClipDistances : UInt32 := 0
maxCullDistances : UInt32 := 0
maxCombinedClipAndCullDistances : UInt32 := 0
discreteQueuePriorities : UInt32 := 0
pointSizeRange : FloatArray := .empty
lineWidthRange : FloatArray := .empty
pointSizeGranularity : Float
lineWidthGranularity : Float
strictLines : Bool32 := 0
standardSampleLocations : Bool32 := 0
optimalBufferCopyOffsetAlignment : DeviceSize := 0
optimalBufferCopyRowPitchAlignment : DeviceSize := 0
nonCoherentAtomSize : DeviceSize := 0
/-- Mirrors `VkPhysicalDeviceSparseProperties`. -/
structure PhysicalDeviceSparseProperties where
residencyStandard2DBlockShape : Bool32 := 0
residencyStandard2DMultisampleBlockShape : Bool32 := 0
residencyStandard3DBlockShape : Bool32 := 0
residencyAlignedMipSize : Bool32 := 0
residencyNonResidentStrict : Bool32 := 0
/-- Mirrors `VkPhysicalDeviceProperties`; the fixed-size name/UUID C arrays
are not surfaced here. -/
structure PhysicalDeviceProperties where
apiVersion : UInt32 := 0
driverVersion : UInt32 := 0
vendorID : UInt32 := 0
deviceID : UInt32 := 0
deviceType : PhysicalDeviceType
limits : PhysicalDeviceLimits
sparseProperties : PhysicalDeviceSparseProperties
@[extern "glue_vkGetPhysicalDeviceProperties"]
opaque getPhysicalDeviceProperties : (@& PhysicalDevice) → IO PhysicalDeviceProperties
/-- Bitmask wrapper for `VkQueueFlags`. -/
structure QueueFlags := private mk :: private v : UInt32
deriving DecidableEq
def QueueFlags.graphics := mk 1
def QueueFlags.compute := mk 2
def QueueFlags.transfer := mk 4
def QueueFlags.sparseBinding := mk 8
instance : HOr QueueFlags QueueFlags QueueFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd QueueFlags QueueFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited QueueFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkQueueFamilyProperties`. -/
structure QueueFamilyProperties where
queueFlags : QueueFlags := default
queueCount : UInt32 := 0
timestampValidBits : UInt32 := 0
minImageTransferGranularity : Extent3D
@[extern "glue_vkGetPhysicalDeviceQueueFamilyProperties"]
opaque getPhysicalDeviceQueueFamilyProperties : (@& PhysicalDevice) → IO (Array (QueueFamilyProperties))
-- Proc-address lookups return `Unit` on the Lean side; the function pointer
-- itself is not usable from Lean.
@[extern "glue_vkGetInstanceProcAddr"]
opaque getInstanceProcAddr : (@& Instance) → (@& String) → IO Unit
-- Opaque dispatchable handle (`VkDevice`), stored as a boxed UInt64.
opaque Device : Type := UInt64
@[extern "glue_vkGetDeviceProcAddr"]
opaque getDeviceProcAddr : (@& Device) → (@& String) → IO Unit
-- Raw `VkFlags`; DeviceCreateFlags has no wrapper because it is reserved (always 0).
abbrev Flags : Type := UInt32
abbrev DeviceCreateFlags : Type := Flags
/-- Bitmask wrapper for `VkDeviceQueueCreateFlags` (no core bits defined). -/
structure DeviceQueueCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
instance : HOr DeviceQueueCreateFlags DeviceQueueCreateFlags DeviceQueueCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd DeviceQueueCreateFlags DeviceQueueCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited DeviceQueueCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkDeviceQueueCreateInfo`; `queueCount` is derived from
`queuePriorities.size` by the C glue. -/
structure DeviceQueueCreateInfo where
flags : DeviceQueueCreateFlags := default
queueFamilyIndex : UInt32 := 0
queuePriorities : FloatArray := .empty
/-- Mirrors `VkDeviceCreateInfo`; counts are derived from array lengths. -/
structure DeviceCreateInfo where
flags : DeviceCreateFlags := 0
queueCreateInfos : Array (DeviceQueueCreateInfo) := .empty
enabledLayerNames : Array (String) := .empty
enabledExtensionNames : Array (String) := .empty
enabledFeatures : Option (PhysicalDeviceFeatures) := none
@[extern "glue_vkCreateDevice"]
opaque createDevice : (@& PhysicalDevice) → (@& DeviceCreateInfo) → IO (Result × Device)
@[extern "glue_vkDestroyDevice"]
opaque destroyDevice : (@& Device) → IO Unit
/-- Subset of `VkExtensionProperties`.
NOTE(review): the C struct's `extensionName` string is not exposed, which
makes the result hard to use for extension lookup — confirm intended. -/
structure ExtensionProperties where
  specVersion : UInt32 := 0
  -- Apparently a generator placeholder; carries no data.
  dummy : Unit := ()
-- NOTE(review): C's `pLayerName` may be NULL; here a `String` is required —
-- confirm how the glue encodes "no layer".
/-- Binding for `vkEnumerateInstanceExtensionProperties`. -/
@[extern "glue_vkEnumerateInstanceExtensionProperties"]
opaque enumerateInstanceExtensionProperties : (@& String) → IO (Result × Array (ExtensionProperties))
/-- Binding for `vkEnumerateDeviceExtensionProperties`. -/
@[extern "glue_vkEnumerateDeviceExtensionProperties"]
opaque enumerateDeviceExtensionProperties : (@& PhysicalDevice) → (@& String) → IO (Result × Array (ExtensionProperties))
/-- Subset of `VkLayerProperties` (layer name and description not exposed). -/
structure LayerProperties where
  specVersion : UInt32 := 0
  implementationVersion : UInt32 := 0
/-- Binding for `vkEnumerateInstanceLayerProperties`. -/
@[extern "glue_vkEnumerateInstanceLayerProperties"]
opaque enumerateInstanceLayerProperties : IO (Result × Array (LayerProperties))
/-- Binding for `vkEnumerateDeviceLayerProperties`. -/
@[extern "glue_vkEnumerateDeviceLayerProperties"]
opaque enumerateDeviceLayerProperties : (@& PhysicalDevice) → IO (Result × Array (LayerProperties))
/-- Opaque `VkQueue` handle. -/
opaque Queue : Type := UInt64
/-- Binding for `vkGetDeviceQueue` (arguments: queue family index, queue index). -/
@[extern "glue_vkGetDeviceQueue"]
opaque getDeviceQueue : (@& Device) → (@& UInt32) → (@& UInt32) → IO Queue
/-- Opaque `VkSemaphore` handle. -/
opaque Semaphore : Type := UInt64
/-- Bitmask wrapping `VkPipelineStageFlagBits` values. -/
structure PipelineStageFlags := private mk :: private v : UInt32
  deriving DecidableEq
-- Individual pipeline stage bits (values match the Vulkan C enum).
def PipelineStageFlags.topOfPipe := mk 1
def PipelineStageFlags.drawIndirect := mk 2
def PipelineStageFlags.vertexInput := mk 4
def PipelineStageFlags.vertexShader := mk 8
def PipelineStageFlags.tessellationControlShader := mk 16
def PipelineStageFlags.tessellationEvaluationShader := mk 32
def PipelineStageFlags.geometryShader := mk 64
def PipelineStageFlags.fragmentShader := mk 128
def PipelineStageFlags.earlyFragmentTests := mk 256
def PipelineStageFlags.lateFragmentTests := mk 512
def PipelineStageFlags.colorAttachmentOutput := mk 1024
def PipelineStageFlags.computeShader := mk 2048
def PipelineStageFlags.transfer := mk 4096
def PipelineStageFlags.bottomOfPipe := mk 8192
def PipelineStageFlags.host := mk 16384
def PipelineStageFlags.allGraphics := mk 32768
def PipelineStageFlags.allCommands := mk 65536
-- `|||` unions two stage masks, `&&&` tests for a shared stage bit
-- (yielding `Bool`, not a mask), and `default` is the empty mask.
instance : HOr PipelineStageFlags PipelineStageFlags PipelineStageFlags :=
  ⟨fun a b => ⟨a.v ||| b.v⟩⟩
instance : HAnd PipelineStageFlags PipelineStageFlags Bool :=
  ⟨fun a b => (a.v &&& b.v) != 0⟩
instance : Inhabited PipelineStageFlags :=
  ⟨⟨0⟩⟩
/-- Opaque `VkCommandBuffer` handle. -/
opaque CommandBuffer : Type := UInt64
/-- Mirrors `VkSubmitInfo`.
`waitDstStageMask` pairs up element-wise with `waitSemaphores` in the C API;
the arrays should therefore have equal length — not enforced here. -/
structure SubmitInfo where
  waitSemaphores : Array (Semaphore) := .empty
  waitDstStageMask : Array (PipelineStageFlags) := .empty
  commandBuffers : Array (CommandBuffer) := .empty
  signalSemaphores : Array (Semaphore) := .empty
/-- Opaque `VkFence` handle. -/
opaque Fence : Type := UInt64
/-- Binding for `vkQueueSubmit`; the fence is signalled when the batches complete. -/
@[extern "glue_vkQueueSubmit"]
opaque queueSubmit : (@& Queue) → (@& Array (SubmitInfo)) → (@& Fence) → IO Result
/-- Binding for `vkQueueWaitIdle`. -/
@[extern "glue_vkQueueWaitIdle"]
opaque queueWaitIdle : (@& Queue) → IO Result
/-- Binding for `vkDeviceWaitIdle`. -/
@[extern "glue_vkDeviceWaitIdle"]
opaque deviceWaitIdle : (@& Device) → IO Result
/-- Mirrors `VkMemoryAllocateInfo`. -/
structure MemoryAllocateInfo where
  allocationSize : DeviceSize := 0
  memoryTypeIndex : UInt32 := 0
/-- Opaque `VkDeviceMemory` handle. -/
opaque DeviceMemory : Type := UInt64
/-- Binding for `vkAllocateMemory`. -/
@[extern "glue_vkAllocateMemory"]
opaque allocateMemory : (@& Device) → (@& MemoryAllocateInfo) → IO (Result × DeviceMemory)
/-- Binding for `vkFreeMemory`. -/
@[extern "glue_vkFreeMemory"]
opaque freeMemory : (@& Device) → (@& DeviceMemory) → IO Unit
/-- `VkMemoryMapFlags` (no named bits). -/
abbrev MemoryMapFlags : Type := Flags
/-- Raw host pointer produced by `mapMemory`, carried as a 64-bit value. -/
opaque Pointer : Type := UInt64
/-- Binding for `vkMapMemory` (arguments: offset, size, flags). -/
@[extern "glue_vkMapMemory"]
opaque mapMemory : (@& Device) → (@& DeviceMemory) → (@& DeviceSize) → (@& DeviceSize) → (@& MemoryMapFlags) → IO (Result × Pointer)
/-- Binding for `vkUnmapMemory`. -/
@[extern "glue_vkUnmapMemory"]
opaque unmapMemory : (@& Device) → (@& DeviceMemory) → IO Unit
/-- Mirrors `VkMappedMemoryRange`. -/
structure MappedMemoryRange where
  memory : DeviceMemory
  offset : DeviceSize := 0
  size : DeviceSize := 0
/-- Binding for `vkFlushMappedMemoryRanges`. -/
@[extern "glue_vkFlushMappedMemoryRanges"]
opaque flushMappedMemoryRanges : (@& Device) → (@& Array (MappedMemoryRange)) → IO Result
/-- Binding for `vkInvalidateMappedMemoryRanges`. -/
@[extern "glue_vkInvalidateMappedMemoryRanges"]
opaque invalidateMappedMemoryRanges : (@& Device) → (@& Array (MappedMemoryRange)) → IO Result
/-- Binding for `vkGetDeviceMemoryCommitment`; returns the committed size in bytes. -/
@[extern "glue_vkGetDeviceMemoryCommitment"]
opaque getDeviceMemoryCommitment : (@& Device) → (@& DeviceMemory) → IO DeviceSize
/-- Opaque `VkBuffer` handle. -/
opaque Buffer : Type := UInt64
/-- Binding for `vkBindBufferMemory` (final argument is the memory offset). -/
@[extern "glue_vkBindBufferMemory"]
opaque bindBufferMemory : (@& Device) → (@& Buffer) → (@& DeviceMemory) → (@& DeviceSize) → IO Result
/-- Opaque `VkImage` handle. -/
opaque Image : Type := UInt64
/-- Binding for `vkBindImageMemory` (final argument is the memory offset). -/
@[extern "glue_vkBindImageMemory"]
opaque bindImageMemory : (@& Device) → (@& Image) → (@& DeviceMemory) → (@& DeviceSize) → IO Result
/-- Mirrors `VkMemoryRequirements`. -/
structure MemoryRequirements where
  size : DeviceSize := 0
  alignment : DeviceSize := 0
  -- Bitmask of memory type indices usable for the resource.
  memoryTypeBits : UInt32 := 0
/-- Binding for `vkGetBufferMemoryRequirements`. -/
@[extern "glue_vkGetBufferMemoryRequirements"]
opaque getBufferMemoryRequirements : (@& Device) → (@& Buffer) → IO MemoryRequirements
/-- Binding for `vkGetImageMemoryRequirements`. -/
@[extern "glue_vkGetImageMemoryRequirements"]
opaque getImageMemoryRequirements : (@& Device) → (@& Image) → IO MemoryRequirements
/-- Bitmask wrapping `VkImageAspectFlagBits` values. -/
structure ImageAspectFlags := private mk :: private v : UInt32
  deriving DecidableEq
def ImageAspectFlags.color := mk 1
def ImageAspectFlags.depth := mk 2
def ImageAspectFlags.stencil := mk 4
def ImageAspectFlags.metadata := mk 8
instance : HOr ImageAspectFlags ImageAspectFlags ImageAspectFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd ImageAspectFlags ImageAspectFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited ImageAspectFlags := ⟨⟨0⟩⟩
/-- Bitmask wrapping `VkSparseImageFormatFlagBits` values. -/
structure SparseImageFormatFlags := private mk :: private v : UInt32
  deriving DecidableEq
def SparseImageFormatFlags.singleMiptail := mk 1
def SparseImageFormatFlags.alignedMipSize := mk 2
def SparseImageFormatFlags.nonstandardBlockSize := mk 4
instance : HOr SparseImageFormatFlags SparseImageFormatFlags SparseImageFormatFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd SparseImageFormatFlags SparseImageFormatFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited SparseImageFormatFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkSparseImageFormatProperties`. -/
structure SparseImageFormatProperties where
  aspectMask : ImageAspectFlags := default
  imageGranularity : Extent3D
  flags : SparseImageFormatFlags := default
/-- Mirrors `VkSparseImageMemoryRequirements`. -/
structure SparseImageMemoryRequirements where
  formatProperties : SparseImageFormatProperties
  imageMipTailFirstLod : UInt32 := 0
  imageMipTailSize : DeviceSize := 0
  imageMipTailOffset : DeviceSize := 0
  imageMipTailStride : DeviceSize := 0
/-- Binding for `vkGetImageSparseMemoryRequirements`. -/
@[extern "glue_vkGetImageSparseMemoryRequirements"]
opaque getImageSparseMemoryRequirements : (@& Device) → (@& Image) → IO (Array (SparseImageMemoryRequirements))
/-- Binding for `vkGetPhysicalDeviceSparseImageFormatProperties`
(arguments: format, image type, sample count, usage, tiling). -/
@[extern "glue_vkGetPhysicalDeviceSparseImageFormatProperties"]
opaque getPhysicalDeviceSparseImageFormatProperties : (@& PhysicalDevice) → (@& Format) → (@& ImageType) → (@& SampleCountFlags) → (@& ImageUsageFlags) → (@& ImageTiling) → IO (Array (SparseImageFormatProperties))
/-- Bitmask wrapping `VkSparseMemoryBindFlagBits` values. -/
structure SparseMemoryBindFlags := private mk :: private v : UInt32
  deriving DecidableEq
def SparseMemoryBindFlags.metadata := mk 1
instance : HOr SparseMemoryBindFlags SparseMemoryBindFlags SparseMemoryBindFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd SparseMemoryBindFlags SparseMemoryBindFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited SparseMemoryBindFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkSparseMemoryBind`. -/
structure SparseMemoryBind where
  resourceOffset : DeviceSize := 0
  size : DeviceSize := 0
  memory : DeviceMemory
  memoryOffset : DeviceSize := 0
  flags : SparseMemoryBindFlags := default
/-- Mirrors `VkSparseBufferMemoryBindInfo`. -/
structure SparseBufferMemoryBindInfo where
  buffer : Buffer
  binds : Array (SparseMemoryBind) := .empty
/-- Mirrors `VkSparseImageOpaqueMemoryBindInfo`. -/
structure SparseImageOpaqueMemoryBindInfo where
  image : Image
  binds : Array (SparseMemoryBind) := .empty
/-- Mirrors `VkImageSubresource`. -/
structure ImageSubresource where
  aspectMask : ImageAspectFlags := default
  mipLevel : UInt32 := 0
  arrayLayer : UInt32 := 0
/-- Mirrors `VkOffset3D`.
NOTE(review): the C struct uses signed `int32_t` components; modeling them
as `UInt32` makes negative offsets unrepresentable — confirm against glue. -/
structure Offset3D where
  x : UInt32 := 0
  y : UInt32 := 0
  z : UInt32 := 0
/-- Mirrors `VkSparseImageMemoryBind`. -/
structure SparseImageMemoryBind where
  subresource : ImageSubresource
  offset : Offset3D
  extent : Extent3D
  memory : DeviceMemory
  memoryOffset : DeviceSize := 0
  flags : SparseMemoryBindFlags := default
/-- Mirrors `VkSparseImageMemoryBindInfo`. -/
structure SparseImageMemoryBindInfo where
  image : Image
  binds : Array (SparseImageMemoryBind) := .empty
/-- Mirrors `VkBindSparseInfo`. -/
structure BindSparseInfo where
  waitSemaphores : Array (Semaphore) := .empty
  bufferBinds : Array (SparseBufferMemoryBindInfo) := .empty
  imageOpaqueBinds : Array (SparseImageOpaqueMemoryBindInfo) := .empty
  imageBinds : Array (SparseImageMemoryBindInfo) := .empty
  signalSemaphores : Array (Semaphore) := .empty
/-- Binding for `vkQueueBindSparse`; the fence is signalled on completion. -/
@[extern "glue_vkQueueBindSparse"]
opaque queueBindSparse : (@& Queue) → (@& Array (BindSparseInfo)) → (@& Fence) → IO Result
/-- Bitmask wrapping `VkFenceCreateFlagBits` values. -/
structure FenceCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
-- Create the fence already in the signalled state.
def FenceCreateFlags.signaled := mk 1
instance : HOr FenceCreateFlags FenceCreateFlags FenceCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd FenceCreateFlags FenceCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited FenceCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkFenceCreateInfo` (`dummy` is a generator placeholder). -/
structure FenceCreateInfo where
  flags : FenceCreateFlags := default
  dummy : Unit := ()
/-- Binding for `vkCreateFence`. -/
@[extern "glue_vkCreateFence"]
opaque createFence : (@& Device) → (@& FenceCreateInfo) → IO (Result × Fence)
/-- Binding for `vkDestroyFence`. -/
@[extern "glue_vkDestroyFence"]
opaque destroyFence : (@& Device) → (@& Fence) → IO Unit
/-- Binding for `vkResetFences`. -/
@[extern "glue_vkResetFences"]
opaque resetFences : (@& Device) → (@& Array (Fence)) → IO Result
/-- Binding for `vkGetFenceStatus`. -/
@[extern "glue_vkGetFenceStatus"]
opaque getFenceStatus : (@& Device) → (@& Fence) → IO Result
/-- Binding for `vkWaitForFences` (arguments: wait-all flag, timeout —
nanoseconds in the Vulkan API). -/
@[extern "glue_vkWaitForFences"]
opaque waitForFences : (@& Device) → (@& Array (Fence)) → (@& Bool32) → (@& UInt64) → IO Result
/-- `VkSemaphoreCreateFlags` (no named bits). -/
abbrev SemaphoreCreateFlags : Type := Flags
/-- Mirrors `VkSemaphoreCreateInfo` (`dummy` is a generator placeholder). -/
structure SemaphoreCreateInfo where
  flags : SemaphoreCreateFlags := 0
  dummy : Unit := ()
/-- Binding for `vkCreateSemaphore`. -/
@[extern "glue_vkCreateSemaphore"]
opaque createSemaphore : (@& Device) → (@& SemaphoreCreateInfo) → IO (Result × Semaphore)
/-- Binding for `vkDestroySemaphore`. -/
@[extern "glue_vkDestroySemaphore"]
opaque destroySemaphore : (@& Device) → (@& Semaphore) → IO Unit
/-- Bitmask for `VkEventCreateFlags` (no named bits are exposed here). -/
structure EventCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
instance : HOr EventCreateFlags EventCreateFlags EventCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd EventCreateFlags EventCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited EventCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkEventCreateInfo` (`dummy` is a generator placeholder). -/
structure EventCreateInfo where
  flags : EventCreateFlags := default
  dummy : Unit := ()
/-- Opaque `VkEvent` handle. -/
opaque Event : Type := UInt64
/-- Binding for `vkCreateEvent`. -/
@[extern "glue_vkCreateEvent"]
opaque createEvent : (@& Device) → (@& EventCreateInfo) → IO (Result × Event)
/-- Binding for `vkDestroyEvent`. -/
@[extern "glue_vkDestroyEvent"]
opaque destroyEvent : (@& Device) → (@& Event) → IO Unit
/-- Binding for `vkGetEventStatus`. -/
@[extern "glue_vkGetEventStatus"]
opaque getEventStatus : (@& Device) → (@& Event) → IO Result
/-- Binding for `vkSetEvent`. -/
@[extern "glue_vkSetEvent"]
opaque setEvent : (@& Device) → (@& Event) → IO Result
/-- Binding for `vkResetEvent`. -/
@[extern "glue_vkResetEvent"]
opaque resetEvent : (@& Device) → (@& Event) → IO Result
/-- `VkQueryPoolCreateFlags` (no named bits). -/
abbrev QueryPoolCreateFlags : Type := Flags
/-- Enum wrapper mirroring `VkQueryType`. -/
structure QueryType := private mk :: private v : UInt32
  deriving DecidableEq
def QueryType.occlusion := mk 0
def QueryType.pipelineStatistics := mk 1
def QueryType.timestamp := mk 2
/-- Bitmask wrapping `VkQueryPipelineStatisticFlagBits` values. -/
structure QueryPipelineStatisticFlags := private mk :: private v : UInt32
  deriving DecidableEq
def QueryPipelineStatisticFlags.inputAssemblyVertices := mk 1
def QueryPipelineStatisticFlags.inputAssemblyPrimitives := mk 2
def QueryPipelineStatisticFlags.vertexShaderInvocations := mk 4
def QueryPipelineStatisticFlags.geometryShaderInvocations := mk 8
def QueryPipelineStatisticFlags.geometryShaderPrimitives := mk 16
def QueryPipelineStatisticFlags.clippingInvocations := mk 32
def QueryPipelineStatisticFlags.clippingPrimitives := mk 64
def QueryPipelineStatisticFlags.fragmentShaderInvocations := mk 128
def QueryPipelineStatisticFlags.tessellationControlShaderPatches := mk 256
def QueryPipelineStatisticFlags.tessellationEvaluationShaderInvocations := mk 512
def QueryPipelineStatisticFlags.computeShaderInvocations := mk 1024
instance : HOr QueryPipelineStatisticFlags QueryPipelineStatisticFlags QueryPipelineStatisticFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd QueryPipelineStatisticFlags QueryPipelineStatisticFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited QueryPipelineStatisticFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkQueryPoolCreateInfo`; `pipelineStatistics` is only meaningful
when `queryType` is `pipelineStatistics`. -/
structure QueryPoolCreateInfo where
  flags : QueryPoolCreateFlags := 0
  queryType : QueryType
  queryCount : UInt32 := 0
  pipelineStatistics : QueryPipelineStatisticFlags := default
/-- Opaque `VkQueryPool` handle. -/
opaque QueryPool : Type := UInt64
/-- Binding for `vkCreateQueryPool`. -/
@[extern "glue_vkCreateQueryPool"]
opaque createQueryPool : (@& Device) → (@& QueryPoolCreateInfo) → IO (Result × QueryPool)
/-- Binding for `vkDestroyQueryPool`. -/
@[extern "glue_vkDestroyQueryPool"]
opaque destroyQueryPool : (@& Device) → (@& QueryPool) → IO Unit
/-- Bitmask wrapping `VkBufferCreateFlagBits` values. -/
structure BufferCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
def BufferCreateFlags.sparseBinding := mk 1
def BufferCreateFlags.sparseResidency := mk 2
def BufferCreateFlags.sparseAliased := mk 4
instance : HOr BufferCreateFlags BufferCreateFlags BufferCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd BufferCreateFlags BufferCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited BufferCreateFlags := ⟨⟨0⟩⟩
/-- Bitmask wrapping `VkBufferUsageFlagBits` values. -/
structure BufferUsageFlags := private mk :: private v : UInt32
  deriving DecidableEq
def BufferUsageFlags.transferSrc := mk 1
def BufferUsageFlags.transferDst := mk 2
def BufferUsageFlags.uniformTexelBuffer := mk 4
def BufferUsageFlags.storageTexelBuffer := mk 8
def BufferUsageFlags.uniformBuffer := mk 16
def BufferUsageFlags.storageBuffer := mk 32
def BufferUsageFlags.indexBuffer := mk 64
def BufferUsageFlags.vertexBuffer := mk 128
def BufferUsageFlags.indirectBuffer := mk 256
instance : HOr BufferUsageFlags BufferUsageFlags BufferUsageFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd BufferUsageFlags BufferUsageFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited BufferUsageFlags := ⟨⟨0⟩⟩
/-- Enum wrapper mirroring `VkSharingMode`. -/
structure SharingMode := private mk :: private v : UInt32
  deriving DecidableEq
def SharingMode.exclusive := mk 0
def SharingMode.concurrent := mk 1
/-- Mirrors `VkBufferCreateInfo`; `queueFamilyIndices` is only consulted
for `SharingMode.concurrent` in the Vulkan API. -/
structure BufferCreateInfo where
  flags : BufferCreateFlags := default
  size : DeviceSize := 0
  usage : BufferUsageFlags := default
  sharingMode : SharingMode
  queueFamilyIndices : Array (UInt32) := .empty
/-- Binding for `vkCreateBuffer`. -/
@[extern "glue_vkCreateBuffer"]
opaque createBuffer : (@& Device) → (@& BufferCreateInfo) → IO (Result × Buffer)
/-- Binding for `vkDestroyBuffer`. -/
@[extern "glue_vkDestroyBuffer"]
opaque destroyBuffer : (@& Device) → (@& Buffer) → IO Unit
/-- `VkBufferViewCreateFlags` (no named bits). -/
abbrev BufferViewCreateFlags : Type := Flags
/-- Mirrors `VkBufferViewCreateInfo`. -/
structure BufferViewCreateInfo where
  flags : BufferViewCreateFlags := 0
  buffer : Buffer
  format : Format
  offset : DeviceSize := 0
  range : DeviceSize := 0
/-- Opaque `VkBufferView` handle. -/
opaque BufferView : Type := UInt64
/-- Binding for `vkCreateBufferView`. -/
@[extern "glue_vkCreateBufferView"]
opaque createBufferView : (@& Device) → (@& BufferViewCreateInfo) → IO (Result × BufferView)
/-- Binding for `vkDestroyBufferView`. -/
@[extern "glue_vkDestroyBufferView"]
opaque destroyBufferView : (@& Device) → (@& BufferView) → IO Unit
/-- Enum wrapper mirroring `VkImageLayout`. -/
structure ImageLayout := private mk :: private v : UInt32
  deriving DecidableEq
def ImageLayout.undefined := mk 0
def ImageLayout.general := mk 1
def ImageLayout.colorAttachmentOptimal := mk 2
def ImageLayout.depthStencilAttachmentOptimal := mk 3
def ImageLayout.depthStencilReadOnlyOptimal := mk 4
def ImageLayout.shaderReadOnlyOptimal := mk 5
def ImageLayout.transferSrcOptimal := mk 6
def ImageLayout.transferDstOptimal := mk 7
def ImageLayout.preinitialized := mk 8
-- Extension value (VK_KHR_swapchain), hence the large enum constant.
def ImageLayout.presentSrcKhr := mk 1000001002
/-- Mirrors `VkImageCreateInfo`; `queueFamilyIndices` is only consulted
for `SharingMode.concurrent` in the Vulkan API. -/
structure ImageCreateInfo where
  flags : ImageCreateFlags := default
  imageType : ImageType
  format : Format
  extent : Extent3D
  mipLevels : UInt32 := 0
  arrayLayers : UInt32 := 0
  samples : SampleCountFlags := default
  tiling : ImageTiling
  usage : ImageUsageFlags := default
  sharingMode : SharingMode
  queueFamilyIndices : Array (UInt32) := .empty
  initialLayout : ImageLayout
/-- Binding for `vkCreateImage`. -/
@[extern "glue_vkCreateImage"]
opaque createImage : (@& Device) → (@& ImageCreateInfo) → IO (Result × Image)
/-- Binding for `vkDestroyImage`. -/
@[extern "glue_vkDestroyImage"]
opaque destroyImage : (@& Device) → (@& Image) → IO Unit
/-- Mirrors `VkSubresourceLayout` (all quantities in bytes). -/
structure SubresourceLayout where
  offset : DeviceSize := 0
  size : DeviceSize := 0
  rowPitch : DeviceSize := 0
  arrayPitch : DeviceSize := 0
  depthPitch : DeviceSize := 0
/-- Binding for `vkGetImageSubresourceLayout`. -/
@[extern "glue_vkGetImageSubresourceLayout"]
opaque getImageSubresourceLayout : (@& Device) → (@& Image) → (@& ImageSubresource) → IO SubresourceLayout
/-- Bitmask for `VkImageViewCreateFlags` (no named bits are exposed here). -/
structure ImageViewCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
instance : HOr ImageViewCreateFlags ImageViewCreateFlags ImageViewCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd ImageViewCreateFlags ImageViewCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited ImageViewCreateFlags := ⟨⟨0⟩⟩
/-- Enum wrapper mirroring `VkImageViewType`; leading digits are escaped
with an underscore to form valid Lean identifiers. -/
structure ImageViewType := private mk :: private v : UInt32
  deriving DecidableEq
def ImageViewType._1d := mk 0
def ImageViewType._2d := mk 1
def ImageViewType._3d := mk 2
def ImageViewType.cube := mk 3
def ImageViewType._1dArray := mk 4
def ImageViewType._2dArray := mk 5
def ImageViewType.cubeArray := mk 6
/-- Enum wrapper mirroring `VkComponentSwizzle`. -/
structure ComponentSwizzle := private mk :: private v : UInt32
  deriving DecidableEq
def ComponentSwizzle.identity := mk 0
def ComponentSwizzle.zero := mk 1
def ComponentSwizzle.one := mk 2
def ComponentSwizzle.r := mk 3
def ComponentSwizzle.g := mk 4
def ComponentSwizzle.b := mk 5
def ComponentSwizzle.a := mk 6
/-- Mirrors `VkComponentMapping`. -/
structure ComponentMapping where
  r : ComponentSwizzle
  g : ComponentSwizzle
  b : ComponentSwizzle
  a : ComponentSwizzle
/-- Mirrors `VkImageSubresourceRange`. -/
structure ImageSubresourceRange where
  aspectMask : ImageAspectFlags := default
  baseMipLevel : UInt32 := 0
  levelCount : UInt32 := 0
  baseArrayLayer : UInt32 := 0
  layerCount : UInt32 := 0
/-- Mirrors `VkImageViewCreateInfo`. -/
structure ImageViewCreateInfo where
  flags : ImageViewCreateFlags := default
  image : Image
  viewType : ImageViewType
  format : Format
  components : ComponentMapping
  subresourceRange : ImageSubresourceRange
/-- Opaque `VkImageView` handle. -/
opaque ImageView : Type := UInt64
/-- Binding for `vkCreateImageView`. -/
@[extern "glue_vkCreateImageView"]
opaque createImageView : (@& Device) → (@& ImageViewCreateInfo) → IO (Result × ImageView)
/-- Binding for `vkDestroyImageView`. -/
@[extern "glue_vkDestroyImageView"]
opaque destroyImageView : (@& Device) → (@& ImageView) → IO Unit
/-- `VkShaderModuleCreateFlags` (no named bits). -/
abbrev ShaderModuleCreateFlags : Type := Flags
/-- Mirrors `VkShaderModuleCreateInfo`. -/
structure ShaderModuleCreateInfo where
  flags : ShaderModuleCreateFlags := 0
  -- Raw SPIR-V bytes; the Vulkan API requires the byte count to be a
  -- multiple of 4 — not enforced here, presumably checked (or not) in glue.
  code : ByteArray := .empty
/-- Opaque `VkShaderModule` handle. -/
opaque ShaderModule : Type := UInt64
/-- Binding for `vkCreateShaderModule`. -/
@[extern "glue_vkCreateShaderModule"]
opaque createShaderModule : (@& Device) → (@& ShaderModuleCreateInfo) → IO (Result × ShaderModule)
/-- Binding for `vkDestroyShaderModule`. -/
@[extern "glue_vkDestroyShaderModule"]
opaque destroyShaderModule : (@& Device) → (@& ShaderModule) → IO Unit
/-- Bitmask for `VkPipelineCacheCreateFlags` (no named bits are exposed here). -/
structure PipelineCacheCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
instance : HOr PipelineCacheCreateFlags PipelineCacheCreateFlags PipelineCacheCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd PipelineCacheCreateFlags PipelineCacheCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited PipelineCacheCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkPipelineCacheCreateInfo`; `initialData` seeds the cache. -/
structure PipelineCacheCreateInfo where
  flags : PipelineCacheCreateFlags := default
  initialData : ByteArray := .empty
/-- Opaque `VkPipelineCache` handle. -/
opaque PipelineCache : Type := UInt64
/-- Binding for `vkCreatePipelineCache`. -/
@[extern "glue_vkCreatePipelineCache"]
opaque createPipelineCache : (@& Device) → (@& PipelineCacheCreateInfo) → IO (Result × PipelineCache)
/-- Binding for `vkDestroyPipelineCache`. -/
@[extern "glue_vkDestroyPipelineCache"]
opaque destroyPipelineCache : (@& Device) → (@& PipelineCache) → IO Unit
/-- Binding for `vkMergePipelineCaches`; merges the source caches into the
destination cache (second argument). -/
@[extern "glue_vkMergePipelineCaches"]
opaque mergePipelineCaches : (@& Device) → (@& PipelineCache) → (@& Array (PipelineCache)) → IO Result
/-- Bitmask wrapping `VkPipelineCreateFlagBits` values. -/
structure PipelineCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
def PipelineCreateFlags.disableOptimization := mk 1
def PipelineCreateFlags.allowDerivatives := mk 2
def PipelineCreateFlags.derivative := mk 4
instance : HOr PipelineCreateFlags PipelineCreateFlags PipelineCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd PipelineCreateFlags PipelineCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited PipelineCreateFlags := ⟨⟨0⟩⟩
/-- Bitmask for `VkPipelineShaderStageCreateFlags` (no named bits are exposed here). -/
structure PipelineShaderStageCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
instance : HOr PipelineShaderStageCreateFlags PipelineShaderStageCreateFlags PipelineShaderStageCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd PipelineShaderStageCreateFlags PipelineShaderStageCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited PipelineShaderStageCreateFlags := ⟨⟨0⟩⟩
/-- Bitmask wrapping `VkShaderStageFlagBits` values. -/
structure ShaderStageFlags := private mk :: private v : UInt32
  deriving DecidableEq
def ShaderStageFlags.vertex := mk 1
def ShaderStageFlags.tessellationControl := mk 2
def ShaderStageFlags.tessellationEvaluation := mk 4
def ShaderStageFlags.geometry := mk 8
def ShaderStageFlags.fragment := mk 16
def ShaderStageFlags.compute := mk 32
instance : HOr ShaderStageFlags ShaderStageFlags ShaderStageFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd ShaderStageFlags ShaderStageFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited ShaderStageFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkSpecializationMapEntry` (`offset`/`size` index into
`SpecializationInfo.data`). -/
structure SpecializationMapEntry where
  constantID : UInt32 := 0
  offset : UInt32 := 0
  size : UInt64 := 0
/-- Mirrors `VkSpecializationInfo`. -/
structure SpecializationInfo where
  mapEntries : Array (SpecializationMapEntry) := .empty
  data : ByteArray := .empty
/-- Mirrors `VkPipelineShaderStageCreateInfo`; `name` is the shader entry
point; `stage` should name a single stage bit. -/
structure PipelineShaderStageCreateInfo where
  flags : PipelineShaderStageCreateFlags := default
  stage : ShaderStageFlags := default
  module : ShaderModule
  name : String
  specializationInfo : Option (SpecializationInfo) := none
/-- `VkPipelineVertexInputStateCreateFlags` (no named bits). -/
abbrev PipelineVertexInputStateCreateFlags : Type := Flags
/-- Enum wrapper mirroring `VkVertexInputRate`. -/
structure VertexInputRate := private mk :: private v : UInt32
  deriving DecidableEq
def VertexInputRate.vertex := mk 0
def VertexInputRate.instance := mk 1
/-- Mirrors `VkVertexInputBindingDescription`. -/
structure VertexInputBindingDescription where
  binding : UInt32 := 0
  stride : UInt32 := 0
  inputRate : VertexInputRate
/-- Mirrors `VkVertexInputAttributeDescription`. -/
structure VertexInputAttributeDescription where
  location : UInt32 := 0
  binding : UInt32 := 0
  format : Format
  offset : UInt32 := 0
/-- Mirrors `VkPipelineVertexInputStateCreateInfo`. -/
structure PipelineVertexInputStateCreateInfo where
  flags : PipelineVertexInputStateCreateFlags := 0
  vertexBindingDescriptions : Array (VertexInputBindingDescription) := .empty
  vertexAttributeDescriptions : Array (VertexInputAttributeDescription) := .empty
/-- `VkPipelineInputAssemblyStateCreateFlags` (no named bits). -/
abbrev PipelineInputAssemblyStateCreateFlags : Type := Flags
/-- Enum wrapper mirroring `VkPrimitiveTopology`. -/
structure PrimitiveTopology := private mk :: private v : UInt32
  deriving DecidableEq
def PrimitiveTopology.pointList := mk 0
def PrimitiveTopology.lineList := mk 1
def PrimitiveTopology.lineStrip := mk 2
def PrimitiveTopology.triangleList := mk 3
def PrimitiveTopology.triangleStrip := mk 4
def PrimitiveTopology.triangleFan := mk 5
def PrimitiveTopology.lineListWithAdjacency := mk 6
def PrimitiveTopology.lineStripWithAdjacency := mk 7
def PrimitiveTopology.triangleListWithAdjacency := mk 8
def PrimitiveTopology.triangleStripWithAdjacency := mk 9
def PrimitiveTopology.patchList := mk 10
/-- Mirrors `VkPipelineInputAssemblyStateCreateInfo`. -/
structure PipelineInputAssemblyStateCreateInfo where
  flags : PipelineInputAssemblyStateCreateFlags := 0
  topology : PrimitiveTopology
  -- Bool32: 0 = false, nonzero = true.
  primitiveRestartEnable : Bool32 := 0
/-- `VkPipelineTessellationStateCreateFlags` (no named bits). -/
abbrev PipelineTessellationStateCreateFlags : Type := Flags
/-- Mirrors `VkPipelineTessellationStateCreateInfo`. -/
structure PipelineTessellationStateCreateInfo where
  flags : PipelineTessellationStateCreateFlags := 0
  patchControlPoints : UInt32 := 0
/-- `VkPipelineViewportStateCreateFlags` (no named bits). -/
abbrev PipelineViewportStateCreateFlags : Type := Flags
/-- Mirrors `VkViewport`.
NOTE(review): Lean `Float` is 64-bit while the C fields are 32-bit `float`;
presumably the glue narrows on marshalling — confirm. -/
structure Viewport where
  x : Float
  y : Float
  width : Float
  height : Float
  minDepth : Float
  maxDepth : Float
/-- Mirrors `VkOffset2D`.
NOTE(review): the C struct uses signed `int32_t` components; modeling them
as `UInt32` makes negative offsets unrepresentable — confirm against glue. -/
structure Offset2D where
  x : UInt32 := 0
  y : UInt32 := 0
/-- Mirrors `VkExtent2D`. -/
structure Extent2D where
  width : UInt32 := 0
  height : UInt32 := 0
/-- Mirrors `VkRect2D`. -/
structure Rect2D where
  offset : Offset2D
  extent : Extent2D
/-- Mirrors `VkPipelineViewportStateCreateInfo`. -/
structure PipelineViewportStateCreateInfo where
  flags : PipelineViewportStateCreateFlags := 0
  viewports : Array (Viewport) := .empty
  scissors : Array (Rect2D) := .empty
/-- `VkPipelineRasterizationStateCreateFlags` (no named bits). -/
abbrev PipelineRasterizationStateCreateFlags : Type := Flags
/-- Enum wrapper mirroring `VkPolygonMode`. -/
structure PolygonMode := private mk :: private v : UInt32
  deriving DecidableEq
def PolygonMode.fill := mk 0
def PolygonMode.line := mk 1
def PolygonMode.point := mk 2
/-- Bitmask wrapping `VkCullModeFlagBits`; the empty mask (`default`)
corresponds to culling nothing. -/
structure CullModeFlags := private mk :: private v : UInt32
  deriving DecidableEq
def CullModeFlags.front := mk 1
def CullModeFlags.back := mk 2
instance : HOr CullModeFlags CullModeFlags CullModeFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd CullModeFlags CullModeFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited CullModeFlags := ⟨⟨0⟩⟩
/-- Enum wrapper mirroring `VkFrontFace`. -/
structure FrontFace := private mk :: private v : UInt32
  deriving DecidableEq
def FrontFace.counterClockwise := mk 0
def FrontFace.clockwise := mk 1
/-- Mirrors `VkPipelineRasterizationStateCreateInfo`. -/
structure PipelineRasterizationStateCreateInfo where
  flags : PipelineRasterizationStateCreateFlags := 0
  depthClampEnable : Bool32 := 0
  rasterizerDiscardEnable : Bool32 := 0
  polygonMode : PolygonMode
  cullMode : CullModeFlags := default
  frontFace : FrontFace
  depthBiasEnable : Bool32 := 0
  depthBiasConstantFactor : Float
  depthBiasClamp : Float
  depthBiasSlopeFactor : Float
  lineWidth : Float
/-- `VkPipelineMultisampleStateCreateFlags` (no named bits). -/
abbrev PipelineMultisampleStateCreateFlags : Type := Flags
/-- Mirrors `VkPipelineMultisampleStateCreateInfo`.
NOTE(review): the C struct's `pSampleMask` has no counterpart here —
presumably the glue passes NULL; confirm. -/
structure PipelineMultisampleStateCreateInfo where
  flags : PipelineMultisampleStateCreateFlags := 0
  rasterizationSamples : SampleCountFlags := default
  sampleShadingEnable : Bool32 := 0
  minSampleShading : Float
  alphaToCoverageEnable : Bool32 := 0
  alphaToOneEnable : Bool32 := 0
/-- Bitmask for `VkPipelineDepthStencilStateCreateFlags` (no named bits are exposed here). -/
structure PipelineDepthStencilStateCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
instance : HOr PipelineDepthStencilStateCreateFlags PipelineDepthStencilStateCreateFlags PipelineDepthStencilStateCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd PipelineDepthStencilStateCreateFlags PipelineDepthStencilStateCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited PipelineDepthStencilStateCreateFlags := ⟨⟨0⟩⟩
/-- Enum wrapper mirroring `VkCompareOp`. -/
structure CompareOp := private mk :: private v : UInt32
  deriving DecidableEq
def CompareOp.never := mk 0
def CompareOp.less := mk 1
def CompareOp.equal := mk 2
def CompareOp.lessOrEqual := mk 3
def CompareOp.greater := mk 4
def CompareOp.notEqual := mk 5
def CompareOp.greaterOrEqual := mk 6
def CompareOp.always := mk 7
/-- Enum wrapper mirroring `VkStencilOp`. -/
structure StencilOp := private mk :: private v : UInt32
  deriving DecidableEq
def StencilOp.keep := mk 0
def StencilOp.zero := mk 1
def StencilOp.replace := mk 2
def StencilOp.incrementAndClamp := mk 3
def StencilOp.decrementAndClamp := mk 4
def StencilOp.invert := mk 5
def StencilOp.incrementAndWrap := mk 6
def StencilOp.decrementAndWrap := mk 7
/-- Mirrors `VkStencilOpState`. -/
structure StencilOpState where
  failOp : StencilOp
  passOp : StencilOp
  depthFailOp : StencilOp
  compareOp : CompareOp
  compareMask : UInt32 := 0
  writeMask : UInt32 := 0
  reference : UInt32 := 0
/-- Mirrors `VkPipelineDepthStencilStateCreateInfo`. -/
structure PipelineDepthStencilStateCreateInfo where
  flags : PipelineDepthStencilStateCreateFlags := default
  depthTestEnable : Bool32 := 0
  depthWriteEnable : Bool32 := 0
  depthCompareOp : CompareOp
  depthBoundsTestEnable : Bool32 := 0
  stencilTestEnable : Bool32 := 0
  front : StencilOpState
  back : StencilOpState
  minDepthBounds : Float
  maxDepthBounds : Float
/-- Bitmask for `VkPipelineColorBlendStateCreateFlags` (no named bits are exposed here). -/
structure PipelineColorBlendStateCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
instance : HOr PipelineColorBlendStateCreateFlags PipelineColorBlendStateCreateFlags PipelineColorBlendStateCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd PipelineColorBlendStateCreateFlags PipelineColorBlendStateCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited PipelineColorBlendStateCreateFlags := ⟨⟨0⟩⟩
/-- Enum wrapper mirroring `VkLogicOp`. -/
structure LogicOp := private mk :: private v : UInt32
  deriving DecidableEq
def LogicOp.clear := mk 0
def LogicOp.and := mk 1
def LogicOp.andReverse := mk 2
def LogicOp.copy := mk 3
def LogicOp.andInverted := mk 4
def LogicOp.noOp := mk 5
def LogicOp.xor := mk 6
def LogicOp.or := mk 7
def LogicOp.nor := mk 8
def LogicOp.equivalent := mk 9
def LogicOp.invert := mk 10
def LogicOp.orReverse := mk 11
def LogicOp.copyInverted := mk 12
def LogicOp.orInverted := mk 13
def LogicOp.nand := mk 14
def LogicOp.set := mk 15
/-- Enum wrapper mirroring `VkBlendFactor`. -/
structure BlendFactor := private mk :: private v : UInt32
  deriving DecidableEq
def BlendFactor.zero := mk 0
def BlendFactor.one := mk 1
def BlendFactor.srcColor := mk 2
def BlendFactor.oneMinusSrcColor := mk 3
def BlendFactor.dstColor := mk 4
def BlendFactor.oneMinusDstColor := mk 5
def BlendFactor.srcAlpha := mk 6
def BlendFactor.oneMinusSrcAlpha := mk 7
def BlendFactor.dstAlpha := mk 8
def BlendFactor.oneMinusDstAlpha := mk 9
def BlendFactor.constantColor := mk 10
def BlendFactor.oneMinusConstantColor := mk 11
def BlendFactor.constantAlpha := mk 12
def BlendFactor.oneMinusConstantAlpha := mk 13
def BlendFactor.srcAlphaSaturate := mk 14
def BlendFactor.src1Color := mk 15
def BlendFactor.oneMinusSrc1Color := mk 16
def BlendFactor.src1Alpha := mk 17
def BlendFactor.oneMinusSrc1Alpha := mk 18
/-- Enum wrapper mirroring `VkBlendOp`. -/
structure BlendOp := private mk :: private v : UInt32
  deriving DecidableEq
def BlendOp.add := mk 0
def BlendOp.subtract := mk 1
def BlendOp.reverseSubtract := mk 2
def BlendOp.min := mk 3
def BlendOp.max := mk 4
/-- Bitmask wrapping `VkColorComponentFlagBits` values. -/
structure ColorComponentFlags := private mk :: private v : UInt32
  deriving DecidableEq
def ColorComponentFlags.r := mk 1
def ColorComponentFlags.g := mk 2
def ColorComponentFlags.b := mk 4
def ColorComponentFlags.a := mk 8
-- `|||` unions two component masks, `&&&` tests for a shared component bit
-- (yielding `Bool`, not a mask), and `default` is the empty mask.
instance : HOr ColorComponentFlags ColorComponentFlags ColorComponentFlags :=
  ⟨fun a b => ⟨a.v ||| b.v⟩⟩
instance : HAnd ColorComponentFlags ColorComponentFlags Bool :=
  ⟨fun a b => (a.v &&& b.v) != 0⟩
instance : Inhabited ColorComponentFlags :=
  ⟨⟨0⟩⟩
/-- Mirrors `VkPipelineColorBlendAttachmentState`. -/
structure PipelineColorBlendAttachmentState where
  blendEnable : Bool32 := 0
  srcColorBlendFactor : BlendFactor
  dstColorBlendFactor : BlendFactor
  colorBlendOp : BlendOp
  srcAlphaBlendFactor : BlendFactor
  dstAlphaBlendFactor : BlendFactor
  alphaBlendOp : BlendOp
  colorWriteMask : ColorComponentFlags := default
/-- Mirrors `VkPipelineColorBlendStateCreateInfo`. -/
structure PipelineColorBlendStateCreateInfo where
  flags : PipelineColorBlendStateCreateFlags := default
  logicOpEnable : Bool32 := 0
  logicOp : LogicOp
  attachments : Array (PipelineColorBlendAttachmentState) := .empty
  -- The C struct has a fixed `float[4]`; a shorter array here likely leaves
  -- constants undefined — NOTE(review): confirm glue behavior for length ≠ 4.
  blendConstants : FloatArray := .empty
/-- `VkPipelineDynamicStateCreateFlags` (no named bits). -/
abbrev PipelineDynamicStateCreateFlags : Type := Flags
/-- Enum wrapper mirroring `VkDynamicState`. -/
structure DynamicState := private mk :: private v : UInt32
  deriving DecidableEq
def DynamicState.viewport := mk 0
def DynamicState.scissor := mk 1
def DynamicState.lineWidth := mk 2
def DynamicState.depthBias := mk 3
def DynamicState.blendConstants := mk 4
def DynamicState.depthBounds := mk 5
def DynamicState.stencilCompareMask := mk 6
def DynamicState.stencilWriteMask := mk 7
def DynamicState.stencilReference := mk 8
/-- Mirrors `VkPipelineDynamicStateCreateInfo`. -/
structure PipelineDynamicStateCreateInfo where
  flags : PipelineDynamicStateCreateFlags := 0
  dynamicStates : Array (DynamicState) := .empty
/-- Opaque `VkPipelineLayout` handle. -/
opaque PipelineLayout : Type := UInt64
/-- Opaque `VkRenderPass` handle. -/
opaque RenderPass : Type := UInt64
/-- Mirrors `VkGraphicsPipelineCreateInfo`.
NOTE(review): unlike `ComputePipelineCreateInfo` below, the C struct's
`basePipelineHandle`/`basePipelineIndex` fields are not exposed here —
confirm this asymmetry is intentional. -/
structure GraphicsPipelineCreateInfo where
  flags : PipelineCreateFlags := default
  stages : Array (PipelineShaderStageCreateInfo) := .empty
  vertexInputState : Option (PipelineVertexInputStateCreateInfo) := none
  inputAssemblyState : Option (PipelineInputAssemblyStateCreateInfo) := none
  tessellationState : Option (PipelineTessellationStateCreateInfo) := none
  viewportState : Option (PipelineViewportStateCreateInfo) := none
  rasterizationState : Option (PipelineRasterizationStateCreateInfo) := none
  multisampleState : Option (PipelineMultisampleStateCreateInfo) := none
  depthStencilState : Option (PipelineDepthStencilStateCreateInfo) := none
  colorBlendState : Option (PipelineColorBlendStateCreateInfo) := none
  dynamicState : Option (PipelineDynamicStateCreateInfo) := none
  layout : PipelineLayout
  renderPass : RenderPass
  subpass : UInt32 := 0
/-- Opaque `VkPipeline` handle. -/
opaque Pipeline : Type := UInt64
-- NOTE(review): `vkCreateGraphicsPipelines` takes a `VkPipelineCache`, but
-- this binding does not (the compute variant below does) — confirm how the
-- glue supplies the cache argument.
/-- Binding for `vkCreateGraphicsPipelines`. -/
@[extern "glue_vkCreateGraphicsPipelines"]
opaque createGraphicsPipelines : (@& Device) → (@& Array (GraphicsPipelineCreateInfo)) → IO (Result × Array (Pipeline))
/-- Mirrors `VkComputePipelineCreateInfo`. -/
structure ComputePipelineCreateInfo where
  flags : PipelineCreateFlags := default
  stage : PipelineShaderStageCreateInfo
  layout : PipelineLayout
  -- Only meaningful with `PipelineCreateFlags.derivative` in the Vulkan API.
  basePipelineHandle : Pipeline
  basePipelineIndex : UInt32 := 0
/-- Binding for `vkCreateComputePipelines`. -/
@[extern "glue_vkCreateComputePipelines"]
opaque createComputePipelines : (@& Device) → (@& PipelineCache) → (@& Array (ComputePipelineCreateInfo)) → IO (Result × Array (Pipeline))
/-- Binding for `vkDestroyPipeline`. -/
@[extern "glue_vkDestroyPipeline"]
opaque destroyPipeline : (@& Device) → (@& Pipeline) → IO Unit
/-- Bitmask for `VkPipelineLayoutCreateFlags` (no named bits are exposed here). -/
structure PipelineLayoutCreateFlags := private mk :: private v : UInt32
  deriving DecidableEq
instance : HOr PipelineLayoutCreateFlags PipelineLayoutCreateFlags PipelineLayoutCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd PipelineLayoutCreateFlags PipelineLayoutCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited PipelineLayoutCreateFlags := ⟨⟨0⟩⟩
/-- Opaque `VkDescriptorSetLayout` handle. -/
opaque DescriptorSetLayout : Type := UInt64
/-- Mirrors `VkPushConstantRange` (`offset`/`size` in bytes). -/
structure PushConstantRange where
  stageFlags : ShaderStageFlags := default
  offset : UInt32 := 0
  size : UInt32 := 0
/-- Mirrors `VkPipelineLayoutCreateInfo`. -/
structure PipelineLayoutCreateInfo where
  flags : PipelineLayoutCreateFlags := default
  setLayouts : Array (DescriptorSetLayout) := .empty
  pushConstantRanges : Array (PushConstantRange) := .empty
/-- Binding for `vkCreatePipelineLayout`. -/
@[extern "glue_vkCreatePipelineLayout"]
opaque createPipelineLayout : (@& Device) → (@& PipelineLayoutCreateInfo) → IO (Result × PipelineLayout)
/-- Binding for `vkDestroyPipelineLayout`. -/
@[extern "glue_vkDestroyPipelineLayout"]
opaque destroyPipelineLayout : (@& Device) → (@& PipelineLayout) → IO Unit
structure SamplerCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
/-- Bitwise union of two sampler-creation flag masks. -/
instance : HOr SamplerCreateFlags SamplerCreateFlags SamplerCreateFlags where
  hOr a b := ⟨a.v ||| b.v⟩
/-- `a &&& b` tests whether the masks overlap in at least one bit. -/
instance : HAnd SamplerCreateFlags SamplerCreateFlags Bool where
  hAnd a b := a.v &&& b.v != 0
/-- Empty mask by default. -/
instance : Inhabited SamplerCreateFlags where
  default := ⟨0⟩
/-- Mirrors `VkFilter`. -/
structure Filter := private mk :: private v : UInt32
deriving DecidableEq
def Filter.nearest := mk 0
def Filter.linear := mk 1
/-- Mirrors `VkSamplerMipmapMode`. -/
structure SamplerMipmapMode := private mk :: private v : UInt32
deriving DecidableEq
def SamplerMipmapMode.nearest := mk 0
def SamplerMipmapMode.linear := mk 1
/-- Mirrors `VkSamplerAddressMode`. -/
structure SamplerAddressMode := private mk :: private v : UInt32
deriving DecidableEq
def SamplerAddressMode.repeat := mk 0
def SamplerAddressMode.mirroredRepeat := mk 1
def SamplerAddressMode.clampToEdge := mk 2
def SamplerAddressMode.clampToBorder := mk 3
/-- Mirrors `VkBorderColor`. -/
structure BorderColor := private mk :: private v : UInt32
deriving DecidableEq
def BorderColor.floatTransparentBlack := mk 0
def BorderColor.intTransparentBlack := mk 1
def BorderColor.floatOpaqueBlack := mk 2
def BorderColor.intOpaqueBlack := mk 3
def BorderColor.floatOpaqueWhite := mk 4
def BorderColor.intOpaqueWhite := mk 5
/-- Mirrors `VkSamplerCreateInfo`. Do not reorder fields: the generated C glue
reads them by constructor index. -/
structure SamplerCreateInfo where
flags : SamplerCreateFlags := default
magFilter : Filter
minFilter : Filter
mipmapMode : SamplerMipmapMode
addressModeU : SamplerAddressMode
addressModeV : SamplerAddressMode
addressModeW : SamplerAddressMode
mipLodBias : Float
anisotropyEnable : Bool32 := 0 -- 0 is VK_FALSE
maxAnisotropy : Float
compareEnable : Bool32 := 0 -- 0 is VK_FALSE
compareOp : CompareOp
minLod : Float
maxLod : Float
borderColor : BorderColor
unnormalizedCoordinates : Bool32 := 0 -- 0 is VK_FALSE
/-- Opaque `VkSampler` handle. -/
opaque Sampler : Type := UInt64
/-- Binding for `vkCreateSampler`. -/
@[extern "glue_vkCreateSampler"]
opaque createSampler : (@& Device) → (@& SamplerCreateInfo) → IO (Result × Sampler)
/-- Binding for `vkDestroySampler`. -/
@[extern "glue_vkDestroySampler"]
opaque destroySampler : (@& Device) → (@& Sampler) → IO Unit
/-- Bitmask mirroring `VkDescriptorSetLayoutCreateFlags` (no core bits bound). -/
structure DescriptorSetLayoutCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
instance : HOr DescriptorSetLayoutCreateFlags DescriptorSetLayoutCreateFlags DescriptorSetLayoutCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd DescriptorSetLayoutCreateFlags DescriptorSetLayoutCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited DescriptorSetLayoutCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkDescriptorType`; values match the C enum. -/
structure DescriptorType := private mk :: private v : UInt32
deriving DecidableEq
def DescriptorType.sampler := mk 0
def DescriptorType.combinedImageSampler := mk 1
def DescriptorType.sampledImage := mk 2
def DescriptorType.storageImage := mk 3
def DescriptorType.uniformTexelBuffer := mk 4
def DescriptorType.storageTexelBuffer := mk 5
def DescriptorType.uniformBuffer := mk 6
def DescriptorType.storageBuffer := mk 7
def DescriptorType.uniformBufferDynamic := mk 8
def DescriptorType.storageBufferDynamic := mk 9
def DescriptorType.inputAttachment := mk 10
/-- Mirrors `VkDescriptorSetLayoutBinding`. Do not reorder fields (glue reads
by constructor index). -/
structure DescriptorSetLayoutBinding where
binding : UInt32 := 0
descriptorType : DescriptorType
stageFlags : ShaderStageFlags := default
immutableSamplers : Array (Sampler) := .empty
/-- Mirrors `VkDescriptorSetLayoutCreateInfo`. -/
structure DescriptorSetLayoutCreateInfo where
flags : DescriptorSetLayoutCreateFlags := default
bindings : Array (DescriptorSetLayoutBinding) := .empty
/-- Binding for `vkCreateDescriptorSetLayout`. -/
@[extern "glue_vkCreateDescriptorSetLayout"]
opaque createDescriptorSetLayout : (@& Device) → (@& DescriptorSetLayoutCreateInfo) → IO (Result × DescriptorSetLayout)
/-- Binding for `vkDestroyDescriptorSetLayout`. -/
@[extern "glue_vkDestroyDescriptorSetLayout"]
opaque destroyDescriptorSetLayout : (@& Device) → (@& DescriptorSetLayout) → IO Unit
/-- Bitmask mirroring `VkDescriptorPoolCreateFlags`. -/
structure DescriptorPoolCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
def DescriptorPoolCreateFlags.freeDescriptorSet := mk 1
instance : HOr DescriptorPoolCreateFlags DescriptorPoolCreateFlags DescriptorPoolCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd DescriptorPoolCreateFlags DescriptorPoolCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited DescriptorPoolCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkDescriptorPoolSize`. -/
structure DescriptorPoolSize where
type : DescriptorType
descriptorCount : UInt32 := 0
/-- Mirrors `VkDescriptorPoolCreateInfo`. Do not reorder fields (glue reads by
constructor index). -/
structure DescriptorPoolCreateInfo where
flags : DescriptorPoolCreateFlags := default
maxSets : UInt32 := 0
poolSizes : Array (DescriptorPoolSize) := .empty
/-- Opaque `VkDescriptorPool` handle. -/
opaque DescriptorPool : Type := UInt64
/-- Binding for `vkCreateDescriptorPool`. -/
@[extern "glue_vkCreateDescriptorPool"]
opaque createDescriptorPool : (@& Device) → (@& DescriptorPoolCreateInfo) → IO (Result × DescriptorPool)
/-- Binding for `vkDestroyDescriptorPool`. -/
@[extern "glue_vkDestroyDescriptorPool"]
opaque destroyDescriptorPool : (@& Device) → (@& DescriptorPool) → IO Unit
-- Reserved for future use in the C API; no bits are defined.
abbrev DescriptorPoolResetFlags : Type := Flags
/-- Binding for `vkResetDescriptorPool`; frees all sets allocated from the pool. -/
@[extern "glue_vkResetDescriptorPool"]
opaque resetDescriptorPool : (@& Device) → (@& DescriptorPool) → (@& DescriptorPoolResetFlags) → IO Result
/-- Mirrors `VkDescriptorSetAllocateInfo`; `setLayouts.size` becomes the C
`descriptorSetCount`. -/
structure DescriptorSetAllocateInfo where
descriptorPool : DescriptorPool
setLayouts : Array (DescriptorSetLayout) := .empty
/-- Opaque `VkDescriptorSet` handle. -/
opaque DescriptorSet : Type := UInt64
/-- Binding for `vkAllocateDescriptorSets`. -/
@[extern "glue_vkAllocateDescriptorSets"]
opaque allocateDescriptorSets : (@& Device) → (@& DescriptorSetAllocateInfo) → IO (Result × Array (DescriptorSet))
/-- Binding for `vkFreeDescriptorSets`. -/
@[extern "glue_vkFreeDescriptorSets"]
opaque freeDescriptorSets : (@& Device) → (@& DescriptorPool) → (@& Array (DescriptorSet)) → IO Result
/-- Mirrors `VkDescriptorImageInfo`. -/
structure DescriptorImageInfo where
sampler : Sampler
imageView : ImageView
imageLayout : ImageLayout
/-- Mirrors `VkDescriptorBufferInfo`; `offset`/`range` are in bytes. -/
structure DescriptorBufferInfo where
buffer : Buffer
offset : DeviceSize := 0
range : DeviceSize := 0 -- NOTE(review): C callers often use VK_WHOLE_SIZE here; 0 may not be a useful default — confirm glue behavior
/-- Mirrors `VkWriteDescriptorSet`. Which of the three info arrays is consumed
depends on `descriptorType`, as in the C API. Do not reorder fields. -/
structure WriteDescriptorSet where
dstSet : DescriptorSet
dstBinding : UInt32 := 0
dstArrayElement : UInt32 := 0
descriptorType : DescriptorType
imageInfo : Array (DescriptorImageInfo) := .empty
bufferInfo : Array (DescriptorBufferInfo) := .empty
texelBufferView : Array (BufferView) := .empty
/-- Mirrors `VkCopyDescriptorSet`. -/
structure CopyDescriptorSet where
srcSet : DescriptorSet
srcBinding : UInt32 := 0
srcArrayElement : UInt32 := 0
dstSet : DescriptorSet
dstBinding : UInt32 := 0
dstArrayElement : UInt32 := 0
descriptorCount : UInt32 := 0
/-- Binding for `vkUpdateDescriptorSets` (writes then copies). -/
@[extern "glue_vkUpdateDescriptorSets"]
opaque updateDescriptorSets : (@& Device) → (@& Array (WriteDescriptorSet)) → (@& Array (CopyDescriptorSet)) → IO Unit
structure FramebufferCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
/-- Bitwise union of two framebuffer flag masks. -/
instance : HOr FramebufferCreateFlags FramebufferCreateFlags FramebufferCreateFlags where
  hOr a b := ⟨a.v ||| b.v⟩
/-- `a &&& b` is `true` iff the masks share a set bit. -/
instance : HAnd FramebufferCreateFlags FramebufferCreateFlags Bool where
  hAnd a b := a.v &&& b.v != 0
/-- Empty mask by default. -/
instance : Inhabited FramebufferCreateFlags where
  default := ⟨0⟩
/-- Mirrors `VkFramebufferCreateInfo`; `attachments.size` becomes the C
`attachmentCount`. Do not reorder fields (glue reads by constructor index). -/
structure FramebufferCreateInfo where
flags : FramebufferCreateFlags := default
renderPass : RenderPass
attachments : Array (ImageView) := .empty
width : UInt32 := 0
height : UInt32 := 0
layers : UInt32 := 0
/-- Opaque `VkFramebuffer` handle. -/
opaque Framebuffer : Type := UInt64
/-- Binding for `vkCreateFramebuffer`. -/
@[extern "glue_vkCreateFramebuffer"]
opaque createFramebuffer : (@& Device) → (@& FramebufferCreateInfo) → IO (Result × Framebuffer)
/-- Binding for `vkDestroyFramebuffer`. -/
@[extern "glue_vkDestroyFramebuffer"]
opaque destroyFramebuffer : (@& Device) → (@& Framebuffer) → IO Unit
/-- Bitmask mirroring `VkRenderPassCreateFlags` (no core bits bound). -/
structure RenderPassCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
instance : HOr RenderPassCreateFlags RenderPassCreateFlags RenderPassCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd RenderPassCreateFlags RenderPassCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited RenderPassCreateFlags := ⟨⟨0⟩⟩
/-- Bitmask mirroring `VkAttachmentDescriptionFlags`. -/
structure AttachmentDescriptionFlags := private mk :: private v : UInt32
deriving DecidableEq
def AttachmentDescriptionFlags.mayAlias := mk 1
instance : HOr AttachmentDescriptionFlags AttachmentDescriptionFlags AttachmentDescriptionFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd AttachmentDescriptionFlags AttachmentDescriptionFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited AttachmentDescriptionFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkAttachmentLoadOp`. -/
structure AttachmentLoadOp := private mk :: private v : UInt32
deriving DecidableEq
def AttachmentLoadOp.load := mk 0
def AttachmentLoadOp.clear := mk 1
def AttachmentLoadOp.dontCare := mk 2
/-- Mirrors `VkAttachmentStoreOp`. -/
structure AttachmentStoreOp := private mk :: private v : UInt32
deriving DecidableEq
def AttachmentStoreOp.store := mk 0
def AttachmentStoreOp.dontCare := mk 1
/-- Mirrors `VkAttachmentDescription`. Do not reorder fields (glue reads by
constructor index). -/
structure AttachmentDescription where
flags : AttachmentDescriptionFlags := default
format : Format
samples : SampleCountFlags := default
loadOp : AttachmentLoadOp
storeOp : AttachmentStoreOp
stencilLoadOp : AttachmentLoadOp
stencilStoreOp : AttachmentStoreOp
initialLayout : ImageLayout
finalLayout : ImageLayout
/-- Bitmask mirroring `VkSubpassDescriptionFlags` (no core bits bound). -/
structure SubpassDescriptionFlags := private mk :: private v : UInt32
deriving DecidableEq
instance : HOr SubpassDescriptionFlags SubpassDescriptionFlags SubpassDescriptionFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd SubpassDescriptionFlags SubpassDescriptionFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited SubpassDescriptionFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkPipelineBindPoint`. -/
structure PipelineBindPoint := private mk :: private v : UInt32
deriving DecidableEq
def PipelineBindPoint.graphics := mk 0
def PipelineBindPoint.compute := mk 1
/-- Mirrors `VkAttachmentReference`; `attachment` indexes the render pass's
attachment array. -/
structure AttachmentReference where
attachment : UInt32 := 0
layout : ImageLayout
/-- Mirrors `VkSubpassDescription`. NOTE(review): the C struct also has
`pResolveAttachments`, which this binding does not expose — confirm the glue
passes NULL. Do not reorder fields (glue reads by constructor index). -/
structure SubpassDescription where
flags : SubpassDescriptionFlags := default
pipelineBindPoint : PipelineBindPoint
inputAttachments : Array (AttachmentReference) := .empty
colorAttachments : Array (AttachmentReference) := .empty
depthStencilAttachment : Option (AttachmentReference) := none
preserveAttachments : Array (UInt32) := .empty
/-- Bitmask mirroring `VkAccessFlags`; each constant below matches the
corresponding `VK_ACCESS_*_BIT` value. -/
structure AccessFlags := private mk :: private v : UInt32
deriving DecidableEq
def AccessFlags.indirectCommandRead := mk 1
def AccessFlags.indexRead := mk 2
def AccessFlags.vertexAttributeRead := mk 4
def AccessFlags.uniformRead := mk 8
def AccessFlags.inputAttachmentRead := mk 16
def AccessFlags.shaderRead := mk 32
def AccessFlags.shaderWrite := mk 64
def AccessFlags.colorAttachmentRead := mk 128
def AccessFlags.colorAttachmentWrite := mk 256
def AccessFlags.depthStencilAttachmentRead := mk 512
def AccessFlags.depthStencilAttachmentWrite := mk 1024
def AccessFlags.transferRead := mk 2048
def AccessFlags.transferWrite := mk 4096
def AccessFlags.hostRead := mk 8192
def AccessFlags.hostWrite := mk 16384
def AccessFlags.memoryRead := mk 32768
def AccessFlags.memoryWrite := mk 65536
-- `|||` unions masks; `a &&& b : Bool` tests for overlap; `default` is empty.
instance : HOr AccessFlags AccessFlags AccessFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd AccessFlags AccessFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited AccessFlags := ⟨⟨0⟩⟩
structure DependencyFlags := private mk :: private v : UInt32
deriving DecidableEq
/-- `VK_DEPENDENCY_BY_REGION_BIT`. -/
def DependencyFlags.byRegion := mk 1
/-- Bitwise union of two dependency masks. -/
instance : HOr DependencyFlags DependencyFlags DependencyFlags where
  hOr a b := ⟨a.v ||| b.v⟩
/-- `a &&& b` is `true` iff the masks share a set bit. -/
instance : HAnd DependencyFlags DependencyFlags Bool where
  hAnd a b := a.v &&& b.v != 0
/-- Empty mask by default. -/
instance : Inhabited DependencyFlags where
  default := ⟨0⟩
/-- Mirrors `VkSubpassDependency`. NOTE(review): `srcSubpass`/`dstSubpass`
default to 0; the C sentinel `VK_SUBPASS_EXTERNAL` (0xFFFFFFFF) must be
passed explicitly. -/
structure SubpassDependency where
srcSubpass : UInt32 := 0
dstSubpass : UInt32 := 0
srcStageMask : PipelineStageFlags := default
dstStageMask : PipelineStageFlags := default
srcAccessMask : AccessFlags := default
dstAccessMask : AccessFlags := default
dependencyFlags : DependencyFlags := default
/-- Mirrors `VkRenderPassCreateInfo`; array sizes become the C `*Count`
fields. Do not reorder fields (glue reads by constructor index). -/
structure RenderPassCreateInfo where
flags : RenderPassCreateFlags := default
attachments : Array (AttachmentDescription) := .empty
subpasses : Array (SubpassDescription) := .empty
dependencies : Array (SubpassDependency) := .empty
/-- Binding for `vkCreateRenderPass`. -/
@[extern "glue_vkCreateRenderPass"]
opaque createRenderPass : (@& Device) → (@& RenderPassCreateInfo) → IO (Result × RenderPass)
/-- Binding for `vkDestroyRenderPass`. -/
@[extern "glue_vkDestroyRenderPass"]
opaque destroyRenderPass : (@& Device) → (@& RenderPass) → IO Unit
/-- Binding for `vkGetRenderAreaGranularity`. -/
@[extern "glue_vkGetRenderAreaGranularity"]
opaque getRenderAreaGranularity : (@& Device) → (@& RenderPass) → IO Extent2D
/-- Bitmask mirroring `VkCommandPoolCreateFlags`. -/
structure CommandPoolCreateFlags := private mk :: private v : UInt32
deriving DecidableEq
def CommandPoolCreateFlags.transient := mk 1
def CommandPoolCreateFlags.resetCommandBuffer := mk 2
instance : HOr CommandPoolCreateFlags CommandPoolCreateFlags CommandPoolCreateFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd CommandPoolCreateFlags CommandPoolCreateFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited CommandPoolCreateFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkCommandPoolCreateInfo`. -/
structure CommandPoolCreateInfo where
flags : CommandPoolCreateFlags := default
queueFamilyIndex : UInt32 := 0
/-- Opaque `VkCommandPool` handle. -/
opaque CommandPool : Type := UInt64
/-- Binding for `vkCreateCommandPool`. -/
@[extern "glue_vkCreateCommandPool"]
opaque createCommandPool : (@& Device) → (@& CommandPoolCreateInfo) → IO (Result × CommandPool)
/-- Binding for `vkDestroyCommandPool`. -/
@[extern "glue_vkDestroyCommandPool"]
opaque destroyCommandPool : (@& Device) → (@& CommandPool) → IO Unit
/-- Bitmask mirroring `VkCommandPoolResetFlags`. -/
structure CommandPoolResetFlags := private mk :: private v : UInt32
deriving DecidableEq
def CommandPoolResetFlags.releaseResources := mk 1
instance : HOr CommandPoolResetFlags CommandPoolResetFlags CommandPoolResetFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd CommandPoolResetFlags CommandPoolResetFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited CommandPoolResetFlags := ⟨⟨0⟩⟩
/-- Binding for `vkResetCommandPool`. -/
@[extern "glue_vkResetCommandPool"]
opaque resetCommandPool : (@& Device) → (@& CommandPool) → (@& CommandPoolResetFlags) → IO Result
/-- Mirrors `VkCommandBufferLevel`. -/
structure CommandBufferLevel := private mk :: private v : UInt32
deriving DecidableEq
def CommandBufferLevel.primary := mk 0
def CommandBufferLevel.secondary := mk 1
/-- Mirrors `VkCommandBufferAllocateInfo`. -/
structure CommandBufferAllocateInfo where
commandPool : CommandPool
level : CommandBufferLevel
commandBufferCount : UInt32 := 0
/-- Binding for `vkAllocateCommandBuffers`. -/
@[extern "glue_vkAllocateCommandBuffers"]
opaque allocateCommandBuffers : (@& Device) → (@& CommandBufferAllocateInfo) → IO (Result × Array (CommandBuffer))
/-- Binding for `vkFreeCommandBuffers`. -/
@[extern "glue_vkFreeCommandBuffers"]
opaque freeCommandBuffers : (@& Device) → (@& CommandPool) → (@& Array (CommandBuffer)) → IO Unit
/-- Bitmask mirroring `VkCommandBufferUsageFlags`. -/
structure CommandBufferUsageFlags := private mk :: private v : UInt32
deriving DecidableEq
def CommandBufferUsageFlags.oneTimeSubmit := mk 1
def CommandBufferUsageFlags.renderPassContinue := mk 2
def CommandBufferUsageFlags.simultaneousUse := mk 4
instance : HOr CommandBufferUsageFlags CommandBufferUsageFlags CommandBufferUsageFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd CommandBufferUsageFlags CommandBufferUsageFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited CommandBufferUsageFlags := ⟨⟨0⟩⟩
/-- Bitmask mirroring `VkQueryControlFlags`. -/
structure QueryControlFlags := private mk :: private v : UInt32
deriving DecidableEq
def QueryControlFlags.precise := mk 1
instance : HOr QueryControlFlags QueryControlFlags QueryControlFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd QueryControlFlags QueryControlFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited QueryControlFlags := ⟨⟨0⟩⟩
/-- Mirrors `VkCommandBufferInheritanceInfo` (for secondary command buffers). -/
structure CommandBufferInheritanceInfo where
renderPass : RenderPass
subpass : UInt32 := 0
framebuffer : Framebuffer
occlusionQueryEnable : Bool32 := 0 -- 0 is VK_FALSE
queryFlags : QueryControlFlags := default
pipelineStatistics : QueryPipelineStatisticFlags := default
/-- Mirrors `VkCommandBufferBeginInfo`; `inheritanceInfo` maps to the nullable
C pointer `pInheritanceInfo`. -/
structure CommandBufferBeginInfo where
flags : CommandBufferUsageFlags := default
inheritanceInfo : Option (CommandBufferInheritanceInfo) := none
/-- Binding for `vkBeginCommandBuffer`. -/
@[extern "glue_vkBeginCommandBuffer"]
opaque beginCommandBuffer : (@& CommandBuffer) → (@& CommandBufferBeginInfo) → IO Result
/-- Binding for `vkEndCommandBuffer`. -/
@[extern "glue_vkEndCommandBuffer"]
opaque endCommandBuffer : (@& CommandBuffer) → IO Result
/-- Bitmask mirroring `VkCommandBufferResetFlags`. -/
structure CommandBufferResetFlags := private mk :: private v : UInt32
deriving DecidableEq
def CommandBufferResetFlags.releaseResources := mk 1
instance : HOr CommandBufferResetFlags CommandBufferResetFlags CommandBufferResetFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd CommandBufferResetFlags CommandBufferResetFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited CommandBufferResetFlags := ⟨⟨0⟩⟩
/-- Binding for `vkResetCommandBuffer`. -/
@[extern "glue_vkResetCommandBuffer"]
opaque resetCommandBuffer : (@& CommandBuffer) → (@& CommandBufferResetFlags) → IO Result
/-- Binding for `vkCmdBindPipeline`. -/
@[extern "glue_vkCmdBindPipeline"]
opaque cmdBindPipeline : (@& CommandBuffer) → (@& PipelineBindPoint) → (@& Pipeline) → IO Unit
/-- Binding for `vkCmdSetViewport`; the `UInt32` is `firstViewport`. -/
@[extern "glue_vkCmdSetViewport"]
opaque cmdSetViewport : (@& CommandBuffer) → (@& UInt32) → (@& Array (Viewport)) → IO Unit
/-- Binding for `vkCmdSetScissor`; the `UInt32` is `firstScissor`. -/
@[extern "glue_vkCmdSetScissor"]
opaque cmdSetScissor : (@& CommandBuffer) → (@& UInt32) → (@& Array (Rect2D)) → IO Unit
/-- Binding for `vkCmdSetLineWidth`. -/
@[extern "glue_vkCmdSetLineWidth"]
opaque cmdSetLineWidth : (@& CommandBuffer) → (@& Float) → IO Unit
/-- Binding for `vkCmdSetDepthBias`: constant factor, clamp, slope factor. -/
@[extern "glue_vkCmdSetDepthBias"]
opaque cmdSetDepthBias : (@& CommandBuffer) → (@& Float) → (@& Float) → (@& Float) → IO Unit
/-- Binding for `vkCmdSetDepthBounds`: min bound, max bound. -/
@[extern "glue_vkCmdSetDepthBounds"]
opaque cmdSetDepthBounds : (@& CommandBuffer) → (@& Float) → (@& Float) → IO Unit
/-- Bitmask mirroring `VkStencilFaceFlags`. -/
structure StencilFaceFlags := private mk :: private v : UInt32
deriving DecidableEq
def StencilFaceFlags.front := mk 1
def StencilFaceFlags.back := mk 2
instance : HOr StencilFaceFlags StencilFaceFlags StencilFaceFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd StencilFaceFlags StencilFaceFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited StencilFaceFlags := ⟨⟨0⟩⟩
/-- Binding for `vkCmdSetStencilCompareMask`. -/
@[extern "glue_vkCmdSetStencilCompareMask"]
opaque cmdSetStencilCompareMask : (@& CommandBuffer) → (@& StencilFaceFlags) → (@& UInt32) → IO Unit
/-- Binding for `vkCmdSetStencilWriteMask`. -/
@[extern "glue_vkCmdSetStencilWriteMask"]
opaque cmdSetStencilWriteMask : (@& CommandBuffer) → (@& StencilFaceFlags) → (@& UInt32) → IO Unit
/-- Binding for `vkCmdSetStencilReference`. -/
@[extern "glue_vkCmdSetStencilReference"]
opaque cmdSetStencilReference : (@& CommandBuffer) → (@& StencilFaceFlags) → (@& UInt32) → IO Unit
/-- Binding for `vkCmdBindDescriptorSets`; the `UInt32` is `firstSet`, and the
final array carries the dynamic offsets. -/
@[extern "glue_vkCmdBindDescriptorSets"]
opaque cmdBindDescriptorSets : (@& CommandBuffer) → (@& PipelineBindPoint) → (@& PipelineLayout) → (@& UInt32) → (@& Array (DescriptorSet)) → (@& Array (UInt32)) → IO Unit
/-- Mirrors `VkIndexType`. -/
structure IndexType := private mk :: private v : UInt32
deriving DecidableEq
def IndexType.uint16 := mk 0
def IndexType.uint32 := mk 1
/-- Binding for `vkCmdBindIndexBuffer`; the `DeviceSize` is the byte offset. -/
@[extern "glue_vkCmdBindIndexBuffer"]
opaque cmdBindIndexBuffer : (@& CommandBuffer) → (@& Buffer) → (@& DeviceSize) → (@& IndexType) → IO Unit
/-- Binding for `vkCmdBindVertexBuffers`: first binding, buffers, offsets. -/
@[extern "glue_vkCmdBindVertexBuffers"]
opaque cmdBindVertexBuffers : (@& CommandBuffer) → (@& UInt32) → (@& Array (Buffer)) → (@& Array (DeviceSize)) → IO Unit
/-- `vkCmdDraw`: vertexCount, instanceCount, firstVertex, firstInstance. -/
@[extern "glue_vkCmdDraw"]
opaque cmdDraw : (@& CommandBuffer) → (@& UInt32) → (@& UInt32) → (@& UInt32) → (@& UInt32) → IO Unit
/-- `vkCmdDrawIndexed`: indexCount, instanceCount, firstIndex, vertexOffset,
firstInstance. NOTE(review): the C `vertexOffset` is a signed `int32_t` but is
bound here as `UInt32` — negative offsets need a bit-cast. -/
@[extern "glue_vkCmdDrawIndexed"]
opaque cmdDrawIndexed : (@& CommandBuffer) → (@& UInt32) → (@& UInt32) → (@& UInt32) → (@& UInt32) → (@& UInt32) → IO Unit
/-- `vkCmdDrawIndirect`: buffer, offset, drawCount, stride. -/
@[extern "glue_vkCmdDrawIndirect"]
opaque cmdDrawIndirect : (@& CommandBuffer) → (@& Buffer) → (@& DeviceSize) → (@& UInt32) → (@& UInt32) → IO Unit
/-- `vkCmdDrawIndexedIndirect`: buffer, offset, drawCount, stride. -/
@[extern "glue_vkCmdDrawIndexedIndirect"]
opaque cmdDrawIndexedIndirect : (@& CommandBuffer) → (@& Buffer) → (@& DeviceSize) → (@& UInt32) → (@& UInt32) → IO Unit
/-- `vkCmdDispatch`: groupCountX, groupCountY, groupCountZ. -/
@[extern "glue_vkCmdDispatch"]
opaque cmdDispatch : (@& CommandBuffer) → (@& UInt32) → (@& UInt32) → (@& UInt32) → IO Unit
/-- `vkCmdDispatchIndirect`: buffer, byte offset. -/
@[extern "glue_vkCmdDispatchIndirect"]
opaque cmdDispatchIndirect : (@& CommandBuffer) → (@& Buffer) → (@& DeviceSize) → IO Unit
/-- Mirrors `VkBufferCopy`; all values in bytes. -/
structure BufferCopy where
srcOffset : DeviceSize := 0
dstOffset : DeviceSize := 0
size : DeviceSize := 0
/-- Binding for `vkCmdCopyBuffer`: src buffer, dst buffer, regions. -/
@[extern "glue_vkCmdCopyBuffer"]
opaque cmdCopyBuffer : (@& CommandBuffer) → (@& Buffer) → (@& Buffer) → (@& Array (BufferCopy)) → IO Unit
/-- Mirrors `VkImageSubresourceLayers`. -/
structure ImageSubresourceLayers where
aspectMask : ImageAspectFlags := default
mipLevel : UInt32 := 0
baseArrayLayer : UInt32 := 0
layerCount : UInt32 := 0
/-- Mirrors `VkImageCopy`. -/
structure ImageCopy where
srcSubresource : ImageSubresourceLayers
srcOffset : Offset3D
dstSubresource : ImageSubresourceLayers
dstOffset : Offset3D
extent : Extent3D
/-- Binding for `vkCmdCopyImage`: src image+layout, dst image+layout, regions. -/
@[extern "glue_vkCmdCopyImage"]
opaque cmdCopyImage : (@& CommandBuffer) → (@& Image) → (@& ImageLayout) → (@& Image) → (@& ImageLayout) → (@& Array (ImageCopy)) → IO Unit
/-- Mirrors `VkImageBlit`. NOTE(review): in the C API `srcOffsets` and
`dstOffsets` are each `VkOffset3D[2]` (region corners); here each is a single
`Offset3D` — verify how the glue fills the second corner before relying on
this structure. -/
structure ImageBlit where
srcSubresource : ImageSubresourceLayers
srcOffsets : Offset3D
dstSubresource : ImageSubresourceLayers
dstOffsets : Offset3D
/-- Binding for `vkCmdBlitImage`; the final `Filter` selects the scaling filter. -/
@[extern "glue_vkCmdBlitImage"]
opaque cmdBlitImage : (@& CommandBuffer) → (@& Image) → (@& ImageLayout) → (@& Image) → (@& ImageLayout) → (@& Array (ImageBlit)) → (@& Filter) → IO Unit
/-- Mirrors `VkBufferImageCopy`. -/
structure BufferImageCopy where
bufferOffset : DeviceSize := 0
bufferRowLength : UInt32 := 0 -- 0 means tightly packed, as in the C API
bufferImageHeight : UInt32 := 0 -- 0 means tightly packed
imageSubresource : ImageSubresourceLayers
imageOffset : Offset3D
imageExtent : Extent3D
/-- Binding for `vkCmdCopyBufferToImage`. -/
@[extern "glue_vkCmdCopyBufferToImage"]
opaque cmdCopyBufferToImage : (@& CommandBuffer) → (@& Buffer) → (@& Image) → (@& ImageLayout) → (@& Array (BufferImageCopy)) → IO Unit
/-- Binding for `vkCmdCopyImageToBuffer`. -/
@[extern "glue_vkCmdCopyImageToBuffer"]
opaque cmdCopyImageToBuffer : (@& CommandBuffer) → (@& Image) → (@& ImageLayout) → (@& Buffer) → (@& Array (BufferImageCopy)) → IO Unit
/-- Binding for `vkCmdUpdateBuffer`; the `ByteArray` supplies both `pData` and
`dataSize`. -/
@[extern "glue_vkCmdUpdateBuffer"]
opaque cmdUpdateBuffer : (@& CommandBuffer) → (@& Buffer) → (@& DeviceSize) → (@& ByteArray) → IO Unit
/-- Binding for `vkCmdFillBuffer`: buffer, offset, size, 32-bit fill value. -/
@[extern "glue_vkCmdFillBuffer"]
opaque cmdFillBuffer : (@& CommandBuffer) → (@& Buffer) → (@& DeviceSize) → (@& DeviceSize) → (@& UInt32) → IO Unit
/-- Mirrors the `VkClearColorValue` union. NOTE(review): the `int32` variant
carries `Array UInt32` — presumably values are bit-cast to `int32_t` in the
glue; confirm there. -/
inductive ClearColorValue where
| float32 (_ : FloatArray)
| int32 (_ : Array (UInt32))
| uint32 (_ : Array (UInt32))
/-- Binding for `vkCmdClearColorImage`. -/
@[extern "glue_vkCmdClearColorImage"]
opaque cmdClearColorImage : (@& CommandBuffer) → (@& Image) → (@& ImageLayout) → (@& ClearColorValue) → (@& Array (ImageSubresourceRange)) → IO Unit
/-- Mirrors `VkClearDepthStencilValue`. -/
structure ClearDepthStencilValue where
depth : Float
stencil : UInt32 := 0
/-- Binding for `vkCmdClearDepthStencilImage`. -/
@[extern "glue_vkCmdClearDepthStencilImage"]
opaque cmdClearDepthStencilImage : (@& CommandBuffer) → (@& Image) → (@& ImageLayout) → (@& ClearDepthStencilValue) → (@& Array (ImageSubresourceRange)) → IO Unit
/-- Mirrors the `VkClearValue` union. -/
inductive ClearValue where
| color (_ : ClearColorValue)
| depthStencil (_ : ClearDepthStencilValue)
/-- Mirrors `VkClearAttachment`. -/
structure ClearAttachment where
aspectMask : ImageAspectFlags := default
colorAttachment : UInt32 := 0
clearValue : ClearValue
/-- Mirrors `VkClearRect`. -/
structure ClearRect where
rect : Rect2D
baseArrayLayer : UInt32 := 0
layerCount : UInt32 := 0
/-- Binding for `vkCmdClearAttachments`. -/
@[extern "glue_vkCmdClearAttachments"]
opaque cmdClearAttachments : (@& CommandBuffer) → (@& Array (ClearAttachment)) → (@& Array (ClearRect)) → IO Unit
/-- Mirrors `VkImageResolve`. -/
structure ImageResolve where
srcSubresource : ImageSubresourceLayers
srcOffset : Offset3D
dstSubresource : ImageSubresourceLayers
dstOffset : Offset3D
extent : Extent3D
/-- Binding for `vkCmdResolveImage` (multisample resolve). -/
@[extern "glue_vkCmdResolveImage"]
opaque cmdResolveImage : (@& CommandBuffer) → (@& Image) → (@& ImageLayout) → (@& Image) → (@& ImageLayout) → (@& Array (ImageResolve)) → IO Unit
/-- Binding for `vkCmdSetEvent`; the flags are the signal stage mask. -/
@[extern "glue_vkCmdSetEvent"]
opaque cmdSetEvent : (@& CommandBuffer) → (@& Event) → (@& PipelineStageFlags) → IO Unit
/-- Binding for `vkCmdResetEvent`. -/
@[extern "glue_vkCmdResetEvent"]
opaque cmdResetEvent : (@& CommandBuffer) → (@& Event) → (@& PipelineStageFlags) → IO Unit
/-- Mirrors `VkMemoryBarrier`. -/
structure MemoryBarrier where
srcAccessMask : AccessFlags := default
dstAccessMask : AccessFlags := default
/-- Mirrors `VkBufferMemoryBarrier`. NOTE(review): queue family indices default
to 0; the C sentinel `VK_QUEUE_FAMILY_IGNORED` (0xFFFFFFFF) must be passed
explicitly. Do not reorder fields (glue reads by constructor index). -/
structure BufferMemoryBarrier where
srcAccessMask : AccessFlags := default
dstAccessMask : AccessFlags := default
srcQueueFamilyIndex : UInt32 := 0
dstQueueFamilyIndex : UInt32 := 0
buffer : Buffer
offset : DeviceSize := 0
size : DeviceSize := 0
/-- Mirrors `VkImageMemoryBarrier`; see the queue-family note on
`BufferMemoryBarrier` above. -/
structure ImageMemoryBarrier where
srcAccessMask : AccessFlags := default
dstAccessMask : AccessFlags := default
oldLayout : ImageLayout
newLayout : ImageLayout
srcQueueFamilyIndex : UInt32 := 0
dstQueueFamilyIndex : UInt32 := 0
image : Image
subresourceRange : ImageSubresourceRange
/-- Binding for `vkCmdWaitEvents`: events, src/dst stage masks, then the three
barrier arrays. -/
@[extern "glue_vkCmdWaitEvents"]
opaque cmdWaitEvents : (@& CommandBuffer) → (@& Array (Event)) → (@& PipelineStageFlags) → (@& PipelineStageFlags) → (@& Array (MemoryBarrier)) → (@& Array (BufferMemoryBarrier)) → (@& Array (ImageMemoryBarrier)) → IO Unit
/-- Binding for `vkCmdPipelineBarrier`: src/dst stage masks, dependency flags,
then the three barrier arrays. -/
@[extern "glue_vkCmdPipelineBarrier"]
opaque cmdPipelineBarrier : (@& CommandBuffer) → (@& PipelineStageFlags) → (@& PipelineStageFlags) → (@& DependencyFlags) → (@& Array (MemoryBarrier)) → (@& Array (BufferMemoryBarrier)) → (@& Array (ImageMemoryBarrier)) → IO Unit
/-- Binding for `vkCmdBeginQuery`; the `UInt32` is the query index. -/
@[extern "glue_vkCmdBeginQuery"]
opaque cmdBeginQuery : (@& CommandBuffer) → (@& QueryPool) → (@& UInt32) → (@& QueryControlFlags) → IO Unit
/-- Binding for `vkCmdEndQuery`. -/
@[extern "glue_vkCmdEndQuery"]
opaque cmdEndQuery : (@& CommandBuffer) → (@& QueryPool) → (@& UInt32) → IO Unit
/-- Binding for `vkCmdResetQueryPool`: firstQuery, queryCount. -/
@[extern "glue_vkCmdResetQueryPool"]
opaque cmdResetQueryPool : (@& CommandBuffer) → (@& QueryPool) → (@& UInt32) → (@& UInt32) → IO Unit
/-- Binding for `vkCmdWriteTimestamp`. -/
@[extern "glue_vkCmdWriteTimestamp"]
opaque cmdWriteTimestamp : (@& CommandBuffer) → (@& PipelineStageFlags) → (@& QueryPool) → (@& UInt32) → IO Unit
/-- Bitmask mirroring `VkQueryResultFlags`. -/
structure QueryResultFlags := private mk :: private v : UInt32
deriving DecidableEq
def QueryResultFlags._64 := mk 1 -- VK_QUERY_RESULT_64_BIT ("_64" because identifiers cannot start with a digit)
def QueryResultFlags.wait := mk 2
def QueryResultFlags.withAvailability := mk 4
def QueryResultFlags.partial := mk 8 -- NOTE(review): `partial` is a Lean keyword — verify this parses; `«partial»` may be needed
instance : HOr QueryResultFlags QueryResultFlags QueryResultFlags := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd QueryResultFlags QueryResultFlags Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited QueryResultFlags := ⟨⟨0⟩⟩
-- (vkCmdCopyQueryPoolResults / vkCmdPushConstants bindings)
/-- Mirrors `VkRenderPassBeginInfo`; `clearValues.size` becomes the C
`clearValueCount`. -/
structure RenderPassBeginInfo where
renderPass : RenderPass
framebuffer : Framebuffer
renderArea : Rect2D
clearValues : Array (ClearValue) := .empty
/-- Mirrors `VkSubpassContents`. -/
structure SubpassContents := private mk :: private v : UInt32
deriving DecidableEq
def SubpassContents.inline := mk 0
def SubpassContents.secondaryCommandBuffers := mk 1
/-- Binding for `vkCmdBeginRenderPass`. -/
@[extern "glue_vkCmdBeginRenderPass"]
opaque cmdBeginRenderPass : (@& CommandBuffer) → (@& RenderPassBeginInfo) → (@& SubpassContents) → IO Unit
/-- Binding for `vkCmdNextSubpass`. -/
@[extern "glue_vkCmdNextSubpass"]
opaque cmdNextSubpass : (@& CommandBuffer) → (@& SubpassContents) → IO Unit
/-- Binding for `vkCmdEndRenderPass`. -/
@[extern "glue_vkCmdEndRenderPass"]
opaque cmdEndRenderPass : (@& CommandBuffer) → IO Unit
/-- Binding for `vkCmdExecuteCommands` (secondary command buffers). -/
@[extern "glue_vkCmdExecuteCommands"]
opaque cmdExecuteCommands : (@& CommandBuffer) → (@& Array (CommandBuffer)) → IO Unit
/-- Opaque `VkSurfaceKHR` handle (VK_KHR_surface extension). -/
opaque SurfaceKHR : Type := UInt64
/-- Binding for `vkDestroySurfaceKHR`. -/
@[extern "glue_vkDestroySurfaceKHR"]
opaque destroySurfaceKHR : (@& Instance) → (@& SurfaceKHR) → IO Unit
/-- Binding for `vkGetPhysicalDeviceSurfaceSupportKHR`; the `UInt32` is the
queue family index. -/
@[extern "glue_vkGetPhysicalDeviceSurfaceSupportKHR"]
opaque getPhysicalDeviceSurfaceSupportKHR : (@& PhysicalDevice) → (@& UInt32) → (@& SurfaceKHR) → IO (Result × Bool32)
/-- Bitmask mirroring `VkSurfaceTransformFlagsKHR`. -/
structure SurfaceTransformFlagsKHR := private mk :: private v : UInt32
deriving DecidableEq
def SurfaceTransformFlagsKHR.identityBitKhr := mk 1
def SurfaceTransformFlagsKHR.rotate90BitKhr := mk 2
def SurfaceTransformFlagsKHR.rotate180BitKhr := mk 4
def SurfaceTransformFlagsKHR.rotate270BitKhr := mk 8
def SurfaceTransformFlagsKHR.horizontalMirrorBitKhr := mk 16
def SurfaceTransformFlagsKHR.horizontalMirrorRotate90BitKhr := mk 32
def SurfaceTransformFlagsKHR.horizontalMirrorRotate180BitKhr := mk 64
def SurfaceTransformFlagsKHR.horizontalMirrorRotate270BitKhr := mk 128
def SurfaceTransformFlagsKHR.inheritBitKhr := mk 256
instance : HOr SurfaceTransformFlagsKHR SurfaceTransformFlagsKHR SurfaceTransformFlagsKHR := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd SurfaceTransformFlagsKHR SurfaceTransformFlagsKHR Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited SurfaceTransformFlagsKHR := ⟨⟨0⟩⟩
/-- Bitmask mirroring `VkCompositeAlphaFlagsKHR`. -/
structure CompositeAlphaFlagsKHR := private mk :: private v : UInt32
deriving DecidableEq
def CompositeAlphaFlagsKHR.opaqueBitKhr := mk 1
def CompositeAlphaFlagsKHR.preMultipliedBitKhr := mk 2
def CompositeAlphaFlagsKHR.postMultipliedBitKhr := mk 4
def CompositeAlphaFlagsKHR.inheritBitKhr := mk 8
instance : HOr CompositeAlphaFlagsKHR CompositeAlphaFlagsKHR CompositeAlphaFlagsKHR := ⟨(⟨·.v ||| ·.v⟩)⟩
instance : HAnd CompositeAlphaFlagsKHR CompositeAlphaFlagsKHR Bool := ⟨(·.v &&& ·.v != 0)⟩
instance : Inhabited CompositeAlphaFlagsKHR := ⟨⟨0⟩⟩
/-- Mirrors `VkSurfaceCapabilitiesKHR`; filled in by the glue from the C
struct. Do not reorder fields (glue writes by constructor index). -/
structure SurfaceCapabilitiesKHR where
minImageCount : UInt32 := 0
maxImageCount : UInt32 := 0 -- 0 means "no limit" in the C API
currentExtent : Extent2D
minImageExtent : Extent2D
maxImageExtent : Extent2D
maxImageArrayLayers : UInt32 := 0
supportedTransforms : SurfaceTransformFlagsKHR := default
currentTransform : SurfaceTransformFlagsKHR := default
supportedCompositeAlpha : CompositeAlphaFlagsKHR := default
supportedUsageFlags : ImageUsageFlags := default
/-- Binding for `vkGetPhysicalDeviceSurfaceCapabilitiesKHR`. -/
@[extern "glue_vkGetPhysicalDeviceSurfaceCapabilitiesKHR"]
opaque getPhysicalDeviceSurfaceCapabilitiesKHR : (@& PhysicalDevice) → (@& SurfaceKHR) → IO (Result × SurfaceCapabilitiesKHR)
/-- Mirrors `VkColorSpaceKHR`. -/
structure ColorSpaceKHR := private mk :: private v : UInt32
deriving DecidableEq
/-- `VK_COLOR_SPACE_SRGB_NONLINEAR_KHR` (value 0). -/
def ColorSpaceKHR.srgbNonlinearKhr := mk 0
/-- Deprecated alias for `srgbNonlinearKhr`: the binding generator stripped too
much of the `VK_COLOR_SPACE_SRGB_` prefix. Kept for backward compatibility. -/
def ColorSpaceKHR.NonlinearKhr := ColorSpaceKHR.srgbNonlinearKhr
/-- Mirrors `VkSurfaceFormatKHR`. -/
structure SurfaceFormatKHR where
format : Format
colorSpace : ColorSpaceKHR
/-- Binding for `vkGetPhysicalDeviceSurfaceFormatsKHR`; the glue performs the
usual two-call count/fill enumeration. -/
@[extern "glue_vkGetPhysicalDeviceSurfaceFormatsKHR"]
opaque getPhysicalDeviceSurfaceFormatsKHR : (@& PhysicalDevice) → (@& SurfaceKHR) → IO (Result × Array (SurfaceFormatKHR))
/-- Mirrors `VkPresentModeKHR`. The original generated constant names were
garbled (the generator over-stripped the shared `VK_PRESENT_MODE_` prefix:
`diateKhr`, `boxKhr`, `Khr`, `RelaxedKhr`); the correctly named constants are
added below and the old names are kept as deprecated aliases so existing
callers keep compiling. -/
structure PresentModeKHR := private mk :: private v : UInt32
deriving DecidableEq
/-- `VK_PRESENT_MODE_IMMEDIATE_KHR` (value 0). -/
def PresentModeKHR.immediateKhr := mk 0
/-- `VK_PRESENT_MODE_MAILBOX_KHR` (value 1). -/
def PresentModeKHR.mailboxKhr := mk 1
/-- `VK_PRESENT_MODE_FIFO_KHR` (value 2). -/
def PresentModeKHR.fifoKhr := mk 2
/-- `VK_PRESENT_MODE_FIFO_RELAXED_KHR` (value 3). -/
def PresentModeKHR.fifoRelaxedKhr := mk 3
/-- Deprecated garbled alias for `immediateKhr`. -/
def PresentModeKHR.diateKhr := PresentModeKHR.immediateKhr
/-- Deprecated garbled alias for `mailboxKhr`. -/
def PresentModeKHR.boxKhr := PresentModeKHR.mailboxKhr
/-- Deprecated garbled alias for `fifoKhr`. -/
def PresentModeKHR.Khr := PresentModeKHR.fifoKhr
/-- Deprecated garbled alias for `fifoRelaxedKhr`. -/
def PresentModeKHR.RelaxedKhr := PresentModeKHR.fifoRelaxedKhr
/-- Binding for `vkGetPhysicalDeviceSurfacePresentModesKHR`; the glue performs
the two-call count/fill enumeration. -/
@[extern "glue_vkGetPhysicalDeviceSurfacePresentModesKHR"]
opaque getPhysicalDeviceSurfacePresentModesKHR : (@& PhysicalDevice) → (@& SurfaceKHR) → IO (Result × Array (PresentModeKHR))
structure SwapchainCreateFlagsKHR := private mk :: private v : UInt32
deriving DecidableEq
/-- Bitwise union of two swapchain flag masks. -/
instance : HOr SwapchainCreateFlagsKHR SwapchainCreateFlagsKHR SwapchainCreateFlagsKHR where
  hOr a b := ⟨a.v ||| b.v⟩
/-- `a &&& b` is `true` iff the masks share at least one set bit. -/
instance : HAnd SwapchainCreateFlagsKHR SwapchainCreateFlagsKHR Bool where
  hAnd a b := a.v &&& b.v != 0
/-- Empty mask by default. -/
instance : Inhabited SwapchainCreateFlagsKHR where
  default := ⟨0⟩
/-- Mirrors `VkSwapchainCreateInfoKHR`. NOTE(review): the C struct's
`oldSwapchain` member is not exposed here — presumably the glue passes
`VK_NULL_HANDLE`, which precludes swapchain recycling on resize; confirm.
Do not reorder fields (glue reads by constructor index). -/
structure SwapchainCreateInfoKHR where
flags : SwapchainCreateFlagsKHR := default
surface : SurfaceKHR
minImageCount : UInt32 := 0
imageFormat : Format
imageColorSpace : ColorSpaceKHR
imageExtent : Extent2D
imageArrayLayers : UInt32 := 0
imageUsage : ImageUsageFlags := default
imageSharingMode : SharingMode
queueFamilyIndices : Array (UInt32) := .empty -- only consulted for concurrent sharing, as in the C API
preTransform : SurfaceTransformFlagsKHR := default
compositeAlpha : CompositeAlphaFlagsKHR := default
presentMode : PresentModeKHR
clipped : Bool32 := 0 -- 0 is VK_FALSE
/-- Opaque `VkSwapchainKHR` handle. -/
opaque SwapchainKHR : Type := UInt64
/-- Binding for `vkCreateSwapchainKHR`. -/
@[extern "glue_vkCreateSwapchainKHR"]
opaque createSwapchainKHR : (@& Device) → (@& SwapchainCreateInfoKHR) → IO (Result × SwapchainKHR)
/-- Binding for `vkDestroySwapchainKHR`. -/
@[extern "glue_vkDestroySwapchainKHR"]
opaque destroySwapchainKHR : (@& Device) → (@& SwapchainKHR) → IO Unit
/-- Binding for `vkGetSwapchainImagesKHR`. -/
@[extern "glue_vkGetSwapchainImagesKHR"]
opaque getSwapchainImagesKHR : (@& Device) → (@& SwapchainKHR) → IO (Result × Array (Image))
/-- Binding for `vkAcquireNextImageKHR`: timeout (ns), signal semaphore;
returns the acquired image index. NOTE(review): the C API also accepts a
fence — this signature has none; presumably the glue passes
`VK_NULL_HANDLE`. -/
@[extern "glue_vkAcquireNextImageKHR"]
opaque acquireNextImageKHR : (@& Device) → (@& SwapchainKHR) → (@& UInt64) → (@& Semaphore) → IO (Result × UInt32)
/-- Mirrors `VkPresentInfoKHR`; `swapchains`, `imageIndices` and `results`
must be index-aligned, as in the C API. -/
structure PresentInfoKHR where
waitSemaphores : Array (Semaphore) := .empty
swapchains : Array (SwapchainKHR) := .empty
imageIndices : Array (UInt32) := .empty
results : Array (Result) := .empty
/-- Binding for `vkQueuePresentKHR`. -/
@[extern "glue_vkQueuePresentKHR"]
opaque queuePresentKHR : (@& Queue) → (@& PresentInfoKHR) → IO Result
end Vk
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment