iceman1001 2024-03-26 15:08:34 +01:00
parent c66e781a9c
commit ab2e3d5179
15 changed files with 6254 additions and 4381 deletions

File diff suppressed because it is too large

cl_d3d10.h

@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2023 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,13 +14,18 @@
* limitations under the License.
******************************************************************************/
#ifndef __OPENCL_CL_D3D10_H
#define __OPENCL_CL_D3D10_H
#ifndef OPENCL_CL_D3D10_H_
#define OPENCL_CL_D3D10_H_
/*
** This header is generated from the Khronos OpenCL XML API Registry.
*/
#if defined(_MSC_VER)
#if _MSC_VER >=1500
#pragma warning( push )
#pragma warning( disable : 4201 )
#pragma warning( disable : 5105 )
#endif
#endif
#include <d3d10.h>
@@ -29,100 +34,229 @@
#pragma warning( pop )
#endif
#endif
#include <CL/cl.h>
#include <CL/cl_platform.h>
/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES)
#define CL_NO_EXTENSION_PROTOTYPES
#endif
/* CL_NO_EXTENSION_PROTOTYPES implies
CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and
CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* cl_khr_d3d10_sharing */
/***************************************************************
* cl_khr_d3d10_sharing
***************************************************************/
#define cl_khr_d3d10_sharing 1
#define CL_KHR_D3D10_SHARING_EXTENSION_NAME \
"cl_khr_d3d10_sharing"
typedef cl_uint cl_d3d10_device_source_khr;
typedef cl_uint cl_d3d10_device_set_khr;
typedef cl_uint cl_d3d10_device_source_khr;
typedef cl_uint cl_d3d10_device_set_khr;
/******************************************************************************/
/* Error codes */
#define CL_INVALID_D3D10_DEVICE_KHR -1002
#define CL_INVALID_D3D10_RESOURCE_KHR -1003
#define CL_D3D10_RESOURCE_ALREADY_ACQUIRED_KHR -1004
#define CL_D3D10_RESOURCE_NOT_ACQUIRED_KHR -1005
/* Error Codes */
#define CL_INVALID_D3D10_DEVICE_KHR -1002
#define CL_INVALID_D3D10_RESOURCE_KHR -1003
#define CL_D3D10_RESOURCE_ALREADY_ACQUIRED_KHR -1004
#define CL_D3D10_RESOURCE_NOT_ACQUIRED_KHR -1005
/* cl_d3d10_device_source_khr */
#define CL_D3D10_DEVICE_KHR 0x4010
#define CL_D3D10_DXGI_ADAPTER_KHR 0x4011
/* cl_d3d10_device_source_nv */
#define CL_D3D10_DEVICE_KHR 0x4010
#define CL_D3D10_DXGI_ADAPTER_KHR 0x4011
/* cl_d3d10_device_set_nv */
#define CL_PREFERRED_DEVICES_FOR_D3D10_KHR 0x4012
#define CL_ALL_DEVICES_FOR_D3D10_KHR 0x4013
/* cl_d3d10_device_set_khr */
#define CL_PREFERRED_DEVICES_FOR_D3D10_KHR 0x4012
#define CL_ALL_DEVICES_FOR_D3D10_KHR 0x4013
/* cl_context_info */
#define CL_CONTEXT_D3D10_DEVICE_KHR 0x4014
#define CL_CONTEXT_D3D10_PREFER_SHARED_RESOURCES_KHR 0x402C
#define CL_CONTEXT_D3D10_DEVICE_KHR 0x4014
#define CL_CONTEXT_D3D10_PREFER_SHARED_RESOURCES_KHR 0x402C
/* cl_mem_info */
#define CL_MEM_D3D10_RESOURCE_KHR 0x4015
#define CL_MEM_D3D10_RESOURCE_KHR 0x4015
/* cl_image_info */
#define CL_IMAGE_D3D10_SUBRESOURCE_KHR 0x4016
#define CL_IMAGE_D3D10_SUBRESOURCE_KHR 0x4016
/* cl_command_type */
#define CL_COMMAND_ACQUIRE_D3D10_OBJECTS_KHR 0x4017
#define CL_COMMAND_RELEASE_D3D10_OBJECTS_KHR 0x4018
#define CL_COMMAND_ACQUIRE_D3D10_OBJECTS_KHR 0x4017
#define CL_COMMAND_RELEASE_D3D10_OBJECTS_KHR 0x4018
/******************************************************************************/
typedef CL_API_ENTRY cl_int(CL_API_CALL *clGetDeviceIDsFromD3D10KHR_fn)(
cl_platform_id platform,
typedef cl_int CL_API_CALL
clGetDeviceIDsFromD3D10KHR_t(
cl_platform_id platform,
cl_d3d10_device_source_khr d3d_device_source,
void *d3d_object,
cl_d3d10_device_set_khr d3d_device_set,
cl_uint num_entries,
cl_device_id *devices,
cl_uint *num_devices) CL_API_SUFFIX__VERSION_1_0;
void* d3d_object,
cl_d3d10_device_set_khr d3d_device_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices);
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromD3D10BufferKHR_fn)(
cl_context context,
cl_mem_flags flags,
ID3D10Buffer *resource,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_0;
typedef clGetDeviceIDsFromD3D10KHR_t *
clGetDeviceIDsFromD3D10KHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromD3D10Texture2DKHR_fn)(
cl_context context,
cl_mem_flags flags,
ID3D10Texture2D *resource,
UINT subresource,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_0;
typedef cl_mem CL_API_CALL
clCreateFromD3D10BufferKHR_t(
cl_context context,
cl_mem_flags flags,
ID3D10Buffer* resource,
cl_int* errcode_ret);
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromD3D10Texture3DKHR_fn)(
cl_context context,
cl_mem_flags flags,
ID3D10Texture3D *resource,
UINT subresource,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_0;
typedef clCreateFromD3D10BufferKHR_t *
clCreateFromD3D10BufferKHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueAcquireD3D10ObjectsKHR_fn)(
typedef cl_mem CL_API_CALL
clCreateFromD3D10Texture2DKHR_t(
cl_context context,
cl_mem_flags flags,
ID3D10Texture2D* resource,
UINT subresource,
cl_int* errcode_ret);
typedef clCreateFromD3D10Texture2DKHR_t *
clCreateFromD3D10Texture2DKHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_mem CL_API_CALL
clCreateFromD3D10Texture3DKHR_t(
cl_context context,
cl_mem_flags flags,
ID3D10Texture3D* resource,
UINT subresource,
cl_int* errcode_ret);
typedef clCreateFromD3D10Texture3DKHR_t *
clCreateFromD3D10Texture3DKHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clEnqueueAcquireD3D10ObjectsKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_0;
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueReleaseD3D10ObjectsKHR_fn)(
typedef clEnqueueAcquireD3D10ObjectsKHR_t *
clEnqueueAcquireD3D10ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clEnqueueReleaseD3D10ObjectsKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_0;
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueReleaseD3D10ObjectsKHR_t *
clEnqueueReleaseD3D10ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceIDsFromD3D10KHR(
cl_platform_id platform,
cl_d3d10_device_source_khr d3d_device_source,
void* d3d_object,
cl_d3d10_device_set_khr d3d_device_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromD3D10BufferKHR(
cl_context context,
cl_mem_flags flags,
ID3D10Buffer* resource,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromD3D10Texture2DKHR(
cl_context context,
cl_mem_flags flags,
ID3D10Texture2D* resource,
UINT subresource,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromD3D10Texture3DKHR(
cl_context context,
cl_mem_flags flags,
ID3D10Texture3D* resource,
UINT subresource,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireD3D10ObjectsKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseD3D10ObjectsKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_0;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
/***************************************************************
* cl_intel_sharing_format_query_d3d10
***************************************************************/
#define cl_intel_sharing_format_query_d3d10 1
#define CL_INTEL_SHARING_FORMAT_QUERY_D3D10_EXTENSION_NAME \
"cl_intel_sharing_format_query_d3d10"
/* when cl_khr_d3d10_sharing is supported */
typedef cl_int CL_API_CALL
clGetSupportedD3D10TextureFormatsINTEL_t(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint num_entries,
DXGI_FORMAT* d3d10_formats,
cl_uint* num_texture_formats);
typedef clGetSupportedD3D10TextureFormatsINTEL_t *
clGetSupportedD3D10TextureFormatsINTEL_fn ;
#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSupportedD3D10TextureFormatsINTEL(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint num_entries,
DXGI_FORMAT* d3d10_formats,
cl_uint* num_texture_formats) ;
#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_D3D10_H */
#endif /* OPENCL_CL_D3D10_H_ */
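The *_fn pointer typedefs survive the reorganisation above, and they are what an application actually uses when it resolves the extension at run time. A minimal C sketch of that pattern, assuming a Windows build with the D3D10 SDK headers and a valid cl_platform_id plus ID3D10Device* obtained elsewhere (all names below are illustrative, not part of the commit):

/* Sketch only: assumes platform and d3d_device were obtained elsewhere. */
#include <CL/cl_d3d10.h>
#include <stdio.h>

static cl_int list_d3d10_interop_devices(cl_platform_id platform, ID3D10Device* d3d_device)
{
    /* Extension entry points are not exported by the ICD loader directly;
       fetch them per platform and cast to the _fn typedef from this header. */
    clGetDeviceIDsFromD3D10KHR_fn pfn =
        (clGetDeviceIDsFromD3D10KHR_fn) clGetExtensionFunctionAddressForPlatform(
            platform, "clGetDeviceIDsFromD3D10KHR");
    if (pfn == NULL)
        return CL_INVALID_OPERATION;   /* cl_khr_d3d10_sharing not exposed here */

    cl_device_id devices[8];
    cl_uint num_devices = 0;
    cl_int err = pfn(platform, CL_D3D10_DEVICE_KHR, d3d_device,
                     CL_PREFERRED_DEVICES_FOR_D3D10_KHR,
                     8, devices, &num_devices);
    if (err == CL_SUCCESS)
        printf("cl_khr_d3d10_sharing: %u interop device(s)\n", (unsigned)num_devices);
    return err;
}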

cl_d3d11.h

@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2023 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,13 +14,18 @@
* limitations under the License.
******************************************************************************/
#ifndef __OPENCL_CL_D3D11_H
#define __OPENCL_CL_D3D11_H
#ifndef OPENCL_CL_D3D11_H_
#define OPENCL_CL_D3D11_H_
/*
** This header is generated from the Khronos OpenCL XML API Registry.
*/
#if defined(_MSC_VER)
#if _MSC_VER >=1500
#pragma warning( push )
#pragma warning( disable : 4201 )
#pragma warning( disable : 5105 )
#endif
#endif
#include <d3d11.h>
@@ -29,100 +34,231 @@
#pragma warning( pop )
#endif
#endif
#include <CL/cl.h>
#include <CL/cl_platform.h>
/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES)
#define CL_NO_EXTENSION_PROTOTYPES
#endif
/* CL_NO_EXTENSION_PROTOTYPES implies
CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and
CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* cl_khr_d3d11_sharing */
/***************************************************************
* cl_khr_d3d11_sharing
***************************************************************/
#define cl_khr_d3d11_sharing 1
#define CL_KHR_D3D11_SHARING_EXTENSION_NAME \
"cl_khr_d3d11_sharing"
typedef cl_uint cl_d3d11_device_source_khr;
typedef cl_uint cl_d3d11_device_set_khr;
typedef cl_uint cl_d3d11_device_source_khr;
typedef cl_uint cl_d3d11_device_set_khr;
/******************************************************************************/
/* Error codes */
#define CL_INVALID_D3D11_DEVICE_KHR -1006
#define CL_INVALID_D3D11_RESOURCE_KHR -1007
#define CL_D3D11_RESOURCE_ALREADY_ACQUIRED_KHR -1008
#define CL_D3D11_RESOURCE_NOT_ACQUIRED_KHR -1009
/* Error Codes */
#define CL_INVALID_D3D11_DEVICE_KHR -1006
#define CL_INVALID_D3D11_RESOURCE_KHR -1007
#define CL_D3D11_RESOURCE_ALREADY_ACQUIRED_KHR -1008
#define CL_D3D11_RESOURCE_NOT_ACQUIRED_KHR -1009
/* cl_d3d11_device_source_khr */
#define CL_D3D11_DEVICE_KHR 0x4019
#define CL_D3D11_DXGI_ADAPTER_KHR 0x401A
/* cl_d3d11_device_source */
#define CL_D3D11_DEVICE_KHR 0x4019
#define CL_D3D11_DXGI_ADAPTER_KHR 0x401A
/* cl_d3d11_device_set */
#define CL_PREFERRED_DEVICES_FOR_D3D11_KHR 0x401B
#define CL_ALL_DEVICES_FOR_D3D11_KHR 0x401C
/* cl_d3d11_device_set_khr */
#define CL_PREFERRED_DEVICES_FOR_D3D11_KHR 0x401B
#define CL_ALL_DEVICES_FOR_D3D11_KHR 0x401C
/* cl_context_info */
#define CL_CONTEXT_D3D11_DEVICE_KHR 0x401D
#define CL_CONTEXT_D3D11_PREFER_SHARED_RESOURCES_KHR 0x402D
#define CL_CONTEXT_D3D11_DEVICE_KHR 0x401D
#define CL_CONTEXT_D3D11_PREFER_SHARED_RESOURCES_KHR 0x402D
/* cl_mem_info */
#define CL_MEM_D3D11_RESOURCE_KHR 0x401E
#define CL_MEM_D3D11_RESOURCE_KHR 0x401E
/* cl_image_info */
#define CL_IMAGE_D3D11_SUBRESOURCE_KHR 0x401F
#define CL_IMAGE_D3D11_SUBRESOURCE_KHR 0x401F
/* cl_command_type */
#define CL_COMMAND_ACQUIRE_D3D11_OBJECTS_KHR 0x4020
#define CL_COMMAND_RELEASE_D3D11_OBJECTS_KHR 0x4021
#define CL_COMMAND_ACQUIRE_D3D11_OBJECTS_KHR 0x4020
#define CL_COMMAND_RELEASE_D3D11_OBJECTS_KHR 0x4021
/******************************************************************************/
typedef CL_API_ENTRY cl_int(CL_API_CALL *clGetDeviceIDsFromD3D11KHR_fn)(
cl_platform_id platform,
typedef cl_int CL_API_CALL
clGetDeviceIDsFromD3D11KHR_t(
cl_platform_id platform,
cl_d3d11_device_source_khr d3d_device_source,
void *d3d_object,
cl_d3d11_device_set_khr d3d_device_set,
cl_uint num_entries,
cl_device_id *devices,
cl_uint *num_devices) CL_API_SUFFIX__VERSION_1_2;
void* d3d_object,
cl_d3d11_device_set_khr d3d_device_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices);
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromD3D11BufferKHR_fn)(
cl_context context,
cl_mem_flags flags,
ID3D11Buffer *resource,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_2;
typedef clGetDeviceIDsFromD3D11KHR_t *
clGetDeviceIDsFromD3D11KHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromD3D11Texture2DKHR_fn)(
cl_context context,
cl_mem_flags flags,
ID3D11Texture2D *resource,
UINT subresource,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_2;
typedef cl_mem CL_API_CALL
clCreateFromD3D11BufferKHR_t(
cl_context context,
cl_mem_flags flags,
ID3D11Buffer* resource,
cl_int* errcode_ret);
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromD3D11Texture3DKHR_fn)(
cl_context context,
cl_mem_flags flags,
ID3D11Texture3D *resource,
UINT subresource,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_2;
typedef clCreateFromD3D11BufferKHR_t *
clCreateFromD3D11BufferKHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueAcquireD3D11ObjectsKHR_fn)(
typedef cl_mem CL_API_CALL
clCreateFromD3D11Texture2DKHR_t(
cl_context context,
cl_mem_flags flags,
ID3D11Texture2D* resource,
UINT subresource,
cl_int* errcode_ret);
typedef clCreateFromD3D11Texture2DKHR_t *
clCreateFromD3D11Texture2DKHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_mem CL_API_CALL
clCreateFromD3D11Texture3DKHR_t(
cl_context context,
cl_mem_flags flags,
ID3D11Texture3D* resource,
UINT subresource,
cl_int* errcode_ret);
typedef clCreateFromD3D11Texture3DKHR_t *
clCreateFromD3D11Texture3DKHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_int CL_API_CALL
clEnqueueAcquireD3D11ObjectsKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_2;
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueReleaseD3D11ObjectsKHR_fn)(
typedef clEnqueueAcquireD3D11ObjectsKHR_t *
clEnqueueAcquireD3D11ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_int CL_API_CALL
clEnqueueReleaseD3D11ObjectsKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_2;
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueReleaseD3D11ObjectsKHR_t *
clEnqueueReleaseD3D11ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_2;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceIDsFromD3D11KHR(
cl_platform_id platform,
cl_d3d11_device_source_khr d3d_device_source,
void* d3d_object,
cl_d3d11_device_set_khr d3d_device_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromD3D11BufferKHR(
cl_context context,
cl_mem_flags flags,
ID3D11Buffer* resource,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromD3D11Texture2DKHR(
cl_context context,
cl_mem_flags flags,
ID3D11Texture2D* resource,
UINT subresource,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromD3D11Texture3DKHR(
cl_context context,
cl_mem_flags flags,
ID3D11Texture3D* resource,
UINT subresource,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireD3D11ObjectsKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseD3D11ObjectsKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_2;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
/***************************************************************
* cl_intel_sharing_format_query_d3d11
***************************************************************/
#define cl_intel_sharing_format_query_d3d11 1
#define CL_INTEL_SHARING_FORMAT_QUERY_D3D11_EXTENSION_NAME \
"cl_intel_sharing_format_query_d3d11"
/* when cl_khr_d3d11_sharing is supported */
typedef cl_int CL_API_CALL
clGetSupportedD3D11TextureFormatsINTEL_t(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint plane,
cl_uint num_entries,
DXGI_FORMAT* d3d11_formats,
cl_uint* num_texture_formats);
typedef clGetSupportedD3D11TextureFormatsINTEL_t *
clGetSupportedD3D11TextureFormatsINTEL_fn ;
#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSupportedD3D11TextureFormatsINTEL(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint plane,
cl_uint num_entries,
DXGI_FORMAT* d3d11_formats,
cl_uint* num_texture_formats) ;
#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_D3D11_H */
#endif /* OPENCL_CL_D3D11_H_ */
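When the prototypes are compiled in, the creation functions declared above can also be called directly. A small sketch, assuming a cl_context created against a D3D11 device (the helper name and flag choice are illustrative):

#include <CL/cl_d3d11.h>

/* Sketch only: wrap an existing D3D11 texture as a cl_mem image.
   Subresource 0 selects mip level 0 of the first array slice. */
cl_mem wrap_d3d11_texture(cl_context ctx, ID3D11Texture2D* tex, cl_int* err)
{
    return clCreateFromD3D11Texture2DKHR(ctx, CL_MEM_READ_ONLY, tex, 0, err);
}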

cl_dx9_media_sharing.h

@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2023 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,104 +14,364 @@
* limitations under the License.
******************************************************************************/
#ifndef __OPENCL_CL_DX9_MEDIA_SHARING_H
#define __OPENCL_CL_DX9_MEDIA_SHARING_H
#ifndef OPENCL_CL_DX9_MEDIA_SHARING_H_
#define OPENCL_CL_DX9_MEDIA_SHARING_H_
/*
** This header is generated from the Khronos OpenCL XML API Registry.
*/
#if defined(_WIN32)
#if defined(_MSC_VER)
#if _MSC_VER >=1500
#pragma warning( push )
#pragma warning( disable : 4201 )
#pragma warning( disable : 5105 )
#endif
#endif
#include <d3d9.h>
#if defined(_MSC_VER)
#if _MSC_VER >=1500
#pragma warning( pop )
#endif
#endif
#endif
#include <CL/cl.h>
#include <CL/cl_platform.h>
/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES)
#define CL_NO_EXTENSION_PROTOTYPES
#endif
/* CL_NO_EXTENSION_PROTOTYPES implies
CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and
CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************/
/* cl_khr_dx9_media_sharing */
/***************************************************************
* cl_khr_dx9_media_sharing
***************************************************************/
#define cl_khr_dx9_media_sharing 1
#define CL_KHR_DX9_MEDIA_SHARING_EXTENSION_NAME \
"cl_khr_dx9_media_sharing"
typedef cl_uint cl_dx9_media_adapter_type_khr;
typedef cl_uint cl_dx9_media_adapter_set_khr;
#if defined(_WIN32)
#include <d3d9.h>
typedef struct _cl_dx9_surface_info_khr {
IDirect3DSurface9 *resource;
IDirect3DSurface9* resource;
HANDLE shared_handle;
} cl_dx9_surface_info_khr;
#endif
#endif /* defined(_WIN32) */
/******************************************************************************/
/* Error Codes */
#define CL_INVALID_DX9_MEDIA_ADAPTER_KHR -1010
#define CL_INVALID_DX9_MEDIA_SURFACE_KHR -1011
#define CL_DX9_MEDIA_SURFACE_ALREADY_ACQUIRED_KHR -1012
#define CL_DX9_MEDIA_SURFACE_NOT_ACQUIRED_KHR -1013
/* Error codes */
#define CL_INVALID_DX9_MEDIA_ADAPTER_KHR -1010
#define CL_INVALID_DX9_MEDIA_SURFACE_KHR -1011
#define CL_DX9_MEDIA_SURFACE_ALREADY_ACQUIRED_KHR -1012
#define CL_DX9_MEDIA_SURFACE_NOT_ACQUIRED_KHR -1013
/* cl_media_adapter_type_khr */
#define CL_ADAPTER_D3D9_KHR 0x2020
#define CL_ADAPTER_D3D9EX_KHR 0x2021
#define CL_ADAPTER_DXVA_KHR 0x2022
#define CL_ADAPTER_D3D9_KHR 0x2020
#define CL_ADAPTER_D3D9EX_KHR 0x2021
#define CL_ADAPTER_DXVA_KHR 0x2022
/* cl_media_adapter_set_khr */
#define CL_PREFERRED_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2023
#define CL_ALL_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2024
#define CL_PREFERRED_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2023
#define CL_ALL_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2024
/* cl_context_info */
#define CL_CONTEXT_ADAPTER_D3D9_KHR 0x2025
#define CL_CONTEXT_ADAPTER_D3D9EX_KHR 0x2026
#define CL_CONTEXT_ADAPTER_DXVA_KHR 0x2027
#define CL_CONTEXT_ADAPTER_D3D9_KHR 0x2025
#define CL_CONTEXT_ADAPTER_D3D9EX_KHR 0x2026
#define CL_CONTEXT_ADAPTER_DXVA_KHR 0x2027
/* cl_mem_info */
#define CL_MEM_DX9_MEDIA_ADAPTER_TYPE_KHR 0x2028
#define CL_MEM_DX9_MEDIA_SURFACE_INFO_KHR 0x2029
#define CL_MEM_DX9_MEDIA_ADAPTER_TYPE_KHR 0x2028
#define CL_MEM_DX9_MEDIA_SURFACE_INFO_KHR 0x2029
/* cl_image_info */
#define CL_IMAGE_DX9_MEDIA_PLANE_KHR 0x202A
#define CL_IMAGE_DX9_MEDIA_PLANE_KHR 0x202A
/* cl_command_type */
#define CL_COMMAND_ACQUIRE_DX9_MEDIA_SURFACES_KHR 0x202B
#define CL_COMMAND_RELEASE_DX9_MEDIA_SURFACES_KHR 0x202C
#define CL_COMMAND_ACQUIRE_DX9_MEDIA_SURFACES_KHR 0x202B
#define CL_COMMAND_RELEASE_DX9_MEDIA_SURFACES_KHR 0x202C
/******************************************************************************/
typedef CL_API_ENTRY cl_int(CL_API_CALL *clGetDeviceIDsFromDX9MediaAdapterKHR_fn)(
cl_platform_id platform,
cl_uint num_media_adapters,
cl_dx9_media_adapter_type_khr *media_adapter_type,
void *media_adapters,
cl_dx9_media_adapter_set_khr media_adapter_set,
cl_uint num_entries,
cl_device_id *devices,
cl_uint *num_devices) CL_API_SUFFIX__VERSION_1_2;
typedef cl_int CL_API_CALL
clGetDeviceIDsFromDX9MediaAdapterKHR_t(
cl_platform_id platform,
cl_uint num_media_adapters,
cl_dx9_media_adapter_type_khr* media_adapter_type,
void* media_adapters,
cl_dx9_media_adapter_set_khr media_adapter_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices);
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromDX9MediaSurfaceKHR_fn)(
cl_context context,
cl_mem_flags flags,
typedef clGetDeviceIDsFromDX9MediaAdapterKHR_t *
clGetDeviceIDsFromDX9MediaAdapterKHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_mem CL_API_CALL
clCreateFromDX9MediaSurfaceKHR_t(
cl_context context,
cl_mem_flags flags,
cl_dx9_media_adapter_type_khr adapter_type,
void *surface_info,
cl_uint plane,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_2;
void* surface_info,
cl_uint plane,
cl_int* errcode_ret);
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueAcquireDX9MediaSurfacesKHR_fn)(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_2;
typedef clCreateFromDX9MediaSurfaceKHR_t *
clCreateFromDX9MediaSurfaceKHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueReleaseDX9MediaSurfacesKHR_fn)(
typedef cl_int CL_API_CALL
clEnqueueAcquireDX9MediaSurfacesKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_2;
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueAcquireDX9MediaSurfacesKHR_t *
clEnqueueAcquireDX9MediaSurfacesKHR_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_int CL_API_CALL
clEnqueueReleaseDX9MediaSurfacesKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueReleaseDX9MediaSurfacesKHR_t *
clEnqueueReleaseDX9MediaSurfacesKHR_fn CL_API_SUFFIX__VERSION_1_2;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceIDsFromDX9MediaAdapterKHR(
cl_platform_id platform,
cl_uint num_media_adapters,
cl_dx9_media_adapter_type_khr* media_adapter_type,
void* media_adapters,
cl_dx9_media_adapter_set_khr media_adapter_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromDX9MediaSurfaceKHR(
cl_context context,
cl_mem_flags flags,
cl_dx9_media_adapter_type_khr adapter_type,
void* surface_info,
cl_uint plane,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireDX9MediaSurfacesKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseDX9MediaSurfacesKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_2;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
/***************************************************************
* cl_intel_dx9_media_sharing
***************************************************************/
#define cl_intel_dx9_media_sharing 1
#define CL_INTEL_DX9_MEDIA_SHARING_EXTENSION_NAME \
"cl_intel_dx9_media_sharing"
typedef cl_uint cl_dx9_device_source_intel;
typedef cl_uint cl_dx9_device_set_intel;
/* Error codes */
#define CL_INVALID_DX9_DEVICE_INTEL -1010
#define CL_INVALID_DX9_RESOURCE_INTEL -1011
#define CL_DX9_RESOURCE_ALREADY_ACQUIRED_INTEL -1012
#define CL_DX9_RESOURCE_NOT_ACQUIRED_INTEL -1013
/* cl_dx9_device_source_intel */
#define CL_D3D9_DEVICE_INTEL 0x4022
#define CL_D3D9EX_DEVICE_INTEL 0x4070
#define CL_DXVA_DEVICE_INTEL 0x4071
/* cl_dx9_device_set_intel */
#define CL_PREFERRED_DEVICES_FOR_DX9_INTEL 0x4024
#define CL_ALL_DEVICES_FOR_DX9_INTEL 0x4025
/* cl_context_info */
#define CL_CONTEXT_D3D9_DEVICE_INTEL 0x4026
#define CL_CONTEXT_D3D9EX_DEVICE_INTEL 0x4072
#define CL_CONTEXT_DXVA_DEVICE_INTEL 0x4073
/* cl_mem_info */
#define CL_MEM_DX9_RESOURCE_INTEL 0x4027
#define CL_MEM_DX9_SHARED_HANDLE_INTEL 0x4074
/* cl_image_info */
#define CL_IMAGE_DX9_PLANE_INTEL 0x4075
/* cl_command_type */
#define CL_COMMAND_ACQUIRE_DX9_OBJECTS_INTEL 0x402A
#define CL_COMMAND_RELEASE_DX9_OBJECTS_INTEL 0x402B
typedef cl_int CL_API_CALL
clGetDeviceIDsFromDX9INTEL_t(
cl_platform_id platform,
cl_dx9_device_source_intel dx9_device_source,
void* dx9_object,
cl_dx9_device_set_intel dx9_device_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices);
typedef clGetDeviceIDsFromDX9INTEL_t *
clGetDeviceIDsFromDX9INTEL_fn CL_API_SUFFIX__VERSION_1_1;
typedef cl_mem CL_API_CALL
clCreateFromDX9MediaSurfaceINTEL_t(
cl_context context,
cl_mem_flags flags,
IDirect3DSurface9* resource,
HANDLE sharedHandle,
UINT plane,
cl_int* errcode_ret);
typedef clCreateFromDX9MediaSurfaceINTEL_t *
clCreateFromDX9MediaSurfaceINTEL_fn CL_API_SUFFIX__VERSION_1_1;
typedef cl_int CL_API_CALL
clEnqueueAcquireDX9ObjectsINTEL_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueAcquireDX9ObjectsINTEL_t *
clEnqueueAcquireDX9ObjectsINTEL_fn CL_API_SUFFIX__VERSION_1_1;
typedef cl_int CL_API_CALL
clEnqueueReleaseDX9ObjectsINTEL_t(
cl_command_queue command_queue,
cl_uint num_objects,
cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueReleaseDX9ObjectsINTEL_t *
clEnqueueReleaseDX9ObjectsINTEL_fn CL_API_SUFFIX__VERSION_1_1;
#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceIDsFromDX9INTEL(
cl_platform_id platform,
cl_dx9_device_source_intel dx9_device_source,
void* dx9_object,
cl_dx9_device_set_intel dx9_device_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromDX9MediaSurfaceINTEL(
cl_context context,
cl_mem_flags flags,
IDirect3DSurface9* resource,
HANDLE sharedHandle,
UINT plane,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireDX9ObjectsINTEL(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseDX9ObjectsINTEL(
cl_command_queue command_queue,
cl_uint num_objects,
cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_1;
#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
/***************************************************************
* cl_intel_sharing_format_query_dx9
***************************************************************/
#define cl_intel_sharing_format_query_dx9 1
#define CL_INTEL_SHARING_FORMAT_QUERY_DX9_EXTENSION_NAME \
"cl_intel_sharing_format_query_dx9"
/* when cl_khr_dx9_media_sharing or cl_intel_dx9_media_sharing is supported */
typedef cl_int CL_API_CALL
clGetSupportedDX9MediaSurfaceFormatsINTEL_t(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint plane,
cl_uint num_entries,
D3DFORMAT* dx9_formats,
cl_uint* num_surface_formats);
typedef clGetSupportedDX9MediaSurfaceFormatsINTEL_t *
clGetSupportedDX9MediaSurfaceFormatsINTEL_fn ;
#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSupportedDX9MediaSurfaceFormatsINTEL(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint plane,
cl_uint num_entries,
D3DFORMAT* dx9_formats,
cl_uint* num_surface_formats) ;
#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_DX9_MEDIA_SHARING_H */
#endif /* OPENCL_CL_DX9_MEDIA_SHARING_H_ */
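The KHR DX9 path differs from the D3D10/11 ones in that the surface is described through the cl_dx9_surface_info_khr struct defined above. A sketch of creating such a share, assuming a context bound to a D3D9Ex adapter and a build with prototypes enabled (Windows only; names are illustrative):

#include <CL/cl_dx9_media_sharing.h>

/* Sketch only: wrap one plane of a D3D9Ex surface as a cl_mem. */
cl_mem wrap_dx9_surface(cl_context ctx, IDirect3DSurface9* surf,
                        HANDLE shared_handle, cl_int* err)
{
    /* resource + optional shared handle, as laid out in cl_dx9_surface_info_khr */
    cl_dx9_surface_info_khr info = { surf, shared_handle };
    return clCreateFromDX9MediaSurfaceKHR(ctx, CL_MEM_READ_WRITE,
                                          CL_ADAPTER_D3D9EX_KHR, &info,
                                          0 /* plane */, err);
}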

cl_dx9_media_sharing_intel.h

@@ -13,158 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
/*****************************************************************************\
Copyright (c) 2013-2019 Intel Corporation All Rights Reserved.
THESE MATERIALS ARE PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THESE
MATERIALS, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
File Name: cl_dx9_media_sharing_intel.h
Abstract:
Notes:
\*****************************************************************************/
#ifndef __OPENCL_CL_DX9_MEDIA_SHARING_INTEL_H
#define __OPENCL_CL_DX9_MEDIA_SHARING_INTEL_H
#include <CL/cl.h>
#include <CL/cl_platform.h>
#include <d3d9.h>
#include <dxvahd.h>
#include <wtypes.h>
#include <d3d9types.h>
#ifdef __cplusplus
extern "C" {
#endif
/***************************************
* cl_intel_dx9_media_sharing extension *
****************************************/
#define cl_intel_dx9_media_sharing 1
typedef cl_uint cl_dx9_device_source_intel;
typedef cl_uint cl_dx9_device_set_intel;
/* error codes */
#define CL_INVALID_DX9_DEVICE_INTEL -1010
#define CL_INVALID_DX9_RESOURCE_INTEL -1011
#define CL_DX9_RESOURCE_ALREADY_ACQUIRED_INTEL -1012
#define CL_DX9_RESOURCE_NOT_ACQUIRED_INTEL -1013
/* cl_dx9_device_source_intel */
#define CL_D3D9_DEVICE_INTEL 0x4022
#define CL_D3D9EX_DEVICE_INTEL 0x4070
#define CL_DXVA_DEVICE_INTEL 0x4071
/* cl_dx9_device_set_intel */
#define CL_PREFERRED_DEVICES_FOR_DX9_INTEL 0x4024
#define CL_ALL_DEVICES_FOR_DX9_INTEL 0x4025
/* cl_context_info */
#define CL_CONTEXT_D3D9_DEVICE_INTEL 0x4026
#define CL_CONTEXT_D3D9EX_DEVICE_INTEL 0x4072
#define CL_CONTEXT_DXVA_DEVICE_INTEL 0x4073
/* cl_mem_info */
#define CL_MEM_DX9_RESOURCE_INTEL 0x4027
#define CL_MEM_DX9_SHARED_HANDLE_INTEL 0x4074
/* cl_image_info */
#define CL_IMAGE_DX9_PLANE_INTEL 0x4075
/* cl_command_type */
#define CL_COMMAND_ACQUIRE_DX9_OBJECTS_INTEL 0x402A
#define CL_COMMAND_RELEASE_DX9_OBJECTS_INTEL 0x402B
/******************************************************************************/
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceIDsFromDX9INTEL(
cl_platform_id platform,
cl_dx9_device_source_intel dx9_device_source,
void *dx9_object,
cl_dx9_device_set_intel dx9_device_set,
cl_uint num_entries,
cl_device_id *devices,
cl_uint *num_devices) CL_EXT_SUFFIX__VERSION_1_1;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clGetDeviceIDsFromDX9INTEL_fn)(
cl_platform_id platform,
cl_dx9_device_source_intel dx9_device_source,
void *dx9_object,
cl_dx9_device_set_intel dx9_device_set,
cl_uint num_entries,
cl_device_id *devices,
cl_uint *num_devices) CL_EXT_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromDX9MediaSurfaceINTEL(
cl_context context,
cl_mem_flags flags,
IDirect3DSurface9 *resource,
HANDLE sharedHandle,
UINT plane,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_1;
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromDX9MediaSurfaceINTEL_fn)(
cl_context context,
cl_mem_flags flags,
IDirect3DSurface9 *resource,
HANDLE sharedHandle,
UINT plane,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireDX9ObjectsINTEL(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_1;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueAcquireDX9ObjectsINTEL_fn)(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseDX9ObjectsINTEL(
cl_command_queue command_queue,
cl_uint num_objects,
cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_1;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueReleaseDX9ObjectsINTEL_fn)(
cl_command_queue command_queue,
cl_uint num_objects,
cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_1;
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_DX9_MEDIA_SHARING_INTEL_H */
#include <CL/cl_dx9_media_sharing.h>
#pragma message("The Intel DX9 media sharing extensions have been moved into cl_dx9_media_sharing.h. Please include cl_dx9_media_sharing.h directly.")
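Since the Intel DX9 entry points now come from cl_dx9_media_sharing.h, projects that resolve every function dynamically can lean on the prototype-suppression macros shown in the headers above. A sketch (the macro name is taken from the headers; the usage is illustrative):

/* Sketch only: compile out all extern extension prototypes;
   the *_t and *_fn typedefs remain available for dynamic loading. */
#define CL_NO_EXTENSION_PROTOTYPES
#include <CL/cl_dx9_media_sharing.h>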

cl_egl.h

@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2023 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,107 +14,166 @@
* limitations under the License.
******************************************************************************/
#ifndef __OPENCL_CL_EGL_H
#define __OPENCL_CL_EGL_H
#ifndef OPENCL_CL_EGL_H_
#define OPENCL_CL_EGL_H_
/*
** This header is generated from the Khronos OpenCL XML API Registry.
*/
#include <CL/cl.h>
/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES)
#define CL_NO_EXTENSION_PROTOTYPES
#endif
/* CL_NO_EXTENSION_PROTOTYPES implies
CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and
CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#ifdef __cplusplus
extern "C" {
#endif
/***************************************************************
* cl_khr_egl_image
***************************************************************/
#define cl_khr_egl_image 1
#define CL_KHR_EGL_IMAGE_EXTENSION_NAME \
"cl_khr_egl_image"
/* Command type for events created with clEnqueueAcquireEGLObjectsKHR */
#define CL_COMMAND_EGL_FENCE_SYNC_OBJECT_KHR 0x202F
#define CL_COMMAND_ACQUIRE_EGL_OBJECTS_KHR 0x202D
#define CL_COMMAND_RELEASE_EGL_OBJECTS_KHR 0x202E
#define CL_COMMAND_EGL_FENCE_SYNC_OBJECT_KHR 0x202F
#define CL_COMMAND_ACQUIRE_EGL_OBJECTS_KHR 0x202D
#define CL_COMMAND_RELEASE_EGL_OBJECTS_KHR 0x202E
/* Error type for clCreateFromEGLImageKHR */
#define CL_INVALID_EGL_OBJECT_KHR -1093
#define CL_EGL_RESOURCE_NOT_ACQUIRED_KHR -1092
#define CL_INVALID_EGL_OBJECT_KHR -1093
#define CL_EGL_RESOURCE_NOT_ACQUIRED_KHR -1092
/* CLeglImageKHR is an opaque handle to an EGLImage */
typedef void *CLeglImageKHR;
typedef void* CLeglImageKHR;
/* CLeglDisplayKHR is an opaque handle to an EGLDisplay */
typedef void *CLeglDisplayKHR;
/* CLeglSyncKHR is an opaque handle to an EGLSync object */
typedef void *CLeglSyncKHR;
typedef void* CLeglDisplayKHR;
/* properties passed to clCreateFromEGLImageKHR */
typedef intptr_t cl_egl_image_properties_khr;
typedef intptr_t cl_egl_image_properties_khr;
#define cl_khr_egl_image 1
typedef cl_mem CL_API_CALL
clCreateFromEGLImageKHR_t(
cl_context context,
CLeglDisplayKHR egldisplay,
CLeglImageKHR eglimage,
cl_mem_flags flags,
const cl_egl_image_properties_khr* properties,
cl_int* errcode_ret);
typedef clCreateFromEGLImageKHR_t *
clCreateFromEGLImageKHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clEnqueueAcquireEGLObjectsKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueAcquireEGLObjectsKHR_t *
clEnqueueAcquireEGLObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clEnqueueReleaseEGLObjectsKHR_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueReleaseEGLObjectsKHR_t *
clEnqueueReleaseEGLObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromEGLImageKHR(cl_context context,
CLeglDisplayKHR egldisplay,
CLeglImageKHR eglimage,
cl_mem_flags flags,
const cl_egl_image_properties_khr *properties,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromEGLImageKHR_fn)(
cl_context context,
CLeglDisplayKHR egldisplay,
CLeglImageKHR eglimage,
cl_mem_flags flags,
const cl_egl_image_properties_khr *properties,
cl_int *errcode_ret);
clCreateFromEGLImageKHR(
cl_context context,
CLeglDisplayKHR egldisplay,
CLeglImageKHR eglimage,
cl_mem_flags flags,
const cl_egl_image_properties_khr* properties,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireEGLObjectsKHR(cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueAcquireEGLObjectsKHR_fn)(
clEnqueueAcquireEGLObjectsKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseEGLObjectsKHR(cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueReleaseEGLObjectsKHR_fn)(
clEnqueueReleaseEGLObjectsKHR(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_0;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
/***************************************************************
* cl_khr_egl_event
***************************************************************/
#define cl_khr_egl_event 1
#define CL_KHR_EGL_EVENT_EXTENSION_NAME \
"cl_khr_egl_event"
/* CLeglDisplayKHR is an opaque handle to an EGLDisplay */
/* type CLeglDisplayKHR */
/* CLeglSyncKHR is an opaque handle to an EGLSync object */
typedef void* CLeglSyncKHR;
typedef cl_event CL_API_CALL
clCreateEventFromEGLSyncKHR_t(
cl_context context,
CLeglSyncKHR sync,
CLeglDisplayKHR display,
cl_int* errcode_ret);
typedef clCreateEventFromEGLSyncKHR_t *
clCreateEventFromEGLSyncKHR_fn CL_API_SUFFIX__VERSION_1_0;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_event CL_API_CALL
clCreateEventFromEGLSyncKHR(cl_context context,
CLeglSyncKHR sync,
CLeglDisplayKHR display,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_event(CL_API_CALL *clCreateEventFromEGLSyncKHR_fn)(
cl_context context,
CLeglSyncKHR sync,
clCreateEventFromEGLSyncKHR(
cl_context context,
CLeglSyncKHR sync,
CLeglDisplayKHR display,
cl_int *errcode_ret);
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_EGL_H */
#endif /* OPENCL_CL_EGL_H_ */
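Once a cl_mem exists for an EGLImage, the usual flow is to bracket its use with the acquire/release calls declared above. A sketch, assuming an existing queue, shared image and kernel, and a build with the prototypes enabled (work size and names are illustrative):

#include <CL/cl_egl.h>

/* Sketch only: acquire the shared image, run one kernel on it, then release. */
cl_int run_on_egl_image(cl_command_queue queue, cl_mem image, cl_kernel kernel)
{
    size_t gws[2] = { 256, 256 };                    /* illustrative work size */
    cl_int err = clEnqueueAcquireEGLObjectsKHR(queue, 1, &image, 0, NULL, NULL);
    if (err != CL_SUCCESS)
        return err;

    err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &image);
    if (err == CL_SUCCESS)
        err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL, gws, NULL, 0, NULL, NULL);

    /* Release even if the enqueue failed so EGL regains ownership of the image. */
    cl_int rel = clEnqueueReleaseEGLObjectsKHR(queue, 1, &image, 0, NULL, NULL);
    return (err != CL_SUCCESS) ? err : rel;
}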

File diff suppressed because it is too large

cl_ext_intel.h

@@ -14,718 +14,6 @@
* limitations under the License.
*
******************************************************************************/
/*****************************************************************************\
Copyright (c) 2013-2020 Intel Corporation All Rights Reserved.
THESE MATERIALS ARE PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THESE
MATERIALS, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
File Name: cl_ext_intel.h
Abstract:
Notes:
\*****************************************************************************/
#ifndef __CL_EXT_INTEL_H
#define __CL_EXT_INTEL_H
#include <CL/cl.h>
#include <CL/cl_platform.h>
#ifdef __cplusplus
extern "C" {
#endif
/***************************************
* cl_intel_thread_local_exec extension *
****************************************/
#define cl_intel_thread_local_exec 1
#define CL_QUEUE_THREAD_LOCAL_EXEC_ENABLE_INTEL (((cl_bitfield)1) << 31)
/***********************************************
* cl_intel_device_partition_by_names extension *
************************************************/
#define cl_intel_device_partition_by_names 1
#define CL_DEVICE_PARTITION_BY_NAMES_INTEL 0x4052
#define CL_PARTITION_BY_NAMES_LIST_END_INTEL -1
/************************************************
* cl_intel_accelerator extension *
* cl_intel_motion_estimation extension *
* cl_intel_advanced_motion_estimation extension *
*************************************************/
#define cl_intel_accelerator 1
#define cl_intel_motion_estimation 1
#define cl_intel_advanced_motion_estimation 1
typedef struct _cl_accelerator_intel *cl_accelerator_intel;
typedef cl_uint cl_accelerator_type_intel;
typedef cl_uint cl_accelerator_info_intel;
typedef struct _cl_motion_estimation_desc_intel {
cl_uint mb_block_type;
cl_uint subpixel_mode;
cl_uint sad_adjust_mode;
cl_uint search_path_type;
} cl_motion_estimation_desc_intel;
/* error codes */
#define CL_INVALID_ACCELERATOR_INTEL -1094
#define CL_INVALID_ACCELERATOR_TYPE_INTEL -1095
#define CL_INVALID_ACCELERATOR_DESCRIPTOR_INTEL -1096
#define CL_ACCELERATOR_TYPE_NOT_SUPPORTED_INTEL -1097
/* cl_accelerator_type_intel */
#define CL_ACCELERATOR_TYPE_MOTION_ESTIMATION_INTEL 0x0
/* cl_accelerator_info_intel */
#define CL_ACCELERATOR_DESCRIPTOR_INTEL 0x4090
#define CL_ACCELERATOR_REFERENCE_COUNT_INTEL 0x4091
#define CL_ACCELERATOR_CONTEXT_INTEL 0x4092
#define CL_ACCELERATOR_TYPE_INTEL 0x4093
/* cl_motion_detect_desc_intel flags */
#define CL_ME_MB_TYPE_16x16_INTEL 0x0
#define CL_ME_MB_TYPE_8x8_INTEL 0x1
#define CL_ME_MB_TYPE_4x4_INTEL 0x2
#define CL_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
#define CL_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
#define CL_ME_SUBPIXEL_MODE_QPEL_INTEL 0x2
#define CL_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
#define CL_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x1
#define CL_ME_SEARCH_PATH_RADIUS_2_2_INTEL 0x0
#define CL_ME_SEARCH_PATH_RADIUS_4_4_INTEL 0x1
#define CL_ME_SEARCH_PATH_RADIUS_16_12_INTEL 0x5
#define CL_ME_SKIP_BLOCK_TYPE_16x16_INTEL 0x0
#define CL_ME_CHROMA_INTRA_PREDICT_ENABLED_INTEL 0x1
#define CL_ME_LUMA_INTRA_PREDICT_ENABLED_INTEL 0x2
#define CL_ME_SKIP_BLOCK_TYPE_8x8_INTEL 0x4
#define CL_ME_FORWARD_INPUT_MODE_INTEL 0x1
#define CL_ME_BACKWARD_INPUT_MODE_INTEL 0x2
#define CL_ME_BIDIRECTION_INPUT_MODE_INTEL 0x3
#define CL_ME_BIDIR_WEIGHT_QUARTER_INTEL 16
#define CL_ME_BIDIR_WEIGHT_THIRD_INTEL 21
#define CL_ME_BIDIR_WEIGHT_HALF_INTEL 32
#define CL_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 43
#define CL_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 48
#define CL_ME_COST_PENALTY_NONE_INTEL 0x0
#define CL_ME_COST_PENALTY_LOW_INTEL 0x1
#define CL_ME_COST_PENALTY_NORMAL_INTEL 0x2
#define CL_ME_COST_PENALTY_HIGH_INTEL 0x3
#define CL_ME_COST_PRECISION_QPEL_INTEL 0x0
#define CL_ME_COST_PRECISION_HPEL_INTEL 0x1
#define CL_ME_COST_PRECISION_PEL_INTEL 0x2
#define CL_ME_COST_PRECISION_DPEL_INTEL 0x3
#define CL_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
#define CL_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
#define CL_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
#define CL_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
#define CL_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
#define CL_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
#define CL_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
#define CL_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
#define CL_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
#define CL_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
#define CL_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
#define CL_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
#define CL_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
#define CL_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
/* cl_device_info */
#define CL_DEVICE_ME_VERSION_INTEL 0x407E
#define CL_ME_VERSION_LEGACY_INTEL 0x0
#define CL_ME_VERSION_ADVANCED_VER_1_INTEL 0x1
#define CL_ME_VERSION_ADVANCED_VER_2_INTEL 0x2
extern CL_API_ENTRY cl_accelerator_intel CL_API_CALL
clCreateAcceleratorINTEL(
cl_context context,
cl_accelerator_type_intel accelerator_type,
size_t descriptor_size,
const void *descriptor,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_accelerator_intel(CL_API_CALL *clCreateAcceleratorINTEL_fn)(
cl_context context,
cl_accelerator_type_intel accelerator_type,
size_t descriptor_size,
const void *descriptor,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetAcceleratorInfoINTEL(
cl_accelerator_intel accelerator,
cl_accelerator_info_intel param_name,
size_t param_value_size,
void *param_value,
size_t *param_value_size_ret) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clGetAcceleratorInfoINTEL_fn)(
cl_accelerator_intel accelerator,
cl_accelerator_info_intel param_name,
size_t param_value_size,
void *param_value,
size_t *param_value_size_ret) CL_EXT_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainAcceleratorINTEL(
cl_accelerator_intel accelerator) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clRetainAcceleratorINTEL_fn)(
cl_accelerator_intel accelerator) CL_EXT_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseAcceleratorINTEL(
cl_accelerator_intel accelerator) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clReleaseAcceleratorINTEL_fn)(
cl_accelerator_intel accelerator) CL_EXT_SUFFIX__VERSION_1_2;
/******************************************
* cl_intel_simultaneous_sharing extension *
*******************************************/
#define cl_intel_simultaneous_sharing 1
#define CL_DEVICE_SIMULTANEOUS_INTEROPS_INTEL 0x4104
#define CL_DEVICE_NUM_SIMULTANEOUS_INTEROPS_INTEL 0x4105
/***********************************
* cl_intel_egl_image_yuv extension *
************************************/
#define cl_intel_egl_image_yuv 1
#define CL_EGL_YUV_PLANE_INTEL 0x4107
/********************************
* cl_intel_packed_yuv extension *
*********************************/
#define cl_intel_packed_yuv 1
#define CL_YUYV_INTEL 0x4076
#define CL_UYVY_INTEL 0x4077
#define CL_YVYU_INTEL 0x4078
#define CL_VYUY_INTEL 0x4079
/********************************************
* cl_intel_required_subgroup_size extension *
*********************************************/
#define cl_intel_required_subgroup_size 1
#define CL_DEVICE_SUB_GROUP_SIZES_INTEL 0x4108
#define CL_KERNEL_SPILL_MEM_SIZE_INTEL 0x4109
#define CL_KERNEL_COMPILE_SUB_GROUP_SIZE_INTEL 0x410A
/****************************************
* cl_intel_driver_diagnostics extension *
*****************************************/
#define cl_intel_driver_diagnostics 1
typedef cl_uint cl_diagnostics_verbose_level;
#define CL_CONTEXT_SHOW_DIAGNOSTICS_INTEL 0x4106
#define CL_CONTEXT_DIAGNOSTICS_LEVEL_ALL_INTEL ( 0xff )
#define CL_CONTEXT_DIAGNOSTICS_LEVEL_GOOD_INTEL ( 1 )
#define CL_CONTEXT_DIAGNOSTICS_LEVEL_BAD_INTEL ( 1 << 1 )
#define CL_CONTEXT_DIAGNOSTICS_LEVEL_NEUTRAL_INTEL ( 1 << 2 )
/********************************
* cl_intel_planar_yuv extension *
*********************************/
#define CL_NV12_INTEL 0x410E
#define CL_MEM_NO_ACCESS_INTEL ( 1 << 24 )
#define CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL ( 1 << 25 )
#define CL_DEVICE_PLANAR_YUV_MAX_WIDTH_INTEL 0x417E
#define CL_DEVICE_PLANAR_YUV_MAX_HEIGHT_INTEL 0x417F
/*******************************************************
* cl_intel_device_side_avc_motion_estimation extension *
********************************************************/
#define CL_DEVICE_AVC_ME_VERSION_INTEL 0x410B
#define CL_DEVICE_AVC_ME_SUPPORTS_TEXTURE_SAMPLER_USE_INTEL 0x410C
#define CL_DEVICE_AVC_ME_SUPPORTS_PREEMPTION_INTEL 0x410D
#define CL_AVC_ME_VERSION_0_INTEL 0x0 /* No support. */
#define CL_AVC_ME_VERSION_1_INTEL 0x1 /* First supported version. */
#define CL_AVC_ME_MAJOR_16x16_INTEL 0x0
#define CL_AVC_ME_MAJOR_16x8_INTEL 0x1
#define CL_AVC_ME_MAJOR_8x16_INTEL 0x2
#define CL_AVC_ME_MAJOR_8x8_INTEL 0x3
#define CL_AVC_ME_MINOR_8x8_INTEL 0x0
#define CL_AVC_ME_MINOR_8x4_INTEL 0x1
#define CL_AVC_ME_MINOR_4x8_INTEL 0x2
#define CL_AVC_ME_MINOR_4x4_INTEL 0x3
#define CL_AVC_ME_MAJOR_FORWARD_INTEL 0x0
#define CL_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
#define CL_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
#define CL_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
#define CL_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
#define CL_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
#define CL_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
#define CL_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
#define CL_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
#define CL_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
#define CL_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
#define CL_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
#define CL_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
#define CL_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
#define CL_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
#define CL_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
#define CL_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
#define CL_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
#define CL_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
#define CL_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
#define CL_AVC_ME_SEARCH_WINDOW_16x12_RADIUS_INTEL 0x9
#define CL_AVC_ME_SEARCH_WINDOW_4x4_RADIUS_INTEL 0x2
#define CL_AVC_ME_SEARCH_WINDOW_2x2_RADIUS_INTEL 0xa
#define CL_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
#define CL_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
#define CL_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
#define CL_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
#define CL_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
#define CL_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
#define CL_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
#define CL_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
#define CL_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
#define CL_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
#define CL_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
#define CL_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
#define CL_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
#define CL_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
#define CL_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
#define CL_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
#define CL_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
#define CL_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
#define CL_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
#define CL_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
#define CL_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL ( 0x1 << 24 )
#define CL_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL ( 0x2 << 24 )
#define CL_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL ( 0x3 << 24 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL ( 0x55 << 24 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL ( 0xAA << 24 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL ( 0xFF << 24 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL ( 0x1 << 24 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL ( 0x2 << 24 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL ( 0x1 << 26 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL ( 0x2 << 26 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL ( 0x1 << 28 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL ( 0x2 << 28 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL ( 0x1 << 30 )
#define CL_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL ( 0x2 << 30 )
#define CL_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
#define CL_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
#define CL_AVC_ME_INTRA_16x16_INTEL 0x0
#define CL_AVC_ME_INTRA_8x8_INTEL 0x1
#define CL_AVC_ME_INTRA_4x4_INTEL 0x2
#define CL_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
#define CL_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
#define CL_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
#define CL_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
#define CL_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
#define CL_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
#define CL_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
#define CL_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
#define CL_AVC_ME_FRAME_FORWARD_INTEL 0x1
#define CL_AVC_ME_FRAME_BACKWARD_INTEL 0x2
#define CL_AVC_ME_FRAME_DUAL_INTEL 0x3
#define CL_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
#define CL_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
#define CL_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
#define CL_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
#define CL_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
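/*
 * Illustrative sketch (not part of the original header): querying whether a
 * device exposes cl_intel_device_side_avc_motion_estimation and which version
 * it reports, using the enums defined above. The "example_" name is a
 * hypothetical helper; error handling is reduced to a single check.
 */
static inline cl_uint example_query_avc_me_version(cl_device_id device)
{
    cl_uint version = 0;
    cl_int err = clGetDeviceInfo(device, CL_DEVICE_AVC_ME_VERSION_INTEL,
                                 sizeof(version), &version, NULL);
    /* CL_AVC_ME_VERSION_0_INTEL (0x0) means the feature is not supported. */
    return (err == CL_SUCCESS) ? version : CL_AVC_ME_VERSION_0_INTEL;
}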
/*******************************************
* cl_intel_unified_shared_memory extension *
********************************************/
/* These APIs are in sync with Revision Q of the cl_intel_unified_shared_memory spec! */
#define cl_intel_unified_shared_memory 1
/* cl_device_info */
#define CL_DEVICE_HOST_MEM_CAPABILITIES_INTEL 0x4190
#define CL_DEVICE_DEVICE_MEM_CAPABILITIES_INTEL 0x4191
#define CL_DEVICE_SINGLE_DEVICE_SHARED_MEM_CAPABILITIES_INTEL 0x4192
#define CL_DEVICE_CROSS_DEVICE_SHARED_MEM_CAPABILITIES_INTEL 0x4193
#define CL_DEVICE_SHARED_SYSTEM_MEM_CAPABILITIES_INTEL 0x4194
typedef cl_bitfield cl_device_unified_shared_memory_capabilities_intel;
/* cl_device_unified_shared_memory_capabilities_intel - bitfield */
#define CL_UNIFIED_SHARED_MEMORY_ACCESS_INTEL (1 << 0)
#define CL_UNIFIED_SHARED_MEMORY_ATOMIC_ACCESS_INTEL (1 << 1)
#define CL_UNIFIED_SHARED_MEMORY_CONCURRENT_ACCESS_INTEL (1 << 2)
#define CL_UNIFIED_SHARED_MEMORY_CONCURRENT_ATOMIC_ACCESS_INTEL (1 << 3)
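/*
 * Illustrative sketch (not part of the original header): checking whether a
 * device supports host USM allocations at all. The "example_" name is a
 * hypothetical helper; a real application would usually inspect the other
 * capability queries (device, single-device shared, ...) the same way.
 */
static inline cl_bool example_has_host_usm(cl_device_id device)
{
    cl_device_unified_shared_memory_capabilities_intel caps = 0;
    if (clGetDeviceInfo(device, CL_DEVICE_HOST_MEM_CAPABILITIES_INTEL,
                        sizeof(caps), &caps, NULL) != CL_SUCCESS)
        return CL_FALSE;
    return (caps & CL_UNIFIED_SHARED_MEMORY_ACCESS_INTEL) ? CL_TRUE : CL_FALSE;
}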
typedef cl_properties cl_mem_properties_intel;
/* cl_mem_properties_intel */
#define CL_MEM_ALLOC_FLAGS_INTEL 0x4195
typedef cl_bitfield cl_mem_alloc_flags_intel;
/* cl_mem_alloc_flags_intel - bitfield */
#define CL_MEM_ALLOC_WRITE_COMBINED_INTEL (1 << 0)
typedef cl_uint cl_mem_info_intel;
/* cl_mem_alloc_info_intel */
#define CL_MEM_ALLOC_TYPE_INTEL 0x419A
#define CL_MEM_ALLOC_BASE_PTR_INTEL 0x419B
#define CL_MEM_ALLOC_SIZE_INTEL 0x419C
#define CL_MEM_ALLOC_DEVICE_INTEL 0x419D
/* Enum values 0x419E-0x419F are reserved for future queries. */
typedef cl_uint cl_unified_shared_memory_type_intel;
/* cl_unified_shared_memory_type_intel */
#define CL_MEM_TYPE_UNKNOWN_INTEL 0x4196
#define CL_MEM_TYPE_HOST_INTEL 0x4197
#define CL_MEM_TYPE_DEVICE_INTEL 0x4198
#define CL_MEM_TYPE_SHARED_INTEL 0x4199
typedef cl_uint cl_mem_advice_intel;
/* cl_mem_advice_intel */
/* Enum values 0x4208-0x420F are reserved for future memory advices. */
/* cl_kernel_exec_info */
#define CL_KERNEL_EXEC_INFO_INDIRECT_HOST_ACCESS_INTEL 0x4200
#define CL_KERNEL_EXEC_INFO_INDIRECT_DEVICE_ACCESS_INTEL 0x4201
#define CL_KERNEL_EXEC_INFO_INDIRECT_SHARED_ACCESS_INTEL 0x4202
#define CL_KERNEL_EXEC_INFO_USM_PTRS_INTEL 0x4203
/* cl_command_type */
#define CL_COMMAND_MEMFILL_INTEL 0x4204
#define CL_COMMAND_MEMCPY_INTEL 0x4205
#define CL_COMMAND_MIGRATEMEM_INTEL 0x4206
#define CL_COMMAND_MEMADVISE_INTEL 0x4207
extern CL_API_ENTRY void *CL_API_CALL
clHostMemAllocINTEL(
cl_context context,
const cl_mem_properties_intel *properties,
size_t size,
cl_uint alignment,
cl_int *errcode_ret);
typedef CL_API_ENTRY void *(CL_API_CALL *
clHostMemAllocINTEL_fn)(
cl_context context,
const cl_mem_properties_intel *properties,
size_t size,
cl_uint alignment,
cl_int *errcode_ret);
extern CL_API_ENTRY void *CL_API_CALL
clDeviceMemAllocINTEL(
cl_context context,
cl_device_id device,
const cl_mem_properties_intel *properties,
size_t size,
cl_uint alignment,
cl_int *errcode_ret);
typedef CL_API_ENTRY void *(CL_API_CALL *
clDeviceMemAllocINTEL_fn)(
cl_context context,
cl_device_id device,
const cl_mem_properties_intel *properties,
size_t size,
cl_uint alignment,
cl_int *errcode_ret);
extern CL_API_ENTRY void *CL_API_CALL
clSharedMemAllocINTEL(
cl_context context,
cl_device_id device,
const cl_mem_properties_intel *properties,
size_t size,
cl_uint alignment,
cl_int *errcode_ret);
typedef CL_API_ENTRY void *(CL_API_CALL *
clSharedMemAllocINTEL_fn)(
cl_context context,
cl_device_id device,
const cl_mem_properties_intel *properties,
size_t size,
cl_uint alignment,
cl_int *errcode_ret);
extern CL_API_ENTRY cl_int CL_API_CALL
clMemFreeINTEL(
cl_context context,
void *ptr);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clMemFreeINTEL_fn)(
cl_context context,
void *ptr);
extern CL_API_ENTRY cl_int CL_API_CALL
clMemBlockingFreeINTEL(
cl_context context,
void *ptr);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clMemBlockingFreeINTEL_fn)(
cl_context context,
void *ptr);
extern CL_API_ENTRY cl_int CL_API_CALL
clGetMemAllocInfoINTEL(
cl_context context,
const void *ptr,
cl_mem_info_intel param_name,
size_t param_value_size,
void *param_value,
size_t *param_value_size_ret);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clGetMemAllocInfoINTEL_fn)(
cl_context context,
const void *ptr,
cl_mem_info_intel param_name,
size_t param_value_size,
void *param_value,
size_t *param_value_size_ret);
extern CL_API_ENTRY cl_int CL_API_CALL
clSetKernelArgMemPointerINTEL(
cl_kernel kernel,
cl_uint arg_index,
const void *arg_value);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clSetKernelArgMemPointerINTEL_fn)(
cl_kernel kernel,
cl_uint arg_index,
const void *arg_value);
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueMemsetINTEL( /* Deprecated */
cl_command_queue command_queue,
void *dst_ptr,
cl_int value,
size_t size,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clEnqueueMemsetINTEL_fn)( /* Deprecated */
cl_command_queue command_queue,
void *dst_ptr,
cl_int value,
size_t size,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueMemFillINTEL(
cl_command_queue command_queue,
void *dst_ptr,
const void *pattern,
size_t pattern_size,
size_t size,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clEnqueueMemFillINTEL_fn)(
cl_command_queue command_queue,
void *dst_ptr,
const void *pattern,
size_t pattern_size,
size_t size,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueMemcpyINTEL(
cl_command_queue command_queue,
cl_bool blocking,
void *dst_ptr,
const void *src_ptr,
size_t size,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clEnqueueMemcpyINTEL_fn)(
cl_command_queue command_queue,
cl_bool blocking,
void *dst_ptr,
const void *src_ptr,
size_t size,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
#ifdef CL_VERSION_1_2
/* Because these APIs use cl_mem_migration_flags, they require
OpenCL 1.2: */
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueMigrateMemINTEL(
cl_command_queue command_queue,
const void *ptr,
size_t size,
cl_mem_migration_flags flags,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clEnqueueMigrateMemINTEL_fn)(
cl_command_queue command_queue,
const void *ptr,
size_t size,
cl_mem_migration_flags flags,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
#endif
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueMemAdviseINTEL(
cl_command_queue command_queue,
const void *ptr,
size_t size,
cl_mem_advice_intel advice,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
typedef CL_API_ENTRY cl_int(CL_API_CALL *
clEnqueueMemAdviseINTEL_fn)(
cl_command_queue command_queue,
const void *ptr,
size_t size,
cl_mem_advice_intel advice,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event);
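/*
 * Illustrative sketch (not part of the original header): a typical USM round
 * trip using the entry points declared above - allocate device memory, copy
 * host data into it, bind it to a kernel argument, then free it. Assumes
 * valid context/device/queue/kernel objects and that the extension is
 * supported; "example_" names are hypothetical and error handling is
 * abbreviated.
 */
static inline cl_int example_usm_round_trip(cl_context context,
                                            cl_device_id device,
                                            cl_command_queue queue,
                                            cl_kernel kernel,
                                            const void *host_src,
                                            size_t size)
{
    cl_int err = CL_SUCCESS;
    /* NULL properties and alignment 0 request the default behaviour. */
    void *dev_ptr = clDeviceMemAllocINTEL(context, device, NULL, size, 0, &err);
    if (err != CL_SUCCESS) return err;

    /* Blocking copy from host memory into the USM allocation. */
    err = clEnqueueMemcpyINTEL(queue, CL_TRUE, dev_ptr, host_src, size,
                               0, NULL, NULL);
    if (err == CL_SUCCESS)
        err = clSetKernelArgMemPointerINTEL(kernel, 0, dev_ptr);

    /* The blocking free waits for any commands still using the allocation. */
    clMemBlockingFreeINTEL(context, dev_ptr);
    return err;
}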
/***************************************************
* cl_intel_create_buffer_with_properties extension *
****************************************************/
#define cl_intel_create_buffer_with_properties 1
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateBufferWithPropertiesINTEL(
cl_context context,
const cl_mem_properties_intel *properties,
cl_mem_flags flags,
size_t size,
void *host_ptr,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_mem(CL_API_CALL *
clCreateBufferWithPropertiesINTEL_fn)(
cl_context context,
const cl_mem_properties_intel *properties,
cl_mem_flags flags,
size_t size,
void *host_ptr,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_0;
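/*
 * Illustrative sketch (not part of the original header): creating a buffer
 * with an Intel property list. The CL_MEM_CHANNEL_INTEL key (declared below
 * for cl_intel_mem_channel_property) and the channel value 1 are assumptions
 * that only apply on devices exposing that extension; the "example_" name is
 * a hypothetical helper.
 */
static inline cl_mem example_create_buffer_on_channel(cl_context context,
                                                      size_t size,
                                                      cl_int *errcode_ret)
{
    /* Property list: key/value pairs terminated by 0. */
    const cl_mem_properties_intel props[] = { CL_MEM_CHANNEL_INTEL, 1, 0 };
    return clCreateBufferWithPropertiesINTEL(context, props, CL_MEM_READ_WRITE,
                                             size, NULL, errcode_ret);
}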
/******************************************
* cl_intel_mem_channel_property extension *
*******************************************/
#define CL_MEM_CHANNEL_INTEL 0x4213
/*********************************
* cl_intel_mem_force_host_memory *
**********************************/
#define cl_intel_mem_force_host_memory 1
/* cl_mem_flags */
#define CL_MEM_FORCE_HOST_MEMORY_INTEL (1 << 20)
#ifdef __cplusplus
}
#endif
#endif /* __CL_EXT_INTEL_H */
#include <CL/cl_ext.h>
#pragma message("The Intel extensions have been moved into cl_ext.h. Please include cl_ext.h directly.")



@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2023 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,146 +14,389 @@
* limitations under the License.
******************************************************************************/
#ifndef __OPENCL_CL_GL_H
#define __OPENCL_CL_GL_H
#ifndef OPENCL_CL_GL_H_
#define OPENCL_CL_GL_H_
/*
** This header is generated from the Khronos OpenCL XML API Registry.
*/
#include <CL/cl.h>
/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES)
#define CL_NO_EXTENSION_PROTOTYPES
#endif
/* CL_NO_EXTENSION_PROTOTYPES implies
CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and
CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef cl_uint cl_gl_object_type;
typedef cl_uint cl_gl_texture_info;
typedef cl_uint cl_gl_platform_info;
typedef struct __GLsync *cl_GLsync;
/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */
#define CL_GL_OBJECT_BUFFER 0x2000
#define CL_GL_OBJECT_TEXTURE2D 0x2001
#define CL_GL_OBJECT_TEXTURE3D 0x2002
#define CL_GL_OBJECT_RENDERBUFFER 0x2003
#ifdef CL_VERSION_1_2
#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
#define CL_GL_OBJECT_TEXTURE1D 0x200F
#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
#endif
/* cl_gl_texture_info */
#define CL_GL_TEXTURE_TARGET 0x2004
#define CL_GL_MIPMAP_LEVEL 0x2005
#ifdef CL_VERSION_1_2
#define CL_GL_NUM_SAMPLES 0x2012
#endif
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLBuffer(cl_context context,
cl_mem_flags flags,
cl_GLuint bufobj,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_0;
#ifdef CL_VERSION_1_2
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLTexture(cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_2;
#endif
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLRenderbuffer(cl_context context,
cl_mem_flags flags,
cl_GLuint renderbuffer,
cl_int *errcode_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLObjectInfo(cl_mem memobj,
cl_gl_object_type *gl_object_type,
cl_GLuint *gl_object_name) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLTextureInfo(cl_mem memobj,
cl_gl_texture_info param_name,
size_t param_value_size,
void *param_value,
size_t *param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireGLObjects(cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseGLObjects(cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_API_SUFFIX__VERSION_1_0;
/* Deprecated OpenCL 1.1 APIs */
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
clCreateFromGLTexture2D(cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
clCreateFromGLTexture3D(cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
/* cl_khr_gl_sharing extension */
/***************************************************************
* cl_khr_gl_sharing
***************************************************************/
#define cl_khr_gl_sharing 1
#define CL_KHR_GL_SHARING_EXTENSION_NAME \
"cl_khr_gl_sharing"
typedef cl_uint cl_gl_context_info;
typedef cl_uint cl_gl_context_info;
/* Additional Error Codes */
#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
/* Error codes */
#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
/* cl_gl_context_info */
#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
/* cl_gl_context_info */
#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
/* Additional cl_context_properties */
#define CL_GL_CONTEXT_KHR 0x2008
#define CL_EGL_DISPLAY_KHR 0x2009
#define CL_GLX_DISPLAY_KHR 0x200A
#define CL_WGL_HDC_KHR 0x200B
#define CL_CGL_SHAREGROUP_KHR 0x200C
/* Additional cl_context_properties */
#define CL_GL_CONTEXT_KHR 0x2008
#define CL_EGL_DISPLAY_KHR 0x2009
#define CL_GLX_DISPLAY_KHR 0x200A
#define CL_WGL_HDC_KHR 0x200B
#define CL_CGL_SHAREGROUP_KHR 0x200C
typedef cl_uint cl_gl_object_type;
typedef cl_uint cl_gl_texture_info;
typedef cl_uint cl_gl_platform_info;
/* cl_gl_object_type */
#define CL_GL_OBJECT_BUFFER 0x2000
#define CL_GL_OBJECT_TEXTURE2D 0x2001
#define CL_GL_OBJECT_TEXTURE3D 0x2002
#define CL_GL_OBJECT_RENDERBUFFER 0x2003
#if defined(CL_VERSION_1_2)
/* cl_gl_object_type */
#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
#define CL_GL_OBJECT_TEXTURE1D 0x200F
#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
#endif /* defined(CL_VERSION_1_2) */
/* cl_gl_texture_info */
#define CL_GL_TEXTURE_TARGET 0x2004
#define CL_GL_MIPMAP_LEVEL 0x2005
typedef cl_int CL_API_CALL
clGetGLContextInfoKHR_t(
const cl_context_properties* properties,
cl_gl_context_info param_name,
size_t param_value_size,
void* param_value,
size_t* param_value_size_ret);
typedef clGetGLContextInfoKHR_t *
clGetGLContextInfoKHR_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_mem CL_API_CALL
clCreateFromGLBuffer_t(
cl_context context,
cl_mem_flags flags,
cl_GLuint bufobj,
cl_int* errcode_ret);
typedef clCreateFromGLBuffer_t *
clCreateFromGLBuffer_fn CL_API_SUFFIX__VERSION_1_0;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLContextInfoKHR(const cl_context_properties *properties,
cl_gl_context_info param_name,
size_t param_value_size,
void *param_value,
size_t *param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
clGetGLContextInfoKHR(
const cl_context_properties* properties,
cl_gl_context_info param_name,
size_t param_value_size,
void* param_value,
size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clGetGLContextInfoKHR_fn)(
const cl_context_properties *properties,
cl_gl_context_info param_name,
size_t param_value_size,
void *param_value,
size_t *param_value_size_ret);
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLBuffer(
cl_context context,
cl_mem_flags flags,
cl_GLuint bufobj,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
#if defined(CL_VERSION_1_2)
typedef cl_mem CL_API_CALL
clCreateFromGLTexture_t(
cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int* errcode_ret);
typedef clCreateFromGLTexture_t *
clCreateFromGLTexture_fn CL_API_SUFFIX__VERSION_1_2;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLTexture(
cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
#endif /* defined(CL_VERSION_1_2) */
typedef cl_mem CL_API_CALL
clCreateFromGLRenderbuffer_t(
cl_context context,
cl_mem_flags flags,
cl_GLuint renderbuffer,
cl_int* errcode_ret);
typedef clCreateFromGLRenderbuffer_t *
clCreateFromGLRenderbuffer_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clGetGLObjectInfo_t(
cl_mem memobj,
cl_gl_object_type* gl_object_type,
cl_GLuint* gl_object_name);
typedef clGetGLObjectInfo_t *
clGetGLObjectInfo_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clGetGLTextureInfo_t(
cl_mem memobj,
cl_gl_texture_info param_name,
size_t param_value_size,
void* param_value,
size_t* param_value_size_ret);
typedef clGetGLTextureInfo_t *
clGetGLTextureInfo_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clEnqueueAcquireGLObjects_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueAcquireGLObjects_t *
clEnqueueAcquireGLObjects_fn CL_API_SUFFIX__VERSION_1_0;
typedef cl_int CL_API_CALL
clEnqueueReleaseGLObjects_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueReleaseGLObjects_t *
clEnqueueReleaseGLObjects_fn CL_API_SUFFIX__VERSION_1_0;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLRenderbuffer(
cl_context context,
cl_mem_flags flags,
cl_GLuint renderbuffer,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLObjectInfo(
cl_mem memobj,
cl_gl_object_type* gl_object_type,
cl_GLuint* gl_object_name) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLTextureInfo(
cl_mem memobj,
cl_gl_texture_info param_name,
size_t param_value_size,
void* param_value,
size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireGLObjects(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseGLObjects(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_0;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
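/*
 * Illustrative sketch (not part of the original header): sharing an existing
 * OpenGL buffer object with OpenCL and bracketing its use with the
 * acquire/release calls required by cl_khr_gl_sharing. Assumes the context
 * was created against the current GL context and that the GL buffer id is
 * valid; the "example_" name is hypothetical and error handling is
 * abbreviated.
 */
static inline cl_int example_use_gl_buffer(cl_context context,
                                           cl_command_queue queue,
                                           cl_GLuint gl_buffer)
{
    cl_int err = CL_SUCCESS;
    cl_mem mem = clCreateFromGLBuffer(context, CL_MEM_READ_WRITE, gl_buffer, &err);
    if (err != CL_SUCCESS) return err;

    /* GL must be finished with the object before it is acquired for CL use. */
    err = clEnqueueAcquireGLObjects(queue, 1, &mem, 0, NULL, NULL);
    /* ... enqueue kernels or copies operating on 'mem' here ... */
    if (err == CL_SUCCESS)
        err = clEnqueueReleaseGLObjects(queue, 1, &mem, 0, NULL, NULL);

    clReleaseMemObject(mem);
    return err;
}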
/* OpenCL 1.0 APIs that were deprecated in OpenCL 1.2 */
typedef cl_mem CL_API_CALL
clCreateFromGLTexture2D_t(
cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int* errcode_ret);
typedef clCreateFromGLTexture2D_t *
clCreateFromGLTexture2D_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
typedef cl_mem CL_API_CALL
clCreateFromGLTexture3D_t(
cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int* errcode_ret);
typedef clCreateFromGLTexture3D_t *
clCreateFromGLTexture3D_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLTexture2D(
cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLTexture3D(
cl_context context,
cl_mem_flags flags,
cl_GLenum target,
cl_GLint miplevel,
cl_GLuint texture,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
/***************************************************************
* cl_khr_gl_event
***************************************************************/
#define cl_khr_gl_event 1
#define CL_KHR_GL_EVENT_EXTENSION_NAME \
"cl_khr_gl_event"
typedef struct __GLsync * cl_GLsync;
/* cl_command_type */
#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D
typedef cl_event CL_API_CALL
clCreateEventFromGLsyncKHR_t(
cl_context context,
cl_GLsync sync,
cl_int* errcode_ret);
typedef clCreateEventFromGLsyncKHR_t *
clCreateEventFromGLsyncKHR_fn CL_API_SUFFIX__VERSION_1_1;
#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_event CL_API_CALL
clCreateEventFromGLsyncKHR(
cl_context context,
cl_GLsync sync,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1;
#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
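/*
 * Illustrative sketch (not part of the original header): turning a GL fence
 * sync object into a cl_event so a CL acquire can wait on GL work without a
 * full glFinish(). The cl_GLsync handle is assumed to come from the
 * application's GL code; the "example_" name is hypothetical and error
 * handling is abbreviated.
 */
static inline cl_int example_acquire_after_gl_fence(cl_context context,
                                                    cl_command_queue queue,
                                                    cl_GLsync fence,
                                                    cl_mem shared_mem)
{
    cl_int err = CL_SUCCESS;
    cl_event gl_done = clCreateEventFromGLsyncKHR(context, fence, &err);
    if (err != CL_SUCCESS) return err;

    /* The acquire is deferred until the GL fence has signalled. */
    err = clEnqueueAcquireGLObjects(queue, 1, &shared_mem, 1, &gl_done, NULL);
    clReleaseEvent(gl_done);
    return err;
}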
/***************************************************************
* cl_khr_gl_depth_images
***************************************************************/
#define cl_khr_gl_depth_images 1
#define CL_KHR_GL_DEPTH_IMAGES_EXTENSION_NAME \
"cl_khr_gl_depth_images"
/* cl_channel_order */
#define CL_DEPTH_STENCIL 0x10BE
/* cl_channel_type */
#define CL_UNORM_INT24 0x10DF
/***************************************************************
* cl_khr_gl_msaa_sharing
***************************************************************/
#define cl_khr_gl_msaa_sharing 1
#define CL_KHR_GL_MSAA_SHARING_EXTENSION_NAME \
"cl_khr_gl_msaa_sharing"
/* cl_gl_texture_info */
#define CL_GL_NUM_SAMPLES 0x2012
/***************************************************************
* cl_intel_sharing_format_query_gl
***************************************************************/
#define cl_intel_sharing_format_query_gl 1
#define CL_INTEL_SHARING_FORMAT_QUERY_GL_EXTENSION_NAME \
"cl_intel_sharing_format_query_gl"
/* when cl_khr_gl_sharing is supported */
typedef cl_int CL_API_CALL
clGetSupportedGLTextureFormatsINTEL_t(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint num_entries,
cl_GLenum* gl_formats,
cl_uint* num_texture_formats);
typedef clGetSupportedGLTextureFormatsINTEL_t *
clGetSupportedGLTextureFormatsINTEL_fn ;
#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSupportedGLTextureFormatsINTEL(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint num_entries,
cl_GLenum* gl_formats,
cl_uint* num_texture_formats) ;
#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
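/*
 * Illustrative sketch (not part of the original header): the usual two-call
 * pattern for the format query above - first ask for the count, then fetch
 * the formats. Assumes cl_intel_sharing_format_query_gl is supported; the
 * "example_" name and the fixed bound of 16 entries are arbitrary choices
 * for the sketch, and error handling is abbreviated.
 */
static inline cl_int example_list_gl_texture_formats(cl_context context)
{
    cl_uint count = 0;
    cl_int err = clGetSupportedGLTextureFormatsINTEL(context, CL_MEM_READ_WRITE,
                                                     CL_MEM_OBJECT_IMAGE2D,
                                                     0, NULL, &count);
    if (err != CL_SUCCESS || count == 0) return err;

    {
        cl_GLenum formats[16];
        cl_uint n = (count < 16) ? count : 16;
        err = clGetSupportedGLTextureFormatsINTEL(context, CL_MEM_READ_WRITE,
                                                  CL_MEM_OBJECT_IMAGE2D,
                                                  n, formats, NULL);
        /* 'formats' now holds GL formats usable with clCreateFromGLTexture
         * on this context. */
    }
    return err;
}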
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_GL_H */
#endif /* OPENCL_CL_GL_H_ */


@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2021 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,27 +14,5 @@
* limitations under the License.
******************************************************************************/
#ifndef __OPENCL_CL_GL_EXT_H
#define __OPENCL_CL_GL_EXT_H
#ifdef __cplusplus
extern "C" {
#endif
#include <CL/cl_gl.h>
/*
* cl_khr_gl_event extension
*/
#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D
extern CL_API_ENTRY cl_event CL_API_CALL
clCreateEventFromGLsyncKHR(cl_context context,
cl_GLsync sync,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_1;
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_GL_EXT_H */
#pragma message("The extensions in cl_gl_ext.h have been moved into cl_gl.h. Please include cl_gl.h directly.")


@ -41,11 +41,12 @@ extern "C" {
/**
* Rounding mode used when converting to cl_half.
*/
typedef enum {
CL_HALF_RTE, // round to nearest even
CL_HALF_RTZ, // round towards zero
CL_HALF_RTP, // round towards positive infinity
CL_HALF_RTN, // round towards negative infinity
typedef enum
{
CL_HALF_RTE, // round to nearest even
CL_HALF_RTZ, // round towards zero
CL_HALF_RTP, // round towards positive infinity
CL_HALF_RTN, // round towards negative infinity
} cl_half_rounding_mode;
@ -58,317 +59,372 @@ typedef enum {
* Utility to deal with values that overflow when converting to half precision.
*/
static inline cl_half cl_half_handle_overflow(cl_half_rounding_mode rounding_mode,
uint16_t sign) {
if (rounding_mode == CL_HALF_RTZ) {
// Round overflow towards zero -> largest finite number (preserving sign)
return (sign << 15) | CL_HALF_MAX_FINITE_MAG;
} else if (rounding_mode == CL_HALF_RTP && sign) {
// Round negative overflow towards positive infinity -> most negative finite number
return (1 << 15) | CL_HALF_MAX_FINITE_MAG;
} else if (rounding_mode == CL_HALF_RTN && !sign) {
// Round positive overflow towards negative infinity -> largest finite number
return CL_HALF_MAX_FINITE_MAG;
}
uint16_t sign)
{
if (rounding_mode == CL_HALF_RTZ)
{
// Round overflow towards zero -> largest finite number (preserving sign)
return (sign << 15) | CL_HALF_MAX_FINITE_MAG;
}
else if (rounding_mode == CL_HALF_RTP && sign)
{
// Round negative overflow towards positive infinity -> most negative finite number
return (1 << 15) | CL_HALF_MAX_FINITE_MAG;
}
else if (rounding_mode == CL_HALF_RTN && !sign)
{
// Round positive overflow towards negative infinity -> largest finite number
return CL_HALF_MAX_FINITE_MAG;
}
// Overflow to infinity
return (sign << 15) | CL_HALF_EXP_MASK;
// Overflow to infinity
return (sign << 15) | CL_HALF_EXP_MASK;
}
/*
* Utility to deal with values that underflow when converting to half precision.
*/
static inline cl_half cl_half_handle_underflow(cl_half_rounding_mode rounding_mode,
uint16_t sign) {
if (rounding_mode == CL_HALF_RTP && !sign) {
// Round underflow towards positive infinity -> smallest positive value
return (sign << 15) | 1;
} else if (rounding_mode == CL_HALF_RTN && sign) {
// Round underflow towards negative infinity -> largest negative value
return (sign << 15) | 1;
}
uint16_t sign)
{
if (rounding_mode == CL_HALF_RTP && !sign)
{
// Round underflow towards positive infinity -> smallest positive value
return (sign << 15) | 1;
}
else if (rounding_mode == CL_HALF_RTN && sign)
{
// Round underflow towards negative infinity -> largest negative value
return (sign << 15) | 1;
}
// Flush to zero
return (sign << 15);
// Flush to zero
return (sign << 15);
}
/**
* Convert a cl_float to a cl_half.
*/
static inline cl_half cl_half_from_float(cl_float f, cl_half_rounding_mode rounding_mode) {
// Type-punning to get direct access to underlying bits
union {
cl_float f;
uint32_t i;
} f32;
f32.f = f;
static inline cl_half cl_half_from_float(cl_float f, cl_half_rounding_mode rounding_mode)
{
// Type-punning to get direct access to underlying bits
union
{
cl_float f;
uint32_t i;
} f32;
f32.f = f;
// Extract sign bit
uint16_t sign = f32.i >> 31;
// Extract sign bit
uint16_t sign = f32.i >> 31;
// Extract FP32 exponent and mantissa
uint32_t f_exp = (f32.i >> (CL_FLT_MANT_DIG - 1)) & 0xFF;
uint32_t f_mant = f32.i & ((1 << (CL_FLT_MANT_DIG - 1)) - 1);
// Extract FP32 exponent and mantissa
uint32_t f_exp = (f32.i >> (CL_FLT_MANT_DIG - 1)) & 0xFF;
uint32_t f_mant = f32.i & ((1 << (CL_FLT_MANT_DIG - 1)) - 1);
// Remove FP32 exponent bias
int32_t exp = f_exp - CL_FLT_MAX_EXP + 1;
// Remove FP32 exponent bias
int32_t exp = f_exp - CL_FLT_MAX_EXP + 1;
// Add FP16 exponent bias
uint16_t h_exp = (uint16_t)(exp + CL_HALF_MAX_EXP - 1);
// Add FP16 exponent bias
uint16_t h_exp = (uint16_t)(exp + CL_HALF_MAX_EXP - 1);
// Position of the bit that will become the FP16 mantissa LSB
uint32_t lsb_pos = CL_FLT_MANT_DIG - CL_HALF_MANT_DIG;
// Position of the bit that will become the FP16 mantissa LSB
uint32_t lsb_pos = CL_FLT_MANT_DIG - CL_HALF_MANT_DIG;
// Check for NaN / infinity
if (f_exp == 0xFF) {
if (f_mant) {
// NaN -> propagate mantissa and silence it
uint16_t h_mant = (uint16_t)(f_mant >> lsb_pos);
h_mant |= 0x200;
return (sign << 15) | CL_HALF_EXP_MASK | h_mant;
} else {
// Infinity -> zero mantissa
return (sign << 15) | CL_HALF_EXP_MASK;
}
// Check for NaN / infinity
if (f_exp == 0xFF)
{
if (f_mant)
{
// NaN -> propagate mantissa and silence it
uint16_t h_mant = (uint16_t)(f_mant >> lsb_pos);
h_mant |= 0x200;
return (sign << 15) | CL_HALF_EXP_MASK | h_mant;
}
// Check for zero
if (!f_exp && !f_mant) {
return (sign << 15);
else
{
// Infinity -> zero mantissa
return (sign << 15) | CL_HALF_EXP_MASK;
}
}
// Check for overflow
if (exp >= CL_HALF_MAX_EXP) {
return cl_half_handle_overflow(rounding_mode, sign);
}
// Check for zero
if (!f_exp && !f_mant)
{
return (sign << 15);
}
// Check for underflow
if (exp < (CL_HALF_MIN_EXP - CL_HALF_MANT_DIG - 1)) {
return cl_half_handle_underflow(rounding_mode, sign);
}
// Check for overflow
if (exp >= CL_HALF_MAX_EXP)
{
return cl_half_handle_overflow(rounding_mode, sign);
}
// Check for value that will become denormal
if (exp < -14) {
// Denormal -> include the implicit 1 from the FP32 mantissa
h_exp = 0;
f_mant |= 1 << (CL_FLT_MANT_DIG - 1);
// Check for underflow
if (exp < (CL_HALF_MIN_EXP - CL_HALF_MANT_DIG - 1))
{
return cl_half_handle_underflow(rounding_mode, sign);
}
// Mantissa shift amount depends on exponent
lsb_pos = -exp + (CL_FLT_MANT_DIG - 25);
}
// Check for value that will become denormal
if (exp < -14)
{
// Denormal -> include the implicit 1 from the FP32 mantissa
h_exp = 0;
f_mant |= 1 << (CL_FLT_MANT_DIG - 1);
// Generate FP16 mantissa by shifting FP32 mantissa
uint16_t h_mant = (uint16_t)(f_mant >> lsb_pos);
// Mantissa shift amount depends on exponent
lsb_pos = -exp + (CL_FLT_MANT_DIG - 25);
}
// Check whether we need to round
uint32_t halfway = 1 << (lsb_pos - 1);
uint32_t mask = (halfway << 1) - 1;
switch (rounding_mode) {
case CL_HALF_RTE:
if ((f_mant & mask) > halfway) {
// More than halfway -> round up
h_mant += 1;
} else if ((f_mant & mask) == halfway) {
// Exactly halfway -> round to nearest even
if (h_mant & 0x1)
h_mant += 1;
}
break;
case CL_HALF_RTZ:
// Mantissa has already been truncated -> do nothing
break;
case CL_HALF_RTP:
if ((f_mant & mask) && !sign) {
// Round positive numbers up
h_mant += 1;
}
break;
case CL_HALF_RTN:
if ((f_mant & mask) && sign) {
// Round negative numbers down
h_mant += 1;
}
break;
}
// Generate FP16 mantissa by shifting FP32 mantissa
uint16_t h_mant = (uint16_t)(f_mant >> lsb_pos);
// Check for mantissa overflow
if (h_mant & 0x400) {
h_exp += 1;
h_mant = 0;
}
// Check whether we need to round
uint32_t halfway = 1 << (lsb_pos - 1);
uint32_t mask = (halfway << 1) - 1;
switch (rounding_mode)
{
case CL_HALF_RTE:
if ((f_mant & mask) > halfway)
{
// More than halfway -> round up
h_mant += 1;
}
else if ((f_mant & mask) == halfway)
{
// Exactly halfway -> round to nearest even
if (h_mant & 0x1)
h_mant += 1;
}
break;
case CL_HALF_RTZ:
// Mantissa has already been truncated -> do nothing
break;
case CL_HALF_RTP:
if ((f_mant & mask) && !sign)
{
// Round positive numbers up
h_mant += 1;
}
break;
case CL_HALF_RTN:
if ((f_mant & mask) && sign)
{
// Round negative numbers down
h_mant += 1;
}
break;
}
return (sign << 15) | (h_exp << 10) | h_mant;
// Check for mantissa overflow
if (h_mant & 0x400)
{
h_exp += 1;
h_mant = 0;
}
return (sign << 15) | (h_exp << 10) | h_mant;
}
/**
* Convert a cl_double to a cl_half.
*/
static inline cl_half cl_half_from_double(cl_double d, cl_half_rounding_mode rounding_mode) {
// Type-punning to get direct access to underlying bits
union {
cl_double d;
uint64_t i;
} f64;
f64.d = d;
static inline cl_half cl_half_from_double(cl_double d, cl_half_rounding_mode rounding_mode)
{
// Type-punning to get direct access to underlying bits
union
{
cl_double d;
uint64_t i;
} f64;
f64.d = d;
// Extract sign bit
uint16_t sign = f64.i >> 63;
// Extract sign bit
uint16_t sign = f64.i >> 63;
// Extract FP64 exponent and mantissa
uint64_t d_exp = (f64.i >> (CL_DBL_MANT_DIG - 1)) & 0x7FF;
uint64_t d_mant = f64.i & (((uint64_t)1 << (CL_DBL_MANT_DIG - 1)) - 1);
// Extract FP64 exponent and mantissa
uint64_t d_exp = (f64.i >> (CL_DBL_MANT_DIG - 1)) & 0x7FF;
uint64_t d_mant = f64.i & (((uint64_t)1 << (CL_DBL_MANT_DIG - 1)) - 1);
// Remove FP64 exponent bias
int64_t exp = d_exp - CL_DBL_MAX_EXP + 1;
// Remove FP64 exponent bias
int64_t exp = d_exp - CL_DBL_MAX_EXP + 1;
// Add FP16 exponent bias
uint16_t h_exp = (uint16_t)(exp + CL_HALF_MAX_EXP - 1);
// Add FP16 exponent bias
uint16_t h_exp = (uint16_t)(exp + CL_HALF_MAX_EXP - 1);
// Position of the bit that will become the FP16 mantissa LSB
uint32_t lsb_pos = CL_DBL_MANT_DIG - CL_HALF_MANT_DIG;
// Position of the bit that will become the FP16 mantissa LSB
uint32_t lsb_pos = CL_DBL_MANT_DIG - CL_HALF_MANT_DIG;
// Check for NaN / infinity
if (d_exp == 0x7FF) {
if (d_mant) {
// NaN -> propagate mantissa and silence it
uint16_t h_mant = (uint16_t)(d_mant >> lsb_pos);
h_mant |= 0x200;
return (sign << 15) | CL_HALF_EXP_MASK | h_mant;
} else {
// Infinity -> zero mantissa
return (sign << 15) | CL_HALF_EXP_MASK;
}
// Check for NaN / infinity
if (d_exp == 0x7FF)
{
if (d_mant)
{
// NaN -> propagate mantissa and silence it
uint16_t h_mant = (uint16_t)(d_mant >> lsb_pos);
h_mant |= 0x200;
return (sign << 15) | CL_HALF_EXP_MASK | h_mant;
}
// Check for zero
if (!d_exp && !d_mant) {
return (sign << 15);
else
{
// Infinity -> zero mantissa
return (sign << 15) | CL_HALF_EXP_MASK;
}
}
// Check for overflow
if (exp >= CL_HALF_MAX_EXP) {
return cl_half_handle_overflow(rounding_mode, sign);
}
// Check for zero
if (!d_exp && !d_mant)
{
return (sign << 15);
}
// Check for underflow
if (exp < (CL_HALF_MIN_EXP - CL_HALF_MANT_DIG - 1)) {
return cl_half_handle_underflow(rounding_mode, sign);
}
// Check for overflow
if (exp >= CL_HALF_MAX_EXP)
{
return cl_half_handle_overflow(rounding_mode, sign);
}
// Check for value that will become denormal
if (exp < -14) {
// Include the implicit 1 from the FP64 mantissa
h_exp = 0;
d_mant |= (uint64_t)1 << (CL_DBL_MANT_DIG - 1);
// Check for underflow
if (exp < (CL_HALF_MIN_EXP - CL_HALF_MANT_DIG - 1))
{
return cl_half_handle_underflow(rounding_mode, sign);
}
// Mantissa shift amount depends on exponent
lsb_pos = (uint32_t)(-exp + (CL_DBL_MANT_DIG - 25));
}
// Check for value that will become denormal
if (exp < -14)
{
// Include the implicit 1 from the FP64 mantissa
h_exp = 0;
d_mant |= (uint64_t)1 << (CL_DBL_MANT_DIG - 1);
// Generate FP16 mantissa by shifting FP64 mantissa
uint16_t h_mant = (uint16_t)(d_mant >> lsb_pos);
// Mantissa shift amount depends on exponent
lsb_pos = (uint32_t)(-exp + (CL_DBL_MANT_DIG - 25));
}
// Check whether we need to round
uint64_t halfway = (uint64_t)1 << (lsb_pos - 1);
uint64_t mask = (halfway << 1) - 1;
switch (rounding_mode) {
case CL_HALF_RTE:
if ((d_mant & mask) > halfway) {
// More than halfway -> round up
h_mant += 1;
} else if ((d_mant & mask) == halfway) {
// Exactly halfway -> round to nearest even
if (h_mant & 0x1)
h_mant += 1;
}
break;
case CL_HALF_RTZ:
// Mantissa has already been truncated -> do nothing
break;
case CL_HALF_RTP:
if ((d_mant & mask) && !sign) {
// Round positive numbers up
h_mant += 1;
}
break;
case CL_HALF_RTN:
if ((d_mant & mask) && sign) {
// Round negative numbers down
h_mant += 1;
}
break;
}
// Generate FP16 mantissa by shifting FP64 mantissa
uint16_t h_mant = (uint16_t)(d_mant >> lsb_pos);
// Check for mantissa overflow
if (h_mant & 0x400) {
h_exp += 1;
h_mant = 0;
}
// Check whether we need to round
uint64_t halfway = (uint64_t)1 << (lsb_pos - 1);
uint64_t mask = (halfway << 1) - 1;
switch (rounding_mode)
{
case CL_HALF_RTE:
if ((d_mant & mask) > halfway)
{
// More than halfway -> round up
h_mant += 1;
}
else if ((d_mant & mask) == halfway)
{
// Exactly halfway -> round to nearest even
if (h_mant & 0x1)
h_mant += 1;
}
break;
case CL_HALF_RTZ:
// Mantissa has already been truncated -> do nothing
break;
case CL_HALF_RTP:
if ((d_mant & mask) && !sign)
{
// Round positive numbers up
h_mant += 1;
}
break;
case CL_HALF_RTN:
if ((d_mant & mask) && sign)
{
// Round negative numbers down
h_mant += 1;
}
break;
}
return (sign << 15) | (h_exp << 10) | h_mant;
// Check for mantissa overflow
if (h_mant & 0x400)
{
h_exp += 1;
h_mant = 0;
}
return (sign << 15) | (h_exp << 10) | h_mant;
}
/**
* Convert a cl_half to a cl_float.
*/
static inline cl_float cl_half_to_float(cl_half h) {
// Type-punning to get direct access to underlying bits
union {
cl_float f;
uint32_t i;
} f32;
static inline cl_float cl_half_to_float(cl_half h)
{
// Type-punning to get direct access to underlying bits
union
{
cl_float f;
uint32_t i;
} f32;
// Extract sign bit
uint16_t sign = h >> 15;
// Extract sign bit
uint16_t sign = h >> 15;
// Extract FP16 exponent and mantissa
uint16_t h_exp = (h >> (CL_HALF_MANT_DIG - 1)) & 0x1F;
uint16_t h_mant = h & 0x3FF;
// Extract FP16 exponent and mantissa
uint16_t h_exp = (h >> (CL_HALF_MANT_DIG - 1)) & 0x1F;
uint16_t h_mant = h & 0x3FF;
// Remove FP16 exponent bias
int32_t exp = h_exp - CL_HALF_MAX_EXP + 1;
// Remove FP16 exponent bias
int32_t exp = h_exp - CL_HALF_MAX_EXP + 1;
// Add FP32 exponent bias
uint32_t f_exp = exp + CL_FLT_MAX_EXP - 1;
// Add FP32 exponent bias
uint32_t f_exp = exp + CL_FLT_MAX_EXP - 1;
// Check for NaN / infinity
if (h_exp == 0x1F) {
if (h_mant) {
// NaN -> propagate mantissa and silence it
uint32_t f_mant = h_mant << (CL_FLT_MANT_DIG - CL_HALF_MANT_DIG);
f_mant |= 0x400000;
f32.i = (sign << 31) | 0x7F800000 | f_mant;
return f32.f;
} else {
// Infinity -> zero mantissa
f32.i = (sign << 31) | 0x7F800000;
return f32.f;
}
// Check for NaN / infinity
if (h_exp == 0x1F)
{
if (h_mant)
{
// NaN -> propagate mantissa and silence it
uint32_t f_mant = h_mant << (CL_FLT_MANT_DIG - CL_HALF_MANT_DIG);
f_mant |= 0x400000;
f32.i = (sign << 31) | 0x7F800000 | f_mant;
return f32.f;
}
// Check for zero / denormal
if (h_exp == 0) {
if (h_mant == 0) {
// Zero -> zero exponent
f_exp = 0;
} else {
// Denormal -> normalize it
// - Shift mantissa to make most-significant 1 implicit
// - Adjust exponent accordingly
uint32_t shift = 0;
while ((h_mant & 0x400) == 0) {
h_mant <<= 1;
shift++;
}
h_mant &= 0x3FF;
f_exp -= shift - 1;
}
else
{
// Infinity -> zero mantissa
f32.i = (sign << 31) | 0x7F800000;
return f32.f;
}
}
f32.i = (sign << 31) | (f_exp << 23) | (h_mant << 13);
return f32.f;
// Check for zero / denormal
if (h_exp == 0)
{
if (h_mant == 0)
{
// Zero -> zero exponent
f_exp = 0;
}
else
{
// Denormal -> normalize it
// - Shift mantissa to make most-significant 1 implicit
// - Adjust exponent accordingly
uint32_t shift = 0;
while ((h_mant & 0x400) == 0)
{
h_mant <<= 1;
shift++;
}
h_mant &= 0x3FF;
f_exp -= shift - 1;
}
}
f32.i = (sign << 31) | (f_exp << 23) | (h_mant << 13);
return f32.f;
}
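/*
 * Illustrative sketch (not part of the original header): converting a float
 * to half precision and back using the helpers above, with round-to-nearest-
 * even. The "example_" name is a hypothetical helper.
 */
static inline cl_float example_half_round_trip(cl_float value)
{
    cl_half h = cl_half_from_float(value, CL_HALF_RTE);
    /* Exact for values representable in fp16, rounded otherwise; out-of-range
     * magnitudes become +/- infinity under CL_HALF_RTE. */
    return cl_half_to_float(h);
}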

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2023 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -13,49 +13,86 @@
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
/*****************************************************************************\
Copyright (c) 2013-2019 Intel Corporation All Rights Reserved.
#ifndef OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H_
#define OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H_
THESE MATERIALS ARE PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THESE
MATERIALS, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
** This header is generated from the Khronos OpenCL XML API Registry.
*/
File Name: cl_va_api_media_sharing_intel.h
Abstract:
Notes:
\*****************************************************************************/
#ifndef __OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H
#define __OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H
#include <va/va.h>
#include <CL/cl.h>
#include <CL/cl_platform.h>
#include <va/va.h>
/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES)
#define CL_NO_EXTENSION_PROTOTYPES
#endif
/* CL_NO_EXTENSION_PROTOTYPES implies
CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and
CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES
#endif
#ifdef __cplusplus
extern "C" {
#endif
/******************************************
* cl_intel_va_api_media_sharing extension *
*******************************************/
/***************************************************************
* cl_intel_sharing_format_query_va_api
***************************************************************/
#define cl_intel_sharing_format_query_va_api 1
#define CL_INTEL_SHARING_FORMAT_QUERY_VA_API_EXTENSION_NAME \
"cl_intel_sharing_format_query_va_api"
/* when cl_intel_va_api_media_sharing is supported */
typedef cl_int CL_API_CALL
clGetSupportedVA_APIMediaSurfaceFormatsINTEL_t(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint plane,
cl_uint num_entries,
VAImageFormat* va_api_formats,
cl_uint* num_surface_formats);
typedef clGetSupportedVA_APIMediaSurfaceFormatsINTEL_t *
clGetSupportedVA_APIMediaSurfaceFormatsINTEL_fn ;
#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSupportedVA_APIMediaSurfaceFormatsINTEL(
cl_context context,
cl_mem_flags flags,
cl_mem_object_type image_type,
cl_uint plane,
cl_uint num_entries,
VAImageFormat* va_api_formats,
cl_uint* num_surface_formats) ;
#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
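/*
 * Illustrative sketch (not part of the original header): counting the VA-API
 * image formats that can be shared for plane 0 of 2D images, using the query
 * declared above. Assumes cl_intel_sharing_format_query_va_api is supported;
 * the "example_" name is hypothetical and error handling is abbreviated.
 */
static inline cl_uint example_count_va_surface_formats(cl_context context)
{
    cl_uint count = 0;
    cl_int err = clGetSupportedVA_APIMediaSurfaceFormatsINTEL(
        context, CL_MEM_READ_WRITE, CL_MEM_OBJECT_IMAGE2D, 0 /* plane */,
        0, NULL, &count);
    return (err == CL_SUCCESS) ? count : 0;
}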
/***************************************************************
* cl_intel_va_api_media_sharing
***************************************************************/
#define cl_intel_va_api_media_sharing 1
#define CL_INTEL_VA_API_MEDIA_SHARING_EXTENSION_NAME \
"cl_intel_va_api_media_sharing"
/* error codes */
typedef cl_uint cl_va_api_device_source_intel;
typedef cl_uint cl_va_api_device_set_intel;
/* Error codes */
#define CL_INVALID_VA_API_MEDIA_ADAPTER_INTEL -1098
#define CL_INVALID_VA_API_MEDIA_SURFACE_INTEL -1099
#define CL_VA_API_MEDIA_SURFACE_ALREADY_ACQUIRED_INTEL -1100
@ -81,80 +118,97 @@ extern "C" {
#define CL_COMMAND_ACQUIRE_VA_API_MEDIA_SURFACES_INTEL 0x409A
#define CL_COMMAND_RELEASE_VA_API_MEDIA_SURFACES_INTEL 0x409B
typedef cl_uint cl_va_api_device_source_intel;
typedef cl_uint cl_va_api_device_set_intel;
typedef cl_int CL_API_CALL
clGetDeviceIDsFromVA_APIMediaAdapterINTEL_t(
cl_platform_id platform,
cl_va_api_device_source_intel media_adapter_type,
void* media_adapter,
cl_va_api_device_set_intel media_adapter_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices);
typedef clGetDeviceIDsFromVA_APIMediaAdapterINTEL_t *
clGetDeviceIDsFromVA_APIMediaAdapterINTEL_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_mem CL_API_CALL
clCreateFromVA_APIMediaSurfaceINTEL_t(
cl_context context,
cl_mem_flags flags,
VASurfaceID* surface,
cl_uint plane,
cl_int* errcode_ret);
typedef clCreateFromVA_APIMediaSurfaceINTEL_t *
clCreateFromVA_APIMediaSurfaceINTEL_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_int CL_API_CALL
clEnqueueAcquireVA_APIMediaSurfacesINTEL_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueAcquireVA_APIMediaSurfacesINTEL_t *
clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn CL_API_SUFFIX__VERSION_1_2;
typedef cl_int CL_API_CALL
clEnqueueReleaseVA_APIMediaSurfacesINTEL_t(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event);
typedef clEnqueueReleaseVA_APIMediaSurfacesINTEL_t *
clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn CL_API_SUFFIX__VERSION_1_2;
#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceIDsFromVA_APIMediaAdapterINTEL(
cl_platform_id platform,
cl_platform_id platform,
cl_va_api_device_source_intel media_adapter_type,
void *media_adapter,
cl_va_api_device_set_intel media_adapter_set,
cl_uint num_entries,
cl_device_id *devices,
cl_uint *num_devices) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clGetDeviceIDsFromVA_APIMediaAdapterINTEL_fn)(
cl_platform_id platform,
cl_va_api_device_source_intel media_adapter_type,
void *media_adapter,
cl_va_api_device_set_intel media_adapter_set,
cl_uint num_entries,
cl_device_id *devices,
cl_uint *num_devices) CL_EXT_SUFFIX__VERSION_1_2;
void* media_adapter,
cl_va_api_device_set_intel media_adapter_set,
cl_uint num_entries,
cl_device_id* devices,
cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromVA_APIMediaSurfaceINTEL(
cl_context context,
cl_mem_flags flags,
VASurfaceID *surface,
cl_uint plane,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_mem(CL_API_CALL *clCreateFromVA_APIMediaSurfaceINTEL_fn)(
cl_context context,
cl_mem_flags flags,
VASurfaceID *surface,
cl_uint plane,
cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_2;
cl_context context,
cl_mem_flags flags,
VASurfaceID* surface,
cl_uint plane,
cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireVA_APIMediaSurfacesINTEL(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn)(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_2;
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseVA_APIMediaSurfacesINTEL(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_2;
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem* mem_objects,
cl_uint num_events_in_wait_list,
const cl_event* event_wait_list,
cl_event* event) CL_API_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int(CL_API_CALL *clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn)(
cl_command_queue command_queue,
cl_uint num_objects,
const cl_mem *mem_objects,
cl_uint num_events_in_wait_list,
const cl_event *event_wait_list,
cl_event *event) CL_EXT_SUFFIX__VERSION_1_2;
#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
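/*
 * Illustrative sketch (not part of the original header): enumerating the CL
 * devices associated with a VA-API display and wrapping one plane of a VA
 * surface as a cl_mem. The device-source/device-set values are passed in by
 * the caller so no constants beyond those declared in this header are
 * assumed; the "example_" name is hypothetical and error handling is
 * abbreviated.
 */
static inline cl_mem example_import_va_surface(
    cl_platform_id platform,
    cl_va_api_device_source_intel source,
    void *va_display,
    cl_va_api_device_set_intel device_set,
    cl_context context,
    VASurfaceID *surface,
    cl_int *errcode_ret)
{
    cl_device_id device = NULL;
    cl_uint num_devices = 0;
    cl_int err = clGetDeviceIDsFromVA_APIMediaAdapterINTEL(
        platform, source, va_display, device_set, 1, &device, &num_devices);
    if (err != CL_SUCCESS || num_devices == 0) {
        if (errcode_ret) *errcode_ret = err;
        return NULL;
    }
    (void)device; /* 'context' is assumed to have been created on this device. */

    /* Import plane 0 (e.g. the Y plane of an NV12 surface) for reading. */
    return clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_READ_ONLY,
                                               surface, 0, errcode_ret);
}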
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H */
#endif /* OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H_ */


@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2008-2020 The Khronos Group Inc.
* Copyright (c) 2008-2021 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,7 +23,6 @@ extern "C" {
#include <CL/cl.h>
#include <CL/cl_gl.h>
#include <CL/cl_gl_ext.h>
#include <CL/cl_ext.h>
#ifdef __cplusplus