--- a/open-src/kernel/Makefile Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/Makefile Wed Nov 30 21:57:49 2016 -0800
@@ -78,7 +78,7 @@
@ set +e; \
cd $@ ; \
print "## making debug" $(OS_TARGET) "in open-src/kernel/$(DIRNAME)$@..."; \
- $(MAKE) $(MAKEFLAGS) $(PASSDOWN_VARS) $(DEBUGPASSDOWN) $(OS_TARGET)
+ $(MAKE) DEBUG=DEBUG $(MAKEFLAGS) $(PASSDOWN_VARS) $(DEBUGPASSDOWN) $(OS_TARGET)
$(OS_SUBDIRS): ALWAYS_RUN
@case '${MAKEFLAGS}' in *[ik]*) set +e;; esac; \
--- a/open-src/kernel/drm/src/drm_bufs.c Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/drm/src/drm_bufs.c Wed Nov 30 21:57:49 2016 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*/
/**
@@ -343,6 +343,9 @@
struct drm_map_list *maplist;
int err;
+ DRM_DEBUG_DRIVER("adding map of type %d, offset: %0x, size: %0x\n",
+ map->type, map->offset,map->size);
+
if (!(DRM_SUSER(credp) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
--- a/open-src/kernel/drm/src/drm_drv.c Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/drm/src/drm_drv.c Wed Nov 30 21:57:49 2016 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -364,6 +364,9 @@
char stack_kdata[128];
char *kdata = NULL;
unsigned int usize, asize;
+#ifdef DEBUG
+ const char *ioc_name = NULL;
+#endif
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
@@ -391,6 +394,11 @@
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
func = dev->driver->dma_ioctl;
+#ifdef DEBUG
+ if (ioctl->name != NULL)
+ ioc_name = ioctl->name;
+#endif
+
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
@@ -456,8 +464,13 @@
if (kdata && (kdata != stack_kdata))
kfree(kdata, asize);
atomic_dec(&dev->ioctl_count);
+#ifdef DEBUG
if (retcode)
- DRM_DEBUG("ret = %d\n", -retcode);
+ if (ioc_name == NULL)
+ DRM_DEBUG("ioccmd: 0x%x, ret = %d\n", cmd, -retcode);
+ else
+ DRM_DEBUG("IOC %s: ret = %d\n", ioc_name, -retcode);
+#endif
return retcode;
}
--- a/open-src/kernel/drm/src/drm_gem.c Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/drm/src/drm_gem.c Wed Nov 30 21:57:49 2016 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -431,6 +431,22 @@
}
/**
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ * @file: drm file-private structure to remove the dumb handle from
+ * @dev: corresponding drm_device
+ * @handle: the dumb handle to remove
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
@@ -746,9 +762,23 @@
}
+/*
+ * XXXX FIXME - we shouldn't be alloc'ing space here for gtt_map_kaddr. If
+ * this element is actually needed, it should be part of a global GTT mapping,
+ * and should only need "loading" at best.
+ */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
+ /*
+ * if already have a map, return.
+ */
+ if (obj->gtt_map_kaddr != NULL)
+ return 0;
+
+ /*
+ * Otherwise, get us some kernel space.
+ */
obj->gtt_map_kaddr = gfxp_alloc_kernel_space(obj->real_size);
if (obj->gtt_map_kaddr == NULL) {
return -ENOMEM;
--- a/open-src/kernel/drm/src/drm_sunmod.c Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/drm/src/drm_sunmod.c Wed Nov 30 21:57:49 2016 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -99,9 +99,14 @@
mutex_enter(&dev->struct_mutex);
dhp = (devmap_handle_t *)dhc;
cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+#ifdef DEBUG
+ if (cp->cook_refcnt != 0)
+ DRM_DEBUG_DRIVER("cookie is not zero: %d\n", cp->cook_refcnt);
+#endif
cp->cook_refcnt = 1;
mutex_exit(&dev->struct_mutex);
+ DRM_DEBUG_DRIVER("created mapping, handle: 0x%08x\n", dhp);
*new_priv = dev;
return (0);
}
@@ -116,6 +121,7 @@
devmap_handle_t *dhp;
struct ddi_umem_cookie *cp;
+ DRM_DEBUG_DRIVER("duping mapping, cookie: 0x%08x\n", dhc);
mutex_enter(&dev->struct_mutex);
dhp = (devmap_handle_t *)dhc;
cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
@@ -146,6 +152,7 @@
cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
if (new_dhp1 != NULL) {
+ DRM_DEBUG_DRIVER("new devmap_handle 1: 0x%08x\n", new_dhp1);
ndhp = (devmap_handle_t *)new_dhp1;
ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
ncp->cook_refcnt++;
@@ -154,6 +161,7 @@
}
if (new_dhp2 != NULL) {
+ DRM_DEBUG_DRIVER("new devmap_handle 2: 0x%08x\n", new_dhp2);
ndhp = (devmap_handle_t *)new_dhp2;
ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
ncp->cook_refcnt++;
@@ -161,14 +169,16 @@
ASSERT(ncp == cp);
}
- /* FIXME: dh_cookie should not be released here. */
-#if 0
+ ASSERT(cp->cook_refcnt > 0);
cp->cook_refcnt--;
if (cp->cook_refcnt == 0) {
- gfxp_umem_cookie_destroy(dhp->dh_cookie);
- dhp->dh_cookie = NULL;
+ DRM_DEBUG_DRIVER("last unmap of dh_cookie:0x%08x\n", cp);
+#if 0
+ /* FIXME: dh_cookie should not be released here. */
+ // gfxp_umem_cookie_destroy(dhp->dh_cookie);
+ // dhp->dh_cookie = NULL;
+#endif
}
-#endif
mutex_exit(&dev->struct_mutex);
}
@@ -632,15 +642,19 @@
switch (map->type) {
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
+ DRM_DEBUG_DRIVER("mapping of type %d, len %x\n", map->type, len);
return (__devmap_general(dev, dhp, map, len, maplen));
case _DRM_SHM:
+ DRM_DEBUG_DRIVER("mapping of type SHM, len %x\n", len);
return (__devmap_shm(dev, dhp, map, len, maplen));
case _DRM_AGP:
+ DRM_DEBUG_DRIVER("mapping of type AGP, len %x\n", len);
return (__devmap_agp(dev, dhp, map, len, maplen));
case _DRM_SCATTER_GATHER:
+ DRM_DEBUG_DRIVER("mapping of type SCATTER_GATHER, len %x\n", len);
return (__devmap_sg(dev, dhp, map, len, maplen));
case _DRM_GEM:
--- a/open-src/kernel/i915/src/i915_dma.c Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/i915/src/i915_dma.c Wed Nov 30 21:57:49 2016 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*/
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
@@ -1006,6 +1006,18 @@
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
value = 1;
break;
+
+ /*
+ * These should be better supported in the next version, but
+	 * are being requested in this one, so provide useful values.
+ */
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_SOFTPIN:
+ value = 0;
+ break;
+
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
--- a/open-src/kernel/i915/src/i915_drm.h Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/i915/src/i915_drm.h Wed Nov 30 21:57:49 2016 -0800
@@ -40,6 +40,31 @@
* subject to backwards-compatibility constraints.
*/
+/**
+ * DOC: uevents generated by i915 on its device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ * event from the gpu l3 cache. Additional information supplied is ROW,
+ * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ * track of these events and if a specific cache-line seems to have a
+ * persistent error remap it with the l3 remapping tool supplied in
+ * intel-gpu-tools. The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ * hangcheck. The error detection event is a good indicator of when things
+ * began to go badly. The value supplied with the event is a 1 upon error
+ * detection, and a 0 upon reset completion, signifying no more error
+ * exists. NOTE: Disabling hangcheck or reset via module parameter will
+ * cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
+ *	GPU. The value supplied with the event is always 1. NOTE: Disabling
+ * reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT "ERROR"
+#define I915_RESET_UEVENT "RESET"
+
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -121,6 +146,7 @@
int pipeB_y;
int pipeB_w;
int pipeB_h;
+
int pad1;
/* fill out some space for old userspace triple buffer */
@@ -155,8 +181,12 @@
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
-/* I915 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
+/*
+ * i915 specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
+ * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
+ * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
@@ -206,6 +236,10 @@
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
+#define DRM_I915_GET_RESET_STATS 0x32
+#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
+#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -213,7 +247,7 @@
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
-#define DRM_IOCTL_I915_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
@@ -223,7 +257,7 @@
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
-#define DRM_IOCTL_I915_HWS_ADDR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, drm_i915_hws_addr_t)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
@@ -250,18 +284,22 @@
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
-#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
+#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#ifdef _MULTI_DATAMODEL
#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = _copyin32, .copyout32 = _copyout32}
#else
#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
- [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL}
+	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL, .name = #_func}
#endif
/* Allow drivers to submit batchbuffers directly to hardware, relying
@@ -273,7 +311,7 @@
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
- struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
typedef struct drm_i915_batchbuffer32 {
@@ -289,12 +327,12 @@
* validated by the kernel prior to sending to hardware.
*/
typedef struct _drm_i915_cmdbuffer {
- char __user *buf; /* pointer to userspace command buffer */
+ char *buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
- struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
typedef struct drm_i915_cmdbuffer32 {
@@ -309,7 +347,7 @@
/* Userspace can request & wait on irq's:
*/
typedef struct drm_i915_irq_emit {
- int __user *irq_seq;
+ int *irq_seq;
} drm_i915_irq_emit_t;
typedef struct drm_i915_irq_emit32 {
@@ -338,7 +376,7 @@
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
-#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_SEMAPHORES 20
@@ -348,10 +386,25 @@
#define I915_PARAM_HAS_PINNED_BATCHES 24
#define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
+#define I915_PARAM_HAS_WT 27
+#define I915_PARAM_CMD_PARSER_VERSION 28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+#define I915_PARAM_MMAP_VERSION 30
+#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
+#define I915_PARAM_HAS_GPU_RESET 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
+#define I915_PARAM_HAS_EXEC_SOFTPIN 37
typedef struct drm_i915_getparam {
- int param;
- int __user *value;
+ __s32 param;
+ /*
+ * WARNING: Using pointers instead of fixed-size u64 means we need to write
+ * compat32 code. Don't repeat this mistake.
+ */
+ int *value;
} drm_i915_getparam_t;
typedef struct drm_i915_getparam32 {
@@ -379,7 +432,7 @@
int region;
int alignment;
int size;
- int __user *region_offset; /* offset from start of fb or agp */
+ int *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
typedef struct drm_i915_mem_alloc32 {
@@ -507,6 +560,14 @@
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * Added in version 2.
+ */
+ __u64 flags;
+#define I915_MMAP_WC 0x1
};
struct drm_i915_gem_mmap_gtt {
@@ -674,16 +735,23 @@
__u64 alignment;
/**
- * Returned value of the updated offset of the object, for future
- * presumed_offset writes.
+ * When the EXEC_OBJECT_PINNED flag is specified this is populated by
+ * the user with the GTT offset at which this object will be pinned.
+ * When the I915_EXEC_NO_RELOC flag is specified this must contain the
+ * presumed_offset of the object.
+ * During execbuffer2 the kernel populates it with the value of the
+ * current GTT offset of the object, for future presumed_offset writes.
*/
__u64 offset;
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+#define EXEC_OBJECT_PINNED (1<<4)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
__u64 flags;
+
__u64 rsvd1;
__u64 rsvd2;
};
@@ -721,8 +789,8 @@
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
- __u64 flags; /* currently unused */
- __u64 rsvd1;
+ __u64 flags;
+ __u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
@@ -743,7 +811,7 @@
*/
#define I915_EXEC_IS_PINNED (1<<10)
-/** Provide a hint to the kernel that the command stream and auxilliary
+/** Provide a hint to the kernel that the command stream and auxiliary
* state buffers already holds the correct presumed addresses and so the
* relocation process may be skipped if no buffers need to be moved in
* preparation for the execbuffer.
@@ -755,7 +823,18 @@
*/
#define I915_EXEC_HANDLE_LUT (1<<12)
-#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+/** Used for switching BSD rings on the platforms with two BSD rings */
+#define I915_EXEC_BSD_MASK (3<<13)
+#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
+#define I915_EXEC_BSD_RING1 (1<<13)
+#define I915_EXEC_BSD_RING2 (2<<13)
+
+/** Tell the kernel that the batchbuffer is processed by
+ * the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -793,8 +872,32 @@
__u32 busy;
};
+/**
+ * I915_CACHING_NONE
+ *
+ * GPU access is not coherent with cpu caches. Default for machines without an
+ * LLC.
+ */
#define I915_CACHING_NONE 0
+/**
+ * I915_CACHING_CACHED
+ *
+ * GPU access is coherent with cpu caches and furthermore the data is cached in
+ * last-level caches shared between cpu cores and the gpu GT. Default on
+ * machines with HAS_LLC.
+ */
#define I915_CACHING_CACHED 1
+/**
+ * I915_CACHING_DISPLAY
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no special
+ * cache mode (like write-through or gfdt flushing) is available. The kernel
+ * automatically sets this mode when using a buffer as a scanout target.
+ * Userspace can manually set this mode to avoid a costly stall and clflush in
+ * the hotpath of drawing the first frame.
+ */
+#define I915_CACHING_DISPLAY 2
struct drm_i915_gem_caching {
/**
@@ -871,6 +974,12 @@
* mmap mapping.
*/
__u32 swizzle_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping whilst bound.
+ */
+ __u32 phys_swizzle_mode;
};
struct drm_i915_gem_get_aperture {
@@ -961,6 +1070,7 @@
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
@@ -1027,7 +1137,62 @@
};
struct drm_i915_reg_read {
+ /*
+ * Register offset.
+ * For 64bit wide registers where the upper 32bits don't immediately
+ * follow the lower 32bits, the offset of the lower 32bits must
+ * be specified
+ */
__u64 offset;
__u64 val; /* Return value */
};
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ * single instruction 8byte read, in order to workaround that use
+ * offset (0x2538 | 1) instead.
+ *
+ */
+
+struct drm_i915_reset_stats {
+ __u32 ctx_id;
+ __u32 flags;
+
+ /* All resets since boot/module reload, for all contexts */
+ __u32 reset_count;
+
+ /* Number of batches lost when active in GPU, for this context */
+ __u32 batch_active;
+
+ /* Number of batches lost pending for execution, for this context */
+ __u32 batch_pending;
+
+ __u32 pad;
+};
+
+struct drm_i915_gem_userptr {
+ __u64 user_ptr;
+ __u64 user_size;
+ __u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+};
+
+struct drm_i915_gem_context_param {
+ __u32 ctx_id;
+ __u32 size;
+ __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
+ __u64 value;
+};
+
#endif /* _I915_DRM_H_ */
--- a/open-src/kernel/i915/src/i915_drv.c Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/i915/src/i915_drv.c Wed Nov 30 21:57:49 2016 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -917,6 +917,9 @@
.gem_free_object = i915_gem_free_object,
/*.gem_vm_ops = &i915_gem_vm_ops,*/
.gem_fault = i915_gem_fault,
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.id_table = pciidlist,
--- a/open-src/kernel/i915/src/i915_gem.c Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/i915/src/i915_gem.c Wed Nov 30 21:57:49 2016 -0800
@@ -764,7 +764,7 @@
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
ret = -EAGAIN;
- /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
+ /* ... but upgrade the -EGAIN to an -EIO if the gpu is truly
* gone. */
end = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (end)
@@ -1090,6 +1090,14 @@
mutex_unlock(&dev->struct_mutex);
}
+/*
+ * FIXME - this tunable allows for the behavior of the original code
+ * to overload the umem_cookie. This *can* cause race conditions where
+ * released memory can have bad cookie values. By default, we set it to
+ * 0, but is useful for testing.
+ */
+int __overload_umem_cookie = 0;
+
/**
* i915_gem_create_mmap_offset - create a fake mmap offset for an object
* @obj: obj in question
@@ -1107,6 +1115,12 @@
struct ddi_umem_cookie *umem_cookie = obj->base.maplist.map->umem_cookie;
int ret;
+ /*
+ * Not sure that we should still be using the gtt_map_kaddr interface.
+ * as it causes a drm object to have two different memory allocations
+ * (not to mention some ugly overloading of the umem_cookie). But maybe
+ * this is something to fix with the VMA code in the next driver.
+ */
if (obj->base.gtt_map_kaddr == NULL) {
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret) {
@@ -1115,7 +1129,8 @@
}
}
- umem_cookie->cvaddr = obj->base.gtt_map_kaddr;
+ if (__overload_umem_cookie != 0)
+ umem_cookie->cvaddr = obj->base.gtt_map_kaddr;
/* user_token is the fake offset
* which create in drm_map_handle at alloc time
@@ -1330,6 +1345,12 @@
caddr_t va;
long i;
+ /*
+ * Don't leak; make sure that we haven't previously setup a pagelist
+ */
+ if (obj->page_list != NULL)
+ return 0;
+
obj->page_list = kmem_zalloc(np * sizeof(caddr_t), KM_SLEEP);
if (obj->page_list == NULL) {
DRM_ERROR("Faled to allocate page list. size = %ld", np * sizeof(caddr_t));
--- a/open-src/kernel/sys/drm/drmP.h Wed Nov 30 17:47:29 2016 -0800
+++ b/open-src/kernel/sys/drm/drmP.h Wed Nov 30 21:57:49 2016 -0800
@@ -458,7 +458,7 @@
[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = _copyin32, .copyout32 = _copyout32}
#else
#define DRM_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL}
+	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL, .name = #_func}
#endif
typedef struct drm_magic_entry {
@@ -601,6 +601,7 @@
struct drm_device *dev;
/* Mapping info for this object */
+ /* Not used in this driver, should remove to prevent confusion */
struct drm_map_list map_list;
/*
@@ -641,6 +642,7 @@
caddr_t kaddr;
size_t real_size; /* real size of memory */
pfn_t *pfnarray;
+ /* Obsolete? Probably should be using the kaddr value above? */
caddr_t gtt_map_kaddr;
struct gfxp_pmem_cookie mempool_cookie;
@@ -1580,6 +1582,10 @@
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
int drm_gem_open_ioctl(DRM_IOCTL_ARGS);