OpenCloudOS-Kernel/drivers/gpu/drm/i915/i915_gem.c


/*
* Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->cache_dirty)
return false;
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
return obj->pin_global; /* currently in use by HW, keep flushed */
}
static int
insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
}
static void
remove_mappable_node(struct drm_mm_node *node)
{
drm_mm_remove_node(node);
}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
dev_priv->mm.object_memory += size;
spin_unlock(&dev_priv->mm.object_stat_lock);
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
dev_priv->mm.object_memory -= size;
spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
might_sleep();
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long, something really bad is going on and
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
!i915_reset_backoff(error),
I915_RESET_TIMEOUT);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
} else {
return 0;
}
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
return 0;
}
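/*
 * Rough usage sketch (illustrative, not taken from any one caller): on
 * success the caller owns dev->struct_mutex and is responsible for
 * dropping it with a plain mutex_unlock():
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *	... touch GEM state guarded by struct_mutex ...
 *	mutex_unlock(&dev->struct_mutex);
 */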
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
GEM_TRACE("\n");
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
if (!i915->gt.awake)
return I915_EPOCH_INVALID;
GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
/*
* Be paranoid and flush a concurrent interrupt to make sure
* we don't reactivate any irq tasklets after parking.
*
* FIXME: Note that even though we have waited for execlists to be idle,
* there may still be an in-flight interrupt even though the CSB
* is now empty. synchronize_irq() makes sure that a residual interrupt
* is completed before we continue, but it doesn't prevent the HW from
* raising a spurious interrupt later. To complete the shield we should
* coordinate disabling the CS irq with flushing the interrupts.
*/
synchronize_irq(i915->drm.irq);
intel_engines_park(i915);
i915_timelines_park(i915);
i915_pmu_gt_parked(i915);
i915_vma_parked(i915);
wakeref = fetch_and_zero(&i915->gt.awake);
GEM_BUG_ON(!wakeref);
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return i915->gt.epoch;
}
void i915_gem_park(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
if (!i915->gt.awake)
return;
/* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
}
void i915_gem_unpark(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
assert_rpm_wakelock_held(i915);
if (i915->gt.awake)
return;
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
* command submission.
*
* This activity has negative impact on the performance of the chip with
* huge latencies observed in the interrupt handler and elsewhere.
*
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!i915->gt.awake);
if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
i915->gt.epoch = 1;
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
if (INTEL_GEN(i915) >= 6)
gen6_rps_busy(i915);
i915_pmu_gt_unparked(i915);
intel_engines_unpark(i915);
i915_queue_hangcheck(i915);
queue_delayed_work(i915->wq,
&i915->gt.retire_work,
round_jiffies_up_relative(HZ));
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma;
u64 pinned;
mutex_lock(&ggtt->vm.mutex);
pinned = ggtt->vm.reserved;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&ggtt->vm.mutex);
args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
}
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
drm_dma_handle_t *phys;
struct sg_table *st;
struct scatterlist *sg;
char *vaddr;
int i;
int err;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
/* Always aligning to the object size allows a single allocation
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
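/*
 * Worked example (illustrative): a 12 KiB object is backed by a 16 KiB
 * contiguous allocation (roundup_pow_of_two(12K) == 16K) that is also
 * aligned to 16 KiB, so a single allocation satisfies any alignment a
 * caller may require up to the object size.
 */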
phys = drm_pci_alloc(obj->base.dev,
roundup_pow_of_two(obj->base.size),
roundup_pow_of_two(obj->base.size));
if (!phys)
return -ENOMEM;
vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto err_phys;
}
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(src);
put_page(page);
vaddr += PAGE_SIZE;
}
i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
err = -ENOMEM;
goto err_phys;
}
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
err = -ENOMEM;
goto err_phys;
}
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
sg_dma_address(sg) = phys->busaddr;
sg_dma_len(sg) = obj->base.size;
obj->phys_handle = phys;
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
err_phys:
drm_pci_free(obj->base.dev, phys);
return err;
}
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
if (needs_clflush &&
(obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_sg(pages);
__start_cpu_write(obj);
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
__i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *dst;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
continue;
dst = kmap_atomic(page);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
memcpy(dst, vaddr, PAGE_SIZE);
kunmap_atomic(dst);
set_page_dirty(page);
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
vaddr += PAGE_SIZE;
}
obj->mm.dirty = false;
}
sg_free_table(pages);
kfree(pages);
drm_pci_free(obj->base.dev, obj->phys_handle);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_pages(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
.release = i915_gem_object_release_phys,
};
static const struct drm_i915_gem_object_ops i915_gem_object_ops;
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
LIST_HEAD(still_in_list);
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
/* Closed vma are removed from the obj->vma_list - but they may
* still have an active binding on the object. To remove those we
* must wait for all rendering on the object to complete (as unbinding
* must anyway), and retire the requests.
*/
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
return ret;
while ((vma = list_first_entry_or_null(&obj->vma_list,
struct i915_vma,
obj_link))) {
list_move_tail(&vma->obj_link, &still_in_list);
ret = i915_vma_unbind(vma);
if (ret)
break;
}
list_splice(&still_in_list, &obj->vma_list);
return ret;
}
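/*
 * The wait helpers below follow the usual dma-fence convention: a
 * positive return value is the remaining timeout in jiffies, 0 means
 * the wait timed out, and a negative value is an error (e.g.
 * -ERESTARTSYS when an interruptible wait is interrupted).
 */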
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
unsigned int flags,
long timeout,
struct intel_rps_client *rps_client)
{
struct i915_request *rq;
BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return timeout;
if (!dma_fence_is_i915(fence))
return dma_fence_wait_timeout(fence,
flags & I915_WAIT_INTERRUPTIBLE,
timeout);
rq = to_request(fence);
if (i915_request_completed(rq))
goto out;
/*
* This client is about to stall waiting for the GPU. In many cases
* this is undesirable and limits the throughput of the system, as
* many clients cannot continue processing user input/output whilst
* blocked. RPS autotuning may take tens of milliseconds to respond
* to the GPU load and thus incurs additional latency for the client.
* We can circumvent that by promoting the GPU frequency to maximum
* before we wait. This makes the GPU throttle up much more quickly
* (good for benchmarks and user experience, e.g. window animations),
* but at a cost of spending more power processing the workload
* (bad for battery). Not all clients even want their results
* immediately and for them we should just let the GPU select its own
* frequency to maximise efficiency. To prevent a single client from
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
if (rps_client && !i915_request_started(rq)) {
if (INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq, rps_client);
}
timeout = i915_request_wait(rq, flags, timeout);
out:
if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
i915_request_retire_upto(rq);
return timeout;
}
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int flags,
long timeout,
struct intel_rps_client *rps_client)
{
unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
bool prune_fences = false;
if (flags & I915_WAIT_ALL) {
struct dma_fence **shared;
unsigned int count, i;
int ret;
ret = reservation_object_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
timeout = i915_gem_object_wait_fence(shared[i],
flags, timeout,
rps_client);
if (timeout < 0)
break;
dma_fence_put(shared[i]);
}
for (; i < count; i++)
dma_fence_put(shared[i]);
kfree(shared);
/*
* If both shared fences and an exclusive fence exist,
* then by construction the shared fences must be later
* than the exclusive fence. If we successfully wait for
* all the shared fences, we know that the exclusive fence
* must also be signaled. If all the shared fences are
* signaled, we can prune the array and recover the
* floating references on the fences/requests.
*/
prune_fences = count && timeout >= 0;
} else {
excl = reservation_object_get_excl_rcu(resv);
}
if (excl && timeout >= 0)
timeout = i915_gem_object_wait_fence(excl, flags, timeout,
rps_client);
dma_fence_put(excl);
/*
* Opportunistically prune the fences iff we know they have *all* been
* signaled and that the reservation object has not been changed (i.e.
* no new fences have been added).
*/
if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
if (reservation_object_trylock(resv)) {
if (!__read_seqcount_retry(&resv->seq, seq))
reservation_object_add_excl_fence(resv, NULL);
reservation_object_unlock(resv);
}
}
return timeout;
}
static void __fence_set_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr)
{
struct i915_request *rq;
struct intel_engine_cs *engine;
if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
return;
rq = to_request(fence);
engine = rq->engine;
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
engine->schedule(rq, attr);
rcu_read_unlock();
local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
static void fence_set_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr)
{
/* Recurse once into a fence-array */
if (dma_fence_is_array(fence)) {
struct dma_fence_array *array = to_dma_fence_array(fence);
int i;
for (i = 0; i < array->num_fences; i++)
__fence_set_priority(array->fences[i], attr);
} else {
__fence_set_priority(fence, attr);
}
}
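/*
 * Propagate the given scheduling attributes to every request the object
 * currently depends on: the exclusive fence always, and the shared
 * fences as well when I915_WAIT_ALL is set. Typically used to bump the
 * priority of work rendering into a buffer that is about to be scanned
 * out.
 */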
int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr)
{
struct dma_fence *excl;
if (flags & I915_WAIT_ALL) {
struct dma_fence **shared;
unsigned int count, i;
int ret;
ret = reservation_object_get_fences_rcu(obj->resv,
&excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
fence_set_priority(shared[i], attr);
dma_fence_put(shared[i]);
}
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(obj->resv);
}
if (excl) {
fence_set_priority(excl, attr);
dma_fence_put(excl);
}
return 0;
}
/**
* Waits for outstanding rendering to the object to complete
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes, etc.)
* @timeout: how long to wait
* @rps_client: client (user process) to charge for any waitboosting
*/
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
struct intel_rps_client *rps_client)
{
might_sleep();
GEM_BUG_ON(timeout < 0);
timeout = i915_gem_object_wait_reservation(obj->resv,
flags, timeout,
rps_client);
return timeout < 0 ? timeout : 0;
}
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
struct drm_i915_file_private *fpriv = file->driver_priv;
return &fpriv->rps_client;
}
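/*
 * pwrite fast path for "phys" objects: the data is copied straight into
 * the physically contiguous backing store, then clflushed and chipset
 * flushed so the GPU observes the new contents.
 */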
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(to_i915(obj->base.dev));
intel_fb_obj_flush(obj, ORIGIN_CPU);
return 0;
}
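/* GEM object structs are allocated from, and freed back to, a dedicated slab cache. */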
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
kmem_cache_free(dev_priv->objects, obj);
}
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
u64 size,
u32 *handle_p)
{
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
size = roundup(size, PAGE_SIZE);
if (size == 0)
return -EINVAL;
/* Allocate the new object */
obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
i915_gem_object_put(obj);
if (ret)
return ret;
*handle_p = handle;
return 0;
}
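/*
 * Illustrative only (not part of the driver): userspace reaches this
 * through the generic dumb-buffer ioctl, roughly:
 *
 *	struct drm_mode_create_dumb arg = { .width = 1920, .height = 1080, .bpp = 32 };
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
 *
 * On success arg.handle, arg.pitch and arg.size are filled in; the pitch
 * is aligned to 64 bytes here and the size rounded up to whole pages by
 * i915_gem_create().
 */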
int
i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, to_i915(dev),
args->size, &args->handle);
}
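/*
 * GPU writes to a cacheable object (anything other than uncached or
 * write-through) may land in the CPU cache hierarchy rather than memory,
 * so the object must be marked cache_dirty and clflushed before the
 * backing pages are read through an uncached path (e.g. scanout).
 */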
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
obj->cache_level == I915_CACHE_WT);
}
/**
* Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_create *args = data;
i915_gem_flush_free_objects(dev_priv);
return i915_gem_create(file, dev_priv,
args->size, &args->handle);
}
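/*
 * Frontbuffer tracking needs to know how a write reached the object:
 * writes through a GTT mmap use the origin recorded on the object,
 * everything else is attributed to the CPU.
 */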
static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
return (domain == I915_GEM_DOMAIN_GTT ?
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref;
/*
* No actual flushing is required for the GTT write domain for reads
* from the GTT domain. Writes to it "immediately" go to main memory
* as far as we know, so there's no chipset flush. It also doesn't
* land in the GPU render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
* the GATT itself.
*
* We also have to wait a bit for the writes to land from the GTT.
* An uncached read (i.e. mmio) seems to be ideal for the round-trip
* timing. This issue has only been observed when switching quickly
* between GTT writes and CPU reads from inside the kernel on recent hw,
* and it appears to only affect discrete GTT blocks (i.e. on LLC
* system agents we cannot reproduce this behaviour, until Cannonlake
* that was!).
*/
wmb();
if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
return;
i915_gem_chipset_flush(dev_priv);
with_intel_runtime_pm(dev_priv, wakeref) {
spin_lock_irq(&dev_priv->uncore.lock);
POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
spin_unlock_irq(&dev_priv->uncore.lock);
}
}
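/*
 * Flush any writes pending in the object's current write domain before
 * the object is used elsewhere: GGTT writes get the global flush above,
 * WC writes a memory barrier, CPU writes a clflush, and cacheable GPU
 * (render) writes are noted via obj->cache_dirty.
 */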
static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
if (!(obj->write_domain & flush_domains))
return;
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_flush_ggtt_writes(dev_priv);
intel_fb_obj_flush(obj,
fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
for_each_ggtt_vma(vma, obj) {
if (vma->iomap)
continue;
i915_vma_unset_ggtt_write(vma);
}
break;
case I915_GEM_DOMAIN_WC:
wmb();
break;
case I915_GEM_DOMAIN_CPU:
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
case I915_GEM_DOMAIN_RENDER:
if (gpu_write_needs_clflush(obj))
obj->cache_dirty = true;
break;
}
obj->write_domain = 0;
}
/*
* Pins the specified object's pages and synchronizes the object with
* GPU accesses. Sets needs_clflush to non-zero if the caller should
* flush the object from the CPU cache.
*/
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush)
{
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
*needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT,
NULL);
if (ret)
return ret;
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
goto err_unpin;
else
goto out;
}
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the CPU read domain, manually flush the
 * cachelines before reading (if required) instead of moving the
 * whole object into the CPU domain. This optimizes for the case
 * when the gpu will dirty the data anyway again before the next
 * pread happens.
 */
if (!obj->cache_dirty &&
!(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = CLFLUSH_BEFORE;
out:
/* return with the pages pinned */
return 0;
err_unpin:
i915_gem_object_unpin_pages(obj);
return ret;
}
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush)
{
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
*needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT,
NULL);
if (ret)
return ret;
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
goto err_unpin;
else
goto out;
}
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the CPU write domain, manually flush the
 * cachelines after writing (as required) instead of moving the
 * whole object into the CPU domain. This optimizes for the case
 * when the gpu will use the data right away and we therefore
 * have to clflush anyway.
 */
if (!obj->cache_dirty) {
*needs_clflush |= CLFLUSH_AFTER;
/*
* Same trick applies to invalidate partially written
* cachelines read before writing.
*/
if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
}
out:
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
err_unpin:
i915_gem_object_unpin_pages(obj);
return ret;
}
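/*
 * Copy one page of the object out to userspace, clflushing first when
 * the caller determined the cachelines may not be coherent.
 */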
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
{
char *vaddr;
int ret;
vaddr = kmap(page);
if (needs_clflush)
drm_clflush_virt_range(vaddr + offset, len);
ret = __copy_to_user(user_data, vaddr + offset, len);
kunmap(page);
return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args)
{
char __user *user_data;
u64 remain;
unsigned int needs_clflush;
unsigned int idx, offset;
int ret;
ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
if (ret)
return ret;
ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
mutex_unlock(&obj->base.dev->struct_mutex);
if (ret)
return ret;
remain = args->size;
user_data = u64_to_user_ptr(args->data_ptr);
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pread(page, offset, length, user_data,
needs_clflush);
if (ret)
break;
remain -= length;
user_data += length;
offset = 0;
}
i915_gem_obj_finish_shmem_access(obj);
return ret;
}
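/*
 * Read from a GGTT (write-combined) mapping into user memory. An atomic
 * mapping is tried first; if the copy faults on the user buffer, fall
 * back to a regular mapping and a sleeping copy_to_user().
 */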
static inline bool
gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_to_user_inatomic(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_to_user(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap(vaddr);
}
return unwritten;
}
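/*
 * Fallback pread through the GGTT aperture. Prefer pinning the whole
 * object into the mappable region; failing that, borrow a single scratch
 * page in the aperture and rebind it for every page copied, with wmb()s
 * ordering the PTE updates against the reads.
 */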
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
void __user *user_data;
u64 remain, offset;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
wakeref = intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
ret = i915_vma_put_fence(vma);
if (ret) {
i915_vma_unpin(vma);
vma = ERR_PTR(ret);
}
}
if (IS_ERR(vma)) {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_unlock;
GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
goto out_unpin;
mutex_unlock(&i915->drm.struct_mutex);
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = args->offset;
while (remain > 0) {
/* Operation in this page
*
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb();
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
}
if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
out_unlock:
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
/**
* Reads data from the object referenced by handle.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
*
* On error, the contents of *data are undefined.
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
int ret;
if (args->size == 0)
return 0;
if (!access_ok(u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Bounds check source. */
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto out;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
if (ret)
goto out;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto out;
ret = i915_gem_shmem_pread(obj, args);
if (ret == -EFAULT || ret == -ENODEV)
ret = i915_gem_gtt_pread(obj, args);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
return ret;
}
/* This is the fast write path which cannot handle
* page faults in the source data
*/
static inline bool
ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_from_user((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap(vaddr);
}
return unwritten;
}
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
* @obj: i915 GEM object
* @args: pwrite arguments structure
*/
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
u64 remain, offset;
void __user *user_data;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
* waking/resuming is very slow (worst-case 10-100 ms
* depending on PCI sleeps and our own resume time).
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
wakeref = intel_runtime_pm_get_if_in_use(i915);
if (!wakeref) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
wakeref = intel_runtime_pm_get(i915);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
ret = i915_vma_put_fence(vma);
if (ret) {
i915_vma_unpin(vma);
vma = ERR_PTR(ret);
}
}
if (IS_ERR(vma)) {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
GEM_BUG_ON(!node.allocated);
}
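	/*
	 * At this point we either have the whole object pinned into the
	 * mappable aperture (node.allocated is false) or a single page-sized
	 * GGTT node that the copy loop below repoints at each object page in
	 * turn via insert_page().
	 */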
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
mutex_unlock(&i915->drm.struct_mutex);
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
remain = args->size;
while (remain) {
/* Operation in this page
*
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
}
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
intel_fb_obj_flush(obj, ORIGIN_CPU);
mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
out_rpm:
intel_runtime_pm_put(i915, wakeref);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
* Flushes invalid cachelines before writing to the target if
* needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
*/
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush_before,
bool needs_clflush_after)
{
char *vaddr;
int ret;
vaddr = kmap(page);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + offset, len);
ret = __copy_from_user(vaddr + offset, user_data, len);
if (!ret && needs_clflush_after)
drm_clflush_virt_range(vaddr + offset, len);
kunmap(page);
return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
void __user *user_data;
u64 remain;
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
mutex_unlock(&i915->drm.struct_mutex);
if (ret)
return ret;
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
 * overcomplicate things and flush the entire page.
*/
partial_cacheline_write = 0;
if (needs_clflush & CLFLUSH_BEFORE)
partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
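	/*
	 * With a 64 byte cacheline the mask is 63: a write of, say, offset 128
	 * and length 256 gives (128 | 256) & 63 == 0, so every touched
	 * cacheline is overwritten in full and no flush is needed beforehand,
	 * whereas an offset or length of 100 leaves a non-zero remainder and
	 * forces the clflush in shmem_pwrite().
	 */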
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pwrite(page, offset, length, user_data,
(offset | length) & partial_cacheline_write,
needs_clflush & CLFLUSH_AFTER);
if (ret)
break;
remain -= length;
user_data += length;
offset = 0;
}
intel_fb_obj_flush(obj, ORIGIN_CPU);
i915_gem_obj_finish_shmem_access(obj);
return ret;
}
/**
 * i915_gem_pwrite_ioctl - writes data to the object referenced by handle
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
if (args->size == 0)
return 0;
if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
return -EFAULT;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Bounds check destination. */
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto err;
}
/* Writes not allowed into this read-only object */
if (i915_gem_object_is_readonly(obj)) {
ret = -EINVAL;
goto err;
}
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -ENODEV;
if (obj->ops->pwrite)
ret = obj->ops->pwrite(obj, args);
if (ret != -ENODEV)
goto err;
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
if (ret)
goto err;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
* textures). Fallback to the shmem path in that case.
*/
ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else
ret = i915_gem_shmem_pwrite(obj, args);
}
i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return ret;
}
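/*
 * Illustrative userspace sketch (not part of this file): a client pushes data
 * into a GEM object with DRM_IOCTL_I915_GEM_PWRITE on its DRM fd, using the
 * handle obtained from DRM_IOCTL_I915_GEM_CREATE; which of the paths above
 * (phys, GTT fast path or shmem) services the copy is transparent to it.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(payload),
 *		.data_ptr = (__u64)(uintptr_t)payload,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */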
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct list_head *list;
struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
mutex_lock(&i915->ggtt.vm.mutex);
for_each_ggtt_vma(vma, obj) {
if (!drm_mm_node_allocated(&vma->node))
continue;
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
mutex_unlock(&i915->ggtt.vm.mutex);
spin_lock(&i915->mm.obj_lock);
list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
list_move_tail(&obj->mm.link, list);
spin_unlock(&i915->mm.obj_lock);
}
/**
 * i915_gem_set_domain_ioctl - called when user space prepares to use an
 * object with the CPU, either through the mmap ioctl's mapping or a GTT
 * mapping.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_domain *args = data;
struct drm_i915_gem_object *obj;
u32 read_domains = args->read_domains;
u32 write_domain = args->write_domain;
int err;
/* Only handle setting domains to types used by the CPU. */
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
return -EINVAL;
/* Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
err = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
if (err)
goto out;
/*
* Proxy objects do not control access to the backing storage, ergo
* they cannot be used as a means to manipulate the cache domain
* tracking for that backing storage. The proxy object is always
* considered to be outside of any cache domain.
*/
if (i915_gem_object_is_proxy(obj)) {
err = -ENXIO;
goto out;
}
/*
* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
* being notified and releasing the pages, we would mistakenly
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
err = i915_gem_object_pin_pages(obj);
if (err)
goto out;
err = i915_mutex_lock_interruptible(dev);
if (err)
goto out_unpin;
if (read_domains & I915_GEM_DOMAIN_WC)
err = i915_gem_object_set_to_wc_domain(obj, write_domain);
else if (read_domains & I915_GEM_DOMAIN_GTT)
err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
/* And bump the LRU for this access */
i915_gem_object_bump_inactive_ggtt(obj);
mutex_unlock(&dev->struct_mutex);
if (write_domain != 0)
intel_fb_obj_invalidate(obj,
fb_write_origin(obj, write_domain));
out_unpin:
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
return err;
}
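/*
 * Illustrative userspace sketch (not part of this file): before touching a
 * CPU mmap of the object, a client moves it into the CPU domain; GPU domains
 * and mismatched read/write domains are rejected with -EINVAL above.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */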
/**
 * i915_gem_sw_finish_ioctl - called when user space has done writes to this
 * buffer
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_i915_gem_object *obj;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/*
* Proxy objects are barred from CPU access, so there is no
* need to ban sw_finish as it is a nop.
*/
/* Pinned buffers may be scanout, so flush the cache */
i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
return 0;
}
/**
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
* it is mapped to.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
*
* IMPORTANT:
*
 * DRM driver writers who look at this function as an example for how to do GEM
* mmap support, please don't implement mmap support like here. The modern way
* to implement DRM mmap support is with an mmap offset ioctl (like
* i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
* That way debug tooling like valgrind will understand what's going on, hiding
* the mmap call in a driver private ioctl will break that. The i915 driver only
* does cpu mmaps this way because we didn't know better.
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_mmap *args = data;
struct drm_i915_gem_object *obj;
unsigned long addr;
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
return -ENODEV;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* prime objects have no backing filp to GEM mmap
* pages from.
*/
if (!obj->base.filp) {
i915_gem_object_put(obj);
return -ENXIO;
}
addr = vm_mmap(obj->base.filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
if (args->flags & I915_MMAP_WC) {
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
i915_gem_object_put(obj);
return -EINTR;
}
vma = find_vma(mm, addr);
if (vma)
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
up_write(&mm->mmap_sem);
/* This may race, but that's ok, it only gets set */
WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
i915_gem_object_put(obj);
if (IS_ERR((void *)addr))
return addr;
args->addr_ptr = (u64)addr;
return 0;
}
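/*
 * Illustrative userspace sketch (not part of this file): the legacy CPU mmap
 * of a shmem-backed object, optionally write-combined (requires PAT, as
 * checked above). Imported dma-buf objects have no obj->base.filp and fail
 * with -ENXIO; they must be mapped through their dma-buf fd instead.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size   = obj_size,
 *		.flags  = I915_MMAP_WC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */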
static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
/**
* i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
*
* A history of the GTT mmap interface:
*
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
* aligned and suitable for fencing, and still fit into the available
* mappable space left by the pinned display objects. A classic problem
* we called the page-fault-of-doom where we would ping-pong between
* two objects that could not fit inside the GTT and so the memcpy
* would page one object in at the expense of the other between every
* single byte.
*
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
* as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
* object is too large for the available space (or simply too large
* for the mappable aperture!), a view is created instead and faulted
* into userspace. (This view is aligned and sized appropriately for
* fenced access.)
*
* 2 - Recognise WC as a separate cache domain so that we can flush the
* delayed writes via GTT before performing direct access via WC.
*
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
* hangs on some architectures, corruption on others. An attempt to service
* a GTT page fault from a snoopable object will generate a SIGBUS.
*
 * * the object must be able to fit into RAM (physical memory, though not
* limited to the mappable aperture).
*
*
* Caveats:
*
* * a new GTT page fault will synchronize rendering from the GPU and flush
* all data to system memory. Subsequent access will not be synchronized.
*
* * all mappings are revoked on runtime device suspend.
*
* * there are only 8, 16 or 32 fence registers to share between all users
 * (older machines require a fence register for display and blitter access
* as well). Contention of the fence registers will cause the previous users
* to be unmapped and any new access will generate new page faults.
*
* * running out of memory while servicing a fault may generate a SIGBUS,
* rather than the expected SIGSEGV.
*/
int i915_gem_mmap_gtt_version(void)
{
return 2;
}
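/*
 * Illustrative userspace sketch (not part of this file): clients can probe
 * the feature level documented above before relying on partial views
 * (version >= 1) or the WC domain (version >= 2):
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */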
static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
pgoff_t page_offset,
unsigned int chunk)
{
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj));
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
view.partial.size =
min_t(unsigned int, chunk,
(obj->base.size >> PAGE_SHIFT) - view.partial.offset);
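	/*
	 * For example, an untiled 16 MiB object (4096 pages) faulting at page
	 * 1000 with chunk = MIN_CHUNK_PAGES (256 pages) ends up with a partial
	 * view covering pages [768, 1024).
	 */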
/* If the partial covers the entire object, just create a normal VMA. */
if (chunk >= obj->base.size >> PAGE_SHIFT)
view.type = I915_GGTT_VIEW_NORMAL;
return view;
}
/**
* i915_gem_fault - fault a page into the GTT
* @vmf: fault info
*
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
* from userspace. The fault handler takes care of binding the object to
* the GTT (if needed), allocating and programming a fence register (again,
* only if needed based on whether the old reg is still valid or the object
* is tiled) and inserting a new PTE into the faulting process.
*
* Note that the faulting process may involve evicting existing objects
* from the GTT and/or fence registers to make room. So performance may
* suffer if the GTT working set is large or there are few fence registers
* left.
*
* The current feature set supported by i915_gem_fault() and thus GTT mmaps
* is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
*/
vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool write = area->vm_flags & VM_WRITE;
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
int ret;
/* Sanity check that we allow writing into this object */
if (i915_gem_object_is_readonly(obj) && write)
return VM_FAULT_SIGBUS;
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Try to flush the object off the GPU first without holding the lock.
* Upon acquiring the lock, we will perform our sanity checks and then
* repeat the flush holding the lock in the normal manner to catch cases
* where we are gazumped.
*/
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT,
NULL);
if (ret)
goto err;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
wakeref = intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto err_rpm;
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
ret = -EFAULT;
goto err_unlock;
}
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK |
PIN_NONFAULT);
if (IS_ERR(vma)) {
/* Use a partial view if the object is bigger than the available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
flags = PIN_MAPPABLE;
if (view.type == I915_GGTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
/*
* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
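/*
 * If the computed view covered the whole object and even that could
 * not be pinned without blocking, fall back to a partial view around
 * the faulting page and allow eviction this time.
 */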
if (IS_ERR(vma) && !view.type) {
flags = PIN_MAPPABLE;
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
}
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unlock;
}
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto err_unpin;
ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
if (ret)
goto err_fence;
/* Mark as being mmapped into userspace for later revocation */
assert_rpm_wakelock_held(dev_priv);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
GEM_BUG_ON(!obj->userfault_count);
i915_vma_set_ggtt_write(vma);
err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(dev_priv, wakeref);
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
case -EIO:
/*
* We eat errors when the gpu is terminally wedged to avoid
* userspace unduly crashing (gl has no provisions for mmaps to
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
if (!i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
/* else: fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
* handler to reset everything when re-faulting in
* i915_mutex_lock_interruptible.
*/
case 0:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
/*
* EBUSY is ok: this just means that another thread
* already did the job.
*/
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
return VM_FAULT_SIGBUS;
}
}
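/*
 * Zap the CPU PTEs for every userspace GTT mmap of this object so that
 * the next access faults back into i915_gem_fault(), and clear the
 * per-object and per-vma userfault tracking.
 */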
static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
GEM_BUG_ON(!obj->userfault_count);
obj->userfault_count = 0;
list_del(&obj->userfault_link);
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
for_each_ggtt_vma(vma, obj)
i915_vma_unset_userfault(vma);
}
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
*
* Preserve the reservation of the mmapping with the DRM core code, but
* relinquish ownership of the pages back to the system.
*
* It is vital that we remove the page mapping if we have mapped a tiled
* object through the GTT and then lose the fence register due to
* resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
* mapping will then trigger a page fault on the next user access, allowing
* fixup by i915_gem_fault().
*/
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*
* Note that RPM complicates this somewhat by adding the additional
* requirement that operations on the GGTT be made while holding the
* RPM wakeref.
*/
lockdep_assert_held(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (!obj->userfault_count)
goto out;
__i915_gem_object_release_mmap(obj);
/* Ensure that the CPU's PTEs are revoked and there are no outstanding
* memory transactions from userspace before we return. The TLB
* flushing implied by changing the PTEs above *should* be
* sufficient, an extra barrier here just provides us with a bit
* of paranoid documentation about our requirement to serialise
* memory writes before touching registers / GSM.
*/
wmb();
out:
intel_runtime_pm_put(i915, wakeref);
}
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *on;
int i;
/*
* Only called during RPM suspend. All users of the userfault_list
* must be holding an RPM wakeref to ensure that this can not
* run concurrently with themselves (and use the struct_mutex for
* protection between themselves).
*/
list_for_each_entry_safe(obj, on,
&dev_priv->mm.userfault_list, userfault_link)
__i915_gem_object_release_mmap(obj);
/* The fences will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
/* Ideally we want to assert that the fence register is not
* live at this point (i.e. that no piece of code will be
* trying to write through fence + GTT, as that both violates
* our tracking of activity and associated locking/barriers,
* and is illegal given that the hw is powered down).
*
* Previously we used reg->pin_count as a "liveness" indicator.
* That is not sufficient, and we need a more fine-grained
* tool if we want to have a sanity check here.
*/
if (!reg->vma)
continue;
GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
reg->dirty = true;
}
}
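/*
 * Reserve a node in the DRM vma manager so the object can be mapped via
 * a fake GTT offset. If the offset space is exhausted, idle the GPU and
 * drain freed objects to recover stale offsets, then retry.
 */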
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int err;
err = drm_gem_create_mmap_offset(&obj->base);
if (likely(!err))
return 0;
/* Attempt to reap some mmap space from dead objects */
do {
err = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (err)
break;
i915_gem_drain_freed_objects(dev_priv);
err = drm_gem_create_mmap_offset(&obj->base);
if (!err)
break;
} while (flush_delayed_work(&dev_priv->gt.retire_work));
return err;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
drm_gem_free_mmap_offset(&obj->base);
}
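/*
 * Look up the object for @handle and report the fake offset that
 * userspace passes to mmap() on the DRM fd to map it via the GTT.
 */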
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
u32 handle,
u64 *offset)
{
struct drm_i915_gem_object *obj;
int ret;
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
ret = i915_gem_object_create_mmap_offset(obj);
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
i915_gem_object_put(obj);
return ret;
}
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: DRM file private of the caller
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
* up so we can get faults in the handler above.
*
* The fault handler will take care of binding the object into the GTT
* (since it may have been evicted to make room for something), allocating
* a fence register, and mapping the appropriate aperture address into
* userspace.
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_mmap_gtt *args = data;
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
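/*
 * Illustrative userspace sketch (not part of this file; names are
 * placeholders): the fake offset returned above is handed straight to
 * mmap() on the DRM fd.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */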
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
i915_gem_object_free_mmap_offset(obj);
if (obj->base.filp == NULL)
return;
/* Our goal here is to return as much of the memory as
* possible back to the system, as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->mm.madv = __I915_MADV_PURGED;
obj->mm.pages = ERR_PTR(-EFAULT);
}
/* Try to discard unwanted pages */
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;
lockdep_assert_held(&obj->mm.lock);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
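/* fall through - truncation marks the object purged */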
case __I915_MADV_PURGED:
return;
}
if (obj->base.filp == NULL)
return;
mapping = obj->base.filp->f_mapping;
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
* ref count of those pages.
*/
static void check_release_pagevec(struct pagevec *pvec)
{
check_move_unevictable_pages(pvec);
__pagevec_release(pvec);
cond_resched();
}
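/*
 * Release the shmemfs backing pages: undo the DMA mapping, preserve
 * bit17 swizzle state if required, mark dirty pages for writeback and
 * return them to the system via the pagevec helper above.
 */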
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct pagevec pvec;
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
if (!pagevec_add(&pvec, page))
check_release_pagevec(&pvec);
}
if (pagevec_count(&pvec))
check_release_pagevec(&pvec);
obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
}
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
void __rcu **slot;
rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
rcu_read_unlock();
}
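/*
 * Detach the backing store from the object: drop it from the global
 * object list, tear down any kernel vmap/kmap and the cached page
 * lookup radixtree, then hand the sg_table back to the caller.
 */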
static struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
spin_unlock(&i915->mm.obj_lock);
if (obj->mm.mapping) {
void *ptr;
ptr = page_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
obj->mm.mapping = NULL;
}
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
return pages;
}
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass)
{
struct sg_table *pages;
int ret;
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
GEM_BUG_ON(obj->bind_count);
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, subclass);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
ret = -EBUSY;
goto unlock;
}
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early.
*/
pages = __i915_gem_object_unset_pages(obj);
/*
* XXX Temporary hijinx to avoid updating all backends to handle
* NULL pages. In the future, when we have more asynchronous
* get_pages backends we should be better able to handle the
* cancellation of the async task in a more uniform manner.
*/
if (!pages && !i915_gem_object_needs_async_cancel(obj))
pages = ERR_PTR(-EINVAL);
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
ret = 0;
unlock:
mutex_unlock(&obj->mm.lock);
return ret;
}
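/*
 * Repack an over-allocated sg_table into one sized to the entries
 * actually used, releasing the slack. Returns true if the table was
 * replaced.
 */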
bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
struct scatterlist *sg, *new_sg;
unsigned int i;
if (orig_st->nents == orig_st->orig_nents)
return false;
if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
return false;
new_sg = new_st.sgl;
for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, 0);
sg_dma_address(new_sg) = sg_dma_address(sg);
sg_dma_len(new_sg) = sg_dma_len(sg);
new_sg = sg_next(new_sg);
}
GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
sg_free_table(orig_st);
*orig_st = new_st;
return true;
}
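/*
 * Populate the object with pages from its shmemfs file and build the
 * scatterlist describing them. Allocations start cheap (no reclaim)
 * and escalate, see the retry loop below.
 */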
static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment = i915_sg_segment_size();
unsigned int sg_page_sizes;
struct pagevec pvec;
gfp_t noreclaim;
int ret;
/*
* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
/*
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
if (page_count > totalram_pages())
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
/*
* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl;
st->nents = 0;
sg_page_sizes = 0;
for (i = 0; i < page_count; i++) {
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
0,
}, *s = shrink;
gfp_t gfp = noreclaim;
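/*
 * Try a cheap, no-reclaim allocation first. On failure, shrink our
 * own caches (asking for twice what we need) and retry with the
 * mapping's full gfp mask before finally reporting ENOMEM.
 */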
do {
cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (likely(!IS_ERR(page)))
break;
if (!*s) {
ret = PTR_ERR(page);
goto err_sg;
}
i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
/*
* We've tried hard to allocate the memory by reaping
* our own buffers; now let the real VM do its job and
* go down in flames if truly OOM.
*
* However, since graphics tend to be disposable,
* defer the oom here by reporting the ENOMEM back
* to userspace.
*/
if (!*s) {
/* reclaim and warn, but no oom */
gfp = mapping_gfp_mask(mapping);
/*
* Our bo are always dirty and so we require
* kswapd to reclaim our pages (direct reclaim
* does not effectively begin pageout of our
* buffers on its own). However, direct reclaim
* only waits for kswapd when under allocation
* congestion. So as a result __GFP_RECLAIM is
* unreliable and fails to actually reclaim our
* dirty pages -- unless you try over and over
* again with !__GFP_NORETRY. However, we still
* want to fail this allocation rather than
* trigger the out-of-memory killer and for
* this we want __GFP_RETRY_MAYFAIL.
*/
gfp |= __GFP_RETRY_MAYFAIL;
}
} while (1);
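		/*
		 * Coalesce runs of physically contiguous pages into a
		 * single sg entry, splitting whenever we hit the DMA
		 * segment limit, so the resulting scatterlist stays as
		 * compact as possible.
		 */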
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
if (i) {
sg_page_sizes |= sg->length;
sg = sg_next(sg);
}
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
} else {
sg->length += PAGE_SIZE;
}
last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
if (sg) { /* loop terminated early; short sg table */
sg_page_sizes |= sg->length;
sg_mark_end(sg);
}
/* Trim unused sg entries to avoid wasting memory. */
i915_sg_trim(st);
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
/*
* DMA remapping failed? One possible cause is that
* it could not reserve enough large entries, asking
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
max_segment = PAGE_SIZE;
goto rebuild_st;
} else {
dev_warn(&dev_priv->drm.pdev->dev,
"Failed to DMA remap %lu pages\n",
page_count);
goto err_pages;
}
}
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
err_sg:
sg_mark_end(sg);
err_pages:
mapping_clear_unevictable(mapping);
pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, st) {
if (!pagevec_add(&pvec, page))
check_release_pagevec(&pvec);
}
if (pagevec_count(&pvec))
check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
/*
* shmemfs first checks if there is enough memory to allocate the page
 * and reports ENOSPC should there be insufficient memory, along with the usual
* ENOMEM for a genuine allocation failure.
*
* We use ENOSPC in our driver to mean that we have run out of aperture
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
if (ret == -ENOSPC)
ret = -ENOMEM;
return ret;
}
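/*
 * Publish a newly built sg_table as the object's backing pages and work out
 * which GTT page sizes it can support. The caller must hold obj->mm.lock,
 * as asserted below.
 */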
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
int i;
lockdep_assert_held(&obj->mm.lock);
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
obj->mm.pages = pages;
if (i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
GEM_BUG_ON(!sg_page_sizes);
obj->mm.page_sizes.phys = sg_page_sizes;
/*
* Calculate the supported page-sizes which fit into the given
* sg_page_sizes. This will give us the page-sizes which we may be able
* to use opportunistically when later inserting into the GTT. For
* example if phys=2G, then in theory we should be able to use 1G, 2M,
* 64K or 4K pages, although in practice this will depend on a number of
* other factors.
*/
obj->mm.page_sizes.sg = 0;
for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
if (obj->mm.page_sizes.phys & ~0u << i)
obj->mm.page_sizes.sg |= BIT(i);
}
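	/*
	 * In other words, a page size is marked usable here iff at least
	 * one contiguous chunk in the sg list is large enough to hold it.
	 */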
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
spin_lock(&i915->mm.obj_lock);
list_add(&obj->mm.link, &i915->mm.unbound_list);
spin_unlock(&i915->mm.obj_lock);
}
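/*
 * Fetch the backing pages through the object's backend ops (shmem, userptr,
 * stolen, ...). Called under obj->mm.lock; objects already marked purgeable
 * are refused.
 */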
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
err = obj->ops->get_pages(obj);
GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
return err;
}
/* Ensure that the associated pages are gathered from the backing storage
* and pinned into our object. i915_gem_object_pin_pages() may be called
* multiple times before they are released by a single call to
* i915_gem_object_unpin_pages() - once the pages are no longer referenced
* either as a result of memory pressure (reaping pages under the shrinker)
* or as the object is itself released.
*/
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
err = mutex_lock_interruptible(&obj->mm.lock);
if (err)
return err;
if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
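		/*
		 * Make the freshly installed obj->mm.pages visible before
		 * the pin count can be observed as non-zero; presumed to
		 * pair with the lockless atomic_inc_not_zero() fast path
		 * in i915_gem_object_pin_pages().
		 */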
smp_mb__before_atomic();
}
atomic_inc(&obj->mm.pages_pin_count);
unlock:
mutex_unlock(&obj->mm.lock);
return err;
}
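/*
 * Illustrative pairing for the pin/unpin contract described above (a sketch
 * with hypothetical names, not a call site in this file):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * i915_gem_object_pin_pages() takes the fast path when the pages are already
 * pinned and otherwise falls back to the function above.
 */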
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->mm.pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
struct page **pages = stack_pages;
unsigned long i = 0;
pgprot_t pgprot;
void *addr;
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
if (n_pages > ARRAY_SIZE(stack_pages)) {
/* Too big for stack -- allocate temporary array instead */
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
}
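	/* Flatten the scatterlist into a plain page array for vmap(). */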
for_each_sgt_page(page, sgt_iter, sgt)
pages[i++] = page;
/* Check that we have the expected number of pages */
GEM_BUG_ON(i != n_pages);
switch (type) {
default:
MISSING_CASE(type);
/* fallthrough to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
case I915_MAP_WC:
pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
break;
}
addr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack_pages)
kvfree(pages);
return addr;
}
/* get, pin, and map the pages of the object into kernel space */
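/*
 * Usage sketch (illustrative only, with hypothetical names; not a call site
 * in this file):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, len);
 *	i915_gem_object_unpin_map(obj);
 *
 * The mapping is cached on the object, so repeat calls asking for the same
 * type are cheap; asking for a different type while the pages are pinned
 * elsewhere fails with -EBUSY unless I915_MAP_OVERRIDE is set.
 */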
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
bool pinned;
void *ptr;
int ret;
if (unlikely(!i915_gem_object_has_struct_page(obj)))
return ERR_PTR(-ENXIO);
ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
return ERR_PTR(ret);
pinned = !(type & I915_MAP_OVERRIDE);
type &= ~I915_MAP_OVERRIDE;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
smp_mb__before_atomic();
}
atomic_inc(&obj->mm.pages_pin_count);
pinned = false;
}
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
goto err_unpin;
}
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
ptr = obj->mm.mapping = NULL;
}
if (!ptr) {
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
ret = -ENOMEM;
goto err_unpin;
}
obj->mm.mapping = page_pack_bits(ptr, type);
}
out_unlock:
mutex_unlock(&obj->mm.lock);
return ptr;
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
ptr = ERR_PTR(ret);
goto out_unlock;
}
static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
{
struct address_space *mapping = obj->base.filp->f_mapping;
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
u64 remain, offset;
unsigned int pg;
/* Before we instantiate/pin the backing store for our use, we
* can prepopulate the shmemfs filp efficiently using a write into
* the pagecache. We avoid the penalty of instantiating all the
* pages, important if the user is just writing to a few and never
* uses the object on the GPU, and using a direct write into shmemfs
* allows it to avoid the cost of retrieving a page (either swapin
* or clearing-before-use) before it is overwritten.
*/
if (i915_gem_object_has_pages(obj))
return -ENODEV;
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
/* Before the pages are instantiated the object is treated as being
* in the CPU domain. The pages will be clflushed as required before
* use, and we can freely write into the pages directly. If userspace
 * races pwrite with any other operation, corruption will ensue -
* that is userspace's prerogative!
*/
remain = arg->size;
offset = arg->offset;
pg = offset_in_page(offset);
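	/*
	 * Write in page-sized chunks through the pagecache hooks so that
	 * shmemfs allocates each backing page and marks it dirty/uptodate
	 * on our behalf.
	 */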
do {
unsigned int len, unwritten;
struct page *page;
void *data, *vaddr;
int err;
len = PAGE_SIZE - pg;
if (len > remain)
len = remain;
err = pagecache_write_begin(obj->base.filp, mapping,
offset, len, 0,
&page, &data);
if (err < 0)
return err;
vaddr = kmap(page);
unwritten = copy_from_user(vaddr + pg, user_data, len);
kunmap(page);
err = pagecache_write_end(obj->base.filp, mapping,
offset, len, len - unwritten,
page, data);
if (err < 0)
return err;
if (unwritten)
return -EFAULT;
remain -= len;
user_data += len;
offset += len;
pg = 0;
} while (remain);
return 0;
}
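/*
 * Return the oldest request on the engine's timeline that has not yet
 * completed, i.e. the request the engine is presumed to have been executing
 * at the time of the hang or capture.
 */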
struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
unsigned long flags;
/*
 * We are called by the error capture, by reset and to dump engine
* state at random points in time. In particular, note that neither is
* crucially ordered with an interrupt. After a hang, the GPU is dead
* and we assume that no more writes can happen (we waited long enough
* for all writes that were in transaction to be flushed) - adding an
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
spin_lock_irqsave(&engine->timeline.lock, flags);
list_for_each_entry(request, &engine->timeline.requests, link) {
if (__i915_request_completed(request, request->global_seqno))
continue;
active = request;
break;
}
spin_unlock_irqrestore(&engine->timeline.lock, flags);
return active;
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), gt.retire_work.work);
struct drm_device *dev = &dev_priv->drm;
/* Come back later if the device is busy... */
if (mutex_trylock(&dev->struct_mutex)) {
i915_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
/*
* Keep the retire handler running until we are finally idle.
* We do not need to do this test under locking as in the worst-case
* we queue the retire worker once too often.
*/
if (READ_ONCE(dev_priv->gt.awake))
queue_delayed_work(dev_priv->wq,
&dev_priv->gt.retire_work,
round_jiffies_up_relative(HZ));
}
static void shrink_caches(struct drm_i915_private *i915)
{
/*
* kmem_cache_shrink() discards empty slabs and reorders partially
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
kmem_cache_shrink(i915->priorities);
kmem_cache_shrink(i915->dependencies);
kmem_cache_shrink(i915->requests);
kmem_cache_shrink(i915->luts);
kmem_cache_shrink(i915->vmas);
kmem_cache_shrink(i915->objects);
}
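/*
 * Reaping the slab caches is deferred in two stages so that it only runs
 * once the GPU has stayed idle for a full RCU grace period: an RCU
 * callback (__sleep_rcu) hands over to a work item (__sleep_work) on the
 * driver workqueue. The captured epoch lets either stage bail out cheaply
 * if the GPU woke up again in the meantime.
 */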
struct sleep_rcu_work {
union {
struct rcu_head rcu;
struct work_struct work;
};
struct drm_i915_private *i915;
unsigned int epoch;
};
static inline bool
same_epoch(struct drm_i915_private *i915, unsigned int epoch)
{
/*
* There is a small chance that the epoch wrapped since we started
* sleeping. If we assume that epoch is at least a u32, then it will
* take at least 2^32 * 100ms for it to wrap, or roughly 13 years.
*/
return epoch == READ_ONCE(i915->gt.epoch);
}
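/* Stage two: runs on the driver wq after the RCU grace period has elapsed. */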
static void __sleep_work(struct work_struct *work)
{
struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
struct drm_i915_private *i915 = s->i915;
unsigned int epoch = s->epoch;
kfree(s);
if (same_epoch(i915, epoch))
shrink_caches(i915);
}
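/* Stage one: RCU callback; requeue onto the wq unless the GPU has woken up. */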
static void __sleep_rcu(struct rcu_head *rcu)
{
struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
struct drm_i915_private *i915 = s->i915;
destroy_rcu_head(&s->rcu);
if (same_epoch(i915, s->epoch)) {
INIT_WORK(&s->work, __sleep_work);
queue_work(i915->wq, &s->work);
} else {
kfree(s);
}
}
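/*
 * Check for new requests (or a re-armed idle worker) since the last
 * retirement; used to abort the transition to the idle state early.
 */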
static inline bool
new_requests_since_last_retire(const struct drm_i915_private *i915)
{
return (READ_ONCE(i915->gt.active_requests) ||
work_pending(&i915->gt.idle_work.work));
}
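/*
 * Debug assertion (GEM_BUG_ON compiles away on non-debug builds): with no
 * requests in flight, every engine should have retired into the pinned
 * kernel context, unless the GPU is terminally wedged.
 */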
static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (i915_terminally_wedged(&i915->gpu_error))
return;
GEM_BUG_ON(i915->gt.active_requests);
for_each_engine(engine, i915, id) {
GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
GEM_BUG_ON(engine->last_retired_context !=
to_intel_context(i915->kernel_context, engine));
}
}
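/*
 * i915_gem_idle_work_handler - park the GPU once it has stayed idle
 *
 * Bails out early if the device is not awake or new requests have arrived;
 * otherwise it switches to the kernel context, waits for the engines to
 * settle, parks the GPU and kicks off the RCU-deferred cache shrink above.
 */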
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), gt.idle_work.work);
unsigned int epoch = I915_EPOCH_INVALID;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
return;
if (READ_ONCE(dev_priv->gt.active_requests))
return;
/*
* Flush out the last user context, leaving only the pinned
* kernel context resident. When we are idling on the kernel_context,
* no more new requests (with a context switch) are emitted and we
* can finally rest. A consequence is that the idle work handler is
* always called at least twice before idling (and if the system is
* idle that implies a round trip through the retire worker).
*/
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_switch_to_kernel_context(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
READ_ONCE(dev_priv->gt.active_requests));
/*
* Wait for the last execlists context to complete, but bail out in case
* a new request is submitted. As we don't trust the hardware, we
* continue on if the wait times out. This is necessary to allow
* the machine to suspend even if the hardware dies, and we will
* try to recover in resume (after depriving the hardware of power,
* it may be in a better mood).
*/
__wait_for(if (new_requests_since_last_retire(dev_priv)) return,
intel_engines_are_idle(dev_priv),
I915_IDLE_ENGINES_TIMEOUT * 1000,
10, 500);
rearm_hangcheck =
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
/* Currently busy, come back later */
mod_delayed_work(dev_priv->wq,
&dev_priv->gt.idle_work,
msecs_to_jiffies(50));
goto out_rearm;
}
/*
* New request retired after this work handler started, extend active
* period until next instance of the work.
*/
if (new_requests_since_last_retire(dev_priv))
goto out_unlock;
epoch = __i915_gem_park(dev_priv);
assert_kernel_context_is_current(dev_priv);
rearm_hangcheck = false;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
out_rearm:
if (rearm_hangcheck) {
GEM_BUG_ON(!dev_priv->gt.awake);
i915_queue_hangcheck(dev_priv);
}
/*
* When we are idle, it is an opportune time to reap our caches.
* However, we have many objects that utilise RCU and the ordered
* i915->wq that this work is executing on. To try and flush any
* pending frees now we are idle, we first wait for an RCU grace
* period, and then queue a task (that will run last on the wq) to
* shrink and re-optimize the caches.
*/
if (same_epoch(dev_priv, epoch)) {
struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s) {
init_rcu_head(&s->rcu);
s->i915 = dev_priv;
s->epoch = epoch;
call_rcu(&s->rcu, __sleep_rcu);
}
}
}
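/*
 * Called when a GEM handle to this object is closed: drop every per-context
 * lookup entry (lut) installed through @file and close the associated vma
 * once its last open handle is gone (GGTT vmas are kept around).
 */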
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(gem->dev);
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
struct i915_lut_handle *lut, *ln;
mutex_lock(&i915->drm.struct_mutex);
list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
if (ctx->file_priv != fpriv)
continue;
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
GEM_BUG_ON(vma->obj != obj);
/* We allow the process to have multiple handles to the same
* vma, in the same fd namespace, by virtue of flink/open.
*/
GEM_BUG_ON(!vma->open_count);
if (!--vma->open_count && !i915_vma_is_ggtt(vma))
i915_vma_close(vma);
list_del(&lut->obj_link);
list_del(&lut->ctx_link);
kmem_cache_free(i915->luts, lut);
__i915_gem_object_release_unless_active(obj);
}
mutex_unlock(&i915->drm.struct_mutex);
}
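/*
 * Convert the ioctl's timeout from nanoseconds to jiffies: a negative value
 * means wait forever, zero means poll without sleeping.
 */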
static unsigned long to_wait_timeout(s64 timeout_ns)
{
if (timeout_ns < 0)
return MAX_SCHEDULE_TIMEOUT;
if (timeout_ns == 0)
return 0;
return nsecs_to_jiffies_timeout(timeout_ns);
}
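/*
 * Illustrative userspace sketch for the wait ioctl documented below (not
 * part of this file; "fd" and "handle" are assumed to come from the
 * caller, and drmIoctl() is libdrm's ioctl wrapper). Wait up to 500ms for
 * the buffer to become idle:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 500 * 1000 * 1000,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * ret == -1 with errno == ETIME means the object is still busy, and
 * wait.timeout_ns has been updated with the time remaining.
 */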
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
*
* Returns 0 if successful, else an error is returned with the remaining time in
* the timeout parameter.
* -ETIME: object is still busy after timeout
* -ERESTARTSYS: signal interrupted the wait
* -ENOENT: object doesn't exist
* Also possible, but rare:
* -EAGAIN: incomplete, restart syscall
* -ENOMEM: out of memory
* -ENODEV: Internal IRQ fail
* -E?: The add request failed
*
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
* non-zero timeout parameter the wait ioctl will wait for the given number of
* nanoseconds on an object becoming unbusy. Since the wait itself does so
* without holding struct_mutex the object may become re-busied before this
* function completes. A similar but shorter race condition exists in the busy
* ioctl.
*/
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
ktime_t start;
long ret;
if (args->flags != 0)
return -EINVAL;
obj = i915_gem_object_lookup(file, args->bo_handle);
if (!obj)
return -ENOENT;
start = ktime_get();
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
I915_WAIT_ALL,
to_wait_timeout(args->timeout_ns),
to_rps_client(file));
if (args->timeout_ns > 0) {
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
if (args->timeout_ns < 0)
args->timeout_ns = 0;
/*
* Apparently ktime isn't accurate enough and occasionally has a
* bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
* things up to make the test happy. We allow up to 1 jiffy.
*
* This is a regression from the timespec->ktime conversion.
*/
if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
args->timeout_ns = 0;
/* Asked to wait beyond the jiffie/scheduler precision? */
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
}
i915_gem_object_put(obj);
return ret;
}
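/*
 * Wait upon the last request of a timeline, optionally boosting the GPU
 * clocks for a race-to-idle. Returns the remaining timeout in jiffies, or
 * a negative error code propagated from i915_request_wait().
 */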
static long wait_for_timeline(struct i915_timeline *tl,
unsigned int flags, long timeout)
{
struct i915_request *rq;
rq = i915_gem_active_get_unlocked(&tl->last_request);
if (!rq)
return timeout;
/*
* "Race-to-idle".
*
* Switching to the kernel context is often used as a synchronous
* step prior to idling, e.g. in suspend for flushing all
* current operations to memory before sleeping. These we
* want to complete as quickly as possible to avoid prolonged
* stalls, so allow the gpu to boost to maximum clocks.
*/
if (flags & I915_WAIT_FOR_IDLE_BOOST)
gen6_rps_boost(rq, NULL);
timeout = i915_request_wait(rq, flags, timeout);
i915_request_put(rq);
return timeout;
}
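/*
 * Last resort after flushing requests: if the engines still refuse to idle
 * within I915_IDLE_ENGINES_TIMEOUT, dump the GEM trace and declare the GPU
 * wedged so that the driver can make forward progress.
 */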
static int wait_for_engines(struct drm_i915_private *i915)
{
if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
dev_err(i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
return -EIO;
}
return 0;
}
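/*
 * Wait for the GPU to finish all outstanding requests. @flags selects
 * locked vs unlocked waiting (see I915_WAIT_*), and @timeout is given in
 * jiffies, with MAX_SCHEDULE_TIMEOUT meaning wait forever.
 */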
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
return 0;
if (flags & I915_WAIT_LOCKED) {
struct i915_timeline *tl;
int err;
lockdep_assert_held(&i915->drm.struct_mutex);
list_for_each_entry(tl, &i915->gt.timelines, link) {
timeout = wait_for_timeline(tl, flags, timeout);
if (timeout < 0)
return timeout;
}
if (GEM_SHOW_DEBUG() && !timeout) {
/* Presume that timeout was non-zero to begin with! */
dev_warn(&i915->drm.pdev->dev,
"Missed idle-completion interrupt!\n");
GEM_TRACE_DUMP();
}
err = wait_for_engines(i915);
if (err)
return err;
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
} else {
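/*
 * Without struct_mutex we cannot walk the full list of timelines,
 * so wait only upon the requests currently executing on each
 * engine's own execution timeline.
 */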
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
struct i915_timeline *tl = &engine->timeline;
timeout = wait_for_timeline(tl, flags, timeout);
if (timeout < 0)
return timeout;
}
}
return 0;
}
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
/*
* We manually flush the CPU domain so that we can override and
* force the flush for the display, and perform it asynchronously.
*/
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->write_domain = 0;
}
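/*
 * If the object is pinned for scanout (pin_global), flush any pending
 * writes for the display under struct_mutex.
 */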
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
if (!READ_ONCE(obj->pin_global))
return;
mutex_lock(&obj->base.dev->struct_mutex);
__i915_gem_object_flush_for_display(obj);
mutex_unlock(&obj->base.dev->struct_mutex);
}
/**
* Moves a single object to the WC read, and possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
NULL);
if (ret)
return ret;
if (obj->write_domain == I915_GEM_DOMAIN_WC)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
* being notified and releasing the pages, we would mistakenly
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
* WC domain upon first access.
*/
if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
obj->read_domains |= I915_GEM_DOMAIN_WC;
if (write) {
obj->read_domains = I915_GEM_DOMAIN_WC;
obj->write_domain = I915_GEM_DOMAIN_WC;
obj->mm.dirty = true;
}
i915_gem_object_unpin_pages(obj);
return 0;
}
/**
* Moves a single object to the GTT read, and possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
NULL);
if (ret)
return ret;
if (obj->write_domain == I915_GEM_DOMAIN_GTT)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
* being notified and releasing the pages, we would mistakenly
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
*/
if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
}
i915_gem_object_unpin_pages(obj);
return 0;
}
/**
* Changes the cache-level of an object across all VMA.
* @obj: object to act on
* @cache_level: new cache level to set for the object
*
* After this function returns, the object will be in the new cache-level
* across all GTT and the contents of the backing storage will be coherent,
* with respect to the new cache-level. In order to keep the backing storage
* coherent for all users, we only allow a single cache level to be set
* globally on the object and prevent it from being changed whilst the
* hardware is reading from the object. That is, if the object is currently
* on the scanout it will be set to uncached (or equivalent display
* cache coherency) and all non-MOCS GPU access will also be uncached so
* that all direct access to the scanout remains coherent.
*/
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct i915_vma *vma;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->cache_level == cache_level)
return 0;
/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
restart:
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
if (i915_vma_is_pinned(vma)) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}
if (!i915_vma_is_closed(vma) &&
i915_gem_valid_gtt_space(vma, cache_level))
continue;
ret = i915_vma_unbind(vma);
if (ret)
return ret;
/* As unbinding may affect other elements in the
* obj->vma_list (due to side-effects from retiring
* an active vma), play safe and restart the iterator.
*/
goto restart;
}
/* We can reuse the existing drm_mm nodes but need to change the
* cache-level on the PTE. We could simply unbind them all and
* rebind with the correct cache-level on next use. However since
* we already have a valid slot, dma mapping, pages etc, we may as well
* rewrite the PTE in the belief that doing so tramples upon less
* state and so involves less work.
*/
if (obj->bind_count) {
/* Before we change the PTE, the GPU must not be accessing it.
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
*/
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT,
NULL);
if (ret)
return ret;
if (!HAS_LLC(to_i915(obj->base.dev)) &&
cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
* userspace to refault in the pages and we can
* then double check if the GTT mapping is still
* valid for that pointer access.
*/
i915_gem_release_mmap(obj);
/* As we no longer need a fence for GTT access,
* we can relinquish it now (and so prevent having
* to steal a fence from someone else on the next
* fence request). Note GPU activity would have
* dropped the fence as all snoopable access is
* supposed to be linear.
*/
for_each_ggtt_vma(vma, obj) {
ret = i915_vma_put_fence(vma);
if (ret)
return ret;
}
} else {
/* We either have incoherent backing store and
* so no GTT access or the architecture is fully
* coherent. In such cases, existing GTT mmaps
* ignore the cache bit in the PTE and we can
* rewrite it without confusing the GPU or having
* to force userspace to fault back in its mmaps.
*/
}
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
if (ret)
return ret;
}
}
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
return 0;
}
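/*
 * Report the object's current cache level to userspace as one of the
 * I915_CACHING_* values. Only an RCU-protected lookup is needed, no
 * struct_mutex.
 */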
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
int err = 0;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (!obj) {
err = -ENOENT;
goto out;
}
switch (obj->cache_level) {
case I915_CACHE_LLC:
case I915_CACHE_L3_LLC:
args->caching = I915_CACHING_CACHED;
break;
case I915_CACHE_WT:
args->caching = I915_CACHING_DISPLAY;
break;
default:
args->caching = I915_CACHING_NONE;
break;
}
out:
rcu_read_unlock();
return err;
}
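/*
 * Translate the requested I915_CACHING_* mode into an i915_cache_level
 * and apply it via i915_gem_object_set_cache_level() under struct_mutex.
 * Proxy objects are rejected, as their caching mode is owned by their
 * generator.
 */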
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret = 0;
switch (args->caching) {
case I915_CACHING_NONE:
level = I915_CACHE_NONE;
break;
case I915_CACHING_CACHED:
/*
* Due to a HW issue on BXT A stepping, GPU stores via a
* snooped mapping may leave stale data in a corresponding CPU
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
return -ENODEV;
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
}
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/*
* The caching mode of proxy object is handled by its generator, and
* not allowed to be changed by userspace.
*/
if (i915_gem_object_is_proxy(obj)) {
ret = -ENXIO;
goto out;
}
if (obj->cache_level == level)
goto out;
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
if (ret)
goto out;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto out;
ret = i915_gem_object_set_cache_level(obj, level);
mutex_unlock(&dev->struct_mutex);
out:
i915_gem_object_put(obj);
return ret;
}
/*
* Prepare buffer for display plane (scanout, cursors, etc). Can be called from
* an uninterruptible phase (modesetting) and allows any flushes to be pipelined
* (for pageflips). We only flush the caches while preparing the buffer for
* display, the callers are responsible for frontbuffer flush.
*/
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags)
{
struct i915_vma *vma;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
/* Mark the global pin early so that we account for the
* display coherency whilst setting up the cache domains.
*/
obj->pin_global++;
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
* chipsets.
*
* However for gen6+, we could do better by using the GFDT bit instead
* of uncaching, which would allow us to flush all the LLC-cached data
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(to_i915(obj->base.dev)) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err_unpin_global;
}
/* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers. However,
* it may simply be too big to fit into mappable, in which case
* put it anyway and hope that userspace can cope (but always first
* try to preserve the existing ABI).
*/
vma = ERR_PTR(-ENOSPC);
if ((flags & PIN_MAPPABLE) == 0 &&
(!view || view->type == I915_GGTT_VIEW_NORMAL))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
flags |
PIN_MAPPABLE |
PIN_NONBLOCK);
if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
if (IS_ERR(vma))
goto err_unpin_global;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
__i915_gem_object_flush_for_display(obj);
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
err_unpin_global:
obj->pin_global--;
return vma;
}
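/*
 * Release a pin taken by i915_gem_object_pin_to_display_plane(). On the
 * last unpin the display alignment is reset; the object is also bumped
 * in the inactive GGTT LRU to reduce the chance of premature eviction
 * whilst flipping.
 */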
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(vma->obj->pin_global == 0))
return;
if (--vma->obj->pin_global == 0)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(vma->obj);
i915_vma_unpin(vma);
}
/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
* @write: requesting write or read-only access
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
NULL);
if (ret)
return ret;
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write)
__start_cpu_write(obj);
return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
* Note that if we were to use the current jiffies each time around the loop,
* we wouldn't escape the function with any frames outstanding if the time to
* render a frame was over 20ms.
*
* This should get us reasonable parallelism between CPU and GPU but also
* relatively low latency when blocking on a particular request to finish.
*/
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct i915_request *request, *target = NULL;
long ret;
/* ABI: return -EIO if already wedged */
if (i915_terminally_wedged(&dev_priv->gpu_error))
return -EIO;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
if (target) {
list_del(&target->client_link);
target->file_priv = NULL;
}
target = request;
}
if (target)
i915_request_get(target);
spin_unlock(&file_priv->mm.lock);
if (target == NULL)
return 0;
ret = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
i915_request_put(target);
return ret < 0 ? ret : 0;
}
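/*
 * Illustrative sketch (not part of this file): the throttle ioctl takes no
 * argument, so a client can simply call it between frames; per the ABI note
 * above, an -EIO return is how userspace detects an already wedged GPU.
 * Assuming libdrm's drmIoctl(), with fall_back_to_software() a placeholder:
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) && errno == EIO)
 *		fall_back_to_software();
 */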
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
u64 size,
u64 alignment,
u64 flags)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/* If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
* vain. Worse, doing so may cause us to ping-pong
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
if (obj->base.size > dev_priv->ggtt.mappable_end)
return ERR_PTR(-E2BIG);
/* If NONBLOCK is set the caller is optimistically
* trying to cache the full object within the mappable
* aperture, and *must* have a fallback in place for
* situations where we cannot bind the object. We
* can be a little more lax here and use the fallback
* more often to avoid costly migrations of ourselves
* and other objects within the aperture.
*
* Half-the-aperture is used as a simple heuristic.
* More interesting would be to search for a free
* block prior to making the commitment to unbind.
* That caters for the self-harm case, and with a
* little more heuristics (e.g. NOFAULT, NOEVICT)
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
obj->base.size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
vma = i915_vma_instance(obj, vm, view);
if (unlikely(IS_ERR(vma)))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
if (flags & PIN_NONBLOCK) {
if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE &&
vma->fence_size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
" offset=%08x, req.alignment=%llx,"
" req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
i915_ggtt_offset(vma), alignment,
!!(flags & PIN_MAPPABLE),
i915_vma_is_map_and_fenceable(vma));
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
}
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
return vma;
}
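/*
 * Usage sketch (not a caller in this file; 'obj' and 'vma' are assumed):
 * in-kernel users that prefer the object inside the mappable aperture but
 * can cope with it elsewhere follow the same two-step pattern as the display
 * pinning path above: an optimistic PIN_MAPPABLE | PIN_NONBLOCK attempt,
 * then an unrestricted fallback:
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 *				       PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (IS_ERR(vma))
 *		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */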
static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
/* Note that we could alias engines in the execbuf API, but
* that would be very unwise as it prevents userspace from having
* fine control over engine selection. Ahem.
*
* This should be something like EXEC_MAX_ENGINE instead of
* I915_NUM_ENGINES.
*/
BUILD_BUG_ON(I915_NUM_ENGINES > 16);
return 0x10000 << id;
}
static __always_inline unsigned int __busy_write_id(unsigned int id)
{
/* The uABI guarantees an active writer is also amongst the read
* engines. This would be true if we accessed the activity tracking
* under the lock, but as we perform the lookup of the object and
* its activity locklessly we can not guarantee that the last_write
* being active implies that we have set the same engine flag from
* last_read - hence we always set both read and write busy for
* last_write.
*/
return id | __busy_read_flag(id);
}
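/*
 * Decoding sketch for the value assembled by the two helpers above (layout
 * inferred from the helpers themselves rather than quoted from the uapi
 * header): each reading engine sets bit (16 + id), while the writer's uabi
 * id, if any, is also placed in the low 16 bits:
 *
 *	u32 busy = args.busy;		// from DRM_IOCTL_I915_GEM_BUSY
 *	u32 readers = busy >> 16;	// bitmask, bit 'id' set per reader
 *	u32 writer  = busy & 0xffff;	// uabi id of the active writer
 */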
static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
struct i915_request *rq;
/* We have to check the current hw status of the fence as the uABI
* guarantees forward progress. We could rely on the idle worker
* to eventually flush us, but to minimise latency just ask the
* hardware.
*
* Note we only report on the status of native fences.
*/
if (!dma_fence_is_i915(fence))
return 0;
/* opencode to_request() in order to avoid const warnings */
rq = container_of(fence, struct i915_request, fence);
if (i915_request_completed(rq))
return 0;
return flag(rq->engine->uabi_id);
}
static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
if (!fence)
return 0;
return __busy_set_if_active(fence, __busy_write_id);
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
struct reservation_object_list *list;
unsigned int seq;
int err;
err = -ENOENT;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (!obj)
goto out;
/* A discrepancy here is that we do not report the status of
* non-i915 fences, i.e. even though we may report the object as idle,
* a call to set-domain may still stall waiting for foreign rendering.
* This also means that wait-ioctl may report an object as busy,
* where busy-ioctl considers it idle.
*
* We trade the ability to warn of foreign fences to report on which
* i915 engines are active for the object.
*
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
* !reservation_object_test_signaled_rcu(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*/
retry:
seq = raw_read_seqcount(&obj->resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
/* Translate shared fences to READ set of engines */
list = rcu_dereference(obj->resv->fence);
if (list) {
unsigned int shared_count = list->shared_count, i;
for (i = 0; i < shared_count; ++i) {
struct dma_fence *fence =
rcu_dereference(list->shared[i]);
args->busy |= busy_check_reader(fence);
}
}
if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
goto retry;
err = 0;
out:
rcu_read_unlock();
return err;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int err;
switch (args->madv) {
case I915_MADV_DONTNEED:
case I915_MADV_WILLNEED:
break;
default:
return -EINVAL;
}
obj = i915_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
err = mutex_lock_interruptible(&obj->mm.lock);
if (err)
goto out;
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_unpin_pages(obj);
obj->mm.quirked = false;
}
if (args->madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
}
if (obj->mm.madv != __I915_MADV_PURGED)
obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
if (obj->mm.madv == I915_MADV_DONTNEED &&
!i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
mutex_unlock(&obj->mm.lock);
out:
i915_gem_object_put(obj);
return err;
}
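/*
 * Retirement callback for the object's frontbuffer_write tracker: once the
 * last GPU (CS) write to a frontbuffer object has completed, notify the
 * frontbuffer tracking code so it can flush that rendering.
 */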
static void
frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
{
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
intel_fb_obj_flush(obj, ORIGIN_CS);
}
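/*
 * Common initialisation shared by every GEM object backend: set up the
 * object's locks, lists, reservation object and default madvise/frontbuffer
 * state before handing control to the backend-specific ops.
 */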
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
mutex_init(&obj->mm.lock);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
init_rcu_head(&obj->rcu);
obj->ops = ops;
reservation_object_init(&obj->__builtin_resv);
obj->resv = &obj->__builtin_resv;
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
.pwrite = i915_gem_object_pwrite_gtt,
};
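/*
 * Back the GEM object with a shmemfs file, preferring the driver's private
 * gemfs mount when it is available and falling back to the global shmemfs
 * mount otherwise.
 */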
static int i915_gem_object_create_shmem(struct drm_device *dev,
struct drm_gem_object *obj,
size_t size)
{
struct drm_i915_private *i915 = to_i915(dev);
unsigned long flags = VM_NORESERVE;
struct file *filp;
drm_gem_private_object_init(dev, obj, size);
if (i915->mm.gemfs)
filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
flags);
else
filp = shmem_file_setup("i915", size, flags);
if (IS_ERR(filp))
return PTR_ERR(filp);
obj->filp = filp;
return 0;
}
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
/* There is a prevalence of the assumption that we fit the object's
* page count inside a 32bit _signed_ variable. Let's document this and
* catch if we ever need to fix it. In the meantime, if you do spot
* such a local variable, please consider fixing!
*/
if (size >> PAGE_SHIFT > INT_MAX)
return ERR_PTR(-E2BIG);
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
if (ret)
goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
}
mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_object_ops);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
if (HAS_LLC(dev_priv))
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
* display scanout are coherent with the CPU in
* accessing this cache. This means in this mode we
* don't need to clflush on the CPU side, and on the
* GPU side we only need to flush internal caches to
* get data visible to the CPU.
*
* However, we maintain the display planes as UC, and so
* need to rebind when first used as such.
*/
cache_level = I915_CACHE_LLC;
else
cache_level = I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
trace_i915_gem_object_create(obj);
return obj;
fail:
i915_gem_object_free(obj);
return ERR_PTR(ret);
}
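/*
 * Decide whether the backing storage will be released immediately on free,
 * letting us skip copying the contents back from the GPU.
 */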
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
/* If we are the last user of the backing storage (be it shmemfs
* pages or stolen etc), we know that the pages are going to be
* immediately released. In this case, we can then skip copying
* back the contents from the GPU.
*/
if (obj->mm.madv != I915_MADV_WILLNEED)
return false;
if (obj->base.filp == NULL)
return true;
/* At first glance, this looks racy, but then again so would be
* userspace racing mmap against close. However, the first external
* reference to the filp can only be obtained through the
* i915_gem_mmap_ioctl() which safeguards us against the user
* acquiring such a reference whilst we are in the middle of
* freeing the object.
*/
return atomic_long_read(&obj->base.filp->f_count) == 1;
}
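/*
 * Deferred free path: for each object on the freed list, unbind its
 * remaining VMA, release its pages and backing storage, and return the
 * memory to the system. Runs under a runtime pm wakeref as unbinding may
 * touch the hardware.
 */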
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(i915);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
trace_i915_gem_object_destroy(obj);
mutex_lock(&i915->drm.struct_mutex);
GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn,
&obj->vma_list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
i915_vma_destroy(vma);
}
GEM_BUG_ON(!list_empty(&obj->vma_list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
/* This serializes freeing with the shrinker. Since the free
* is delayed, first by RCU then by the workqueue, we want the
* shrinker to be able to free pages of unreferenced objects,
* or else we may oom whilst there are plenty of deferred
* freed objects.
*/
if (i915_gem_object_has_pages(obj)) {
spin_lock(&i915->mm.obj_lock);
list_del_init(&obj->mm.link);
spin_unlock(&i915->mm.obj_lock);
}
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(obj->bind_count);
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
if (obj->ops->release)
obj->ops->release(obj);
if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
reservation_object_fini(&obj->__builtin_resv);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(i915, obj->base.size);
kfree(obj->bit_17);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
atomic_dec(&i915->mm.free_count);
if (on)
cond_resched();
}
intel_runtime_pm_put(i915, wakeref);
}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
struct llist_node *freed;
/* Free the oldest, most stale object to keep the free_list short */
freed = NULL;
if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
/* Only one consumer of llist_del_first() allowed */
spin_lock(&i915->mm.free_lock);
freed = llist_del_first(&i915->mm.free_list);
spin_unlock(&i915->mm.free_lock);
}
if (unlikely(freed)) {
freed->next = NULL;
__i915_gem_free_objects(i915, freed);
}
}
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, struct drm_i915_private, mm.free_work);
struct llist_node *freed;
/*
* All file-owned VMA should have been released by this point through
* i915_gem_close_object(), or earlier by i915_gem_context_close().
* However, the object may also be bound into the global GTT (e.g.
* older GPUs without per-process support, or for direct access through
* the GTT either for the user or for scanout). Those VMA still need to
* be unbound now.
*/
spin_lock(&i915->mm.free_lock);
while ((freed = llist_del_all(&i915->mm.free_list))) {
spin_unlock(&i915->mm.free_lock);
__i915_gem_free_objects(i915, freed);
if (need_resched())
return;
spin_lock(&i915->mm.free_lock);
}
spin_unlock(&i915->mm.free_lock);
}
static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/*
* We reuse obj->rcu for the freed list, so we had better not treat
* it like a rcu_head from this point forwards. And we expect all
* objects to be freed via this path.
*/
destroy_rcu_head(&obj->rcu);
/*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
* system, we cannot do that directly from the RCU callback (which may
* be a softirq context), but must instead then defer that work onto a
* kthread. We use the RCU callback rather than move the freed object
* directly onto the work queue so that we can mix between using the
* worker and performing frees directly from subsequent allocations for
* crude but effective memory throttling.
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
queue_work(i915->wq, &i915->mm.free_work);
}
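/*
 * Final unreference of a GEM object: undo the swizzle-quirk page pin, mark
 * discardable backing storage as DONTNEED, then defer the actual release to
 * an RCU callback (and from there to the free worker).
 */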
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
if (obj->mm.quirked)
__i915_gem_object_unpin_pages(obj);
if (discard_backing_storage(obj))
obj->mm.madv = I915_MADV_DONTNEED;
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
* i915_gem_busy_ioctl(). For the corresponding synchronized
* lookup see i915_gem_object_lookup_rcu().
*/
atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
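/*
 * Drop our reference to the object unless it is still active on the GPU, in
 * which case the reference is handed over to the active-reference tracking
 * so that the object is not freed until it idles.
 */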
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (!i915_gem_object_has_active_reference(obj) &&
i915_gem_object_is_active(obj))
i915_gem_object_set_active_reference(obj);
else
i915_gem_object_put(obj);
}
void i915_gem_sanitize(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
/*
* As we have just resumed the machine and woken the device up from
* deep PCI sleep (presumably D3_cold), assume the HW has been reset
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
if (i915_terminally_wedged(&i915->gpu_error))
i915_gem_unset_wedged(i915);
/*
* If we inherit context state from the BIOS or earlier occupants
* of the GPU, the GPU may be in an inconsistent state when we
* try to take over. The only way to remove the earlier state
* is by resetting. However, resetting on earlier gen is tricky as
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_runtime_pm_put(i915, wakeref);
mutex_lock(&i915->drm.struct_mutex);
i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
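/*
 * Quiesce GEM for suspend: switch away from user contexts, wait for the GPU
 * to idle and flush all deferred work so the device can be powered down in a
 * known state.
 */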
int i915_gem_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
int ret;
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
intel_suspend_gt_powersave(i915);
flush_workqueue(i915->wq);
mutex_lock(&i915->drm.struct_mutex);
/*
* We have to flush all the executing contexts to main memory so
* that they can be saved in the hibernation image. To ensure the last
* context image is coherent, we have to switch away from it. That
* leaves the i915->kernel_context still active when
* we actually suspend, and its image in memory may not match the GPU
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
if (!i915_terminally_wedged(&i915->gpu_error)) {
ret = i915_gem_switch_to_kernel_context(i915);
if (ret)
goto err_unlock;
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_FOR_IDLE_BOOST,
MAX_SCHEDULE_TIMEOUT);
if (ret && ret != -EIO)
goto err_unlock;
assert_kernel_context_is_current(i915);
}
i915_retire_requests(i915); /* ensure we flush after wedging */
mutex_unlock(&i915->drm.struct_mutex);
i915_reset_flush(i915);
drain_delayed_work(&i915->gt.retire_work);
/*
* As the idle_work is rearming if it detects a race, play safe and
* repeat the flush until it is definitely idle.
*/
drain_delayed_work(&i915->gt.idle_work);
intel_uc_suspend(i915);
/*
* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(i915->gt.awake);
if (WARN_ON(!intel_engines_are_idle(i915)))
i915_gem_set_wedged(i915); /* no hope, discard everything */
intel_runtime_pm_put(i915, wakeref);
return 0;
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
return ret;
}
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
&i915->mm.unbound_list,
&i915->mm.bound_list,
NULL
}, **phase;
/*
* Neither the BIOS, ourselves nor any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode. And the only
* known way to disable logical contexts is through a GPU reset.
*
* So in order to leave the system in a known default configuration,
* always reset the GPU upon unload and suspend. Afterwards we then
* clean up the GEM state tracking, flushing off the requests and
* leaving the system in a known idle state.
*
* Note that it is of the utmost importance that the GPU is idle and
* all stray writes are flushed *before* we dismantle the backing
* storage for the pinned objects.
*
* However, since we are uncertain that resetting the GPU on older
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
mutex_lock(&i915->drm.struct_mutex);
for (phase = phases; *phase; phase++) {
list_for_each_entry(obj, *phase, mm.link)
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
mutex_unlock(&i915->drm.struct_mutex);
intel_uc_sanitize(i915);
i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
i915->gt.resume(i915);
if (i915_gem_init_hw(i915))
goto err_wedged;
intel_uc_resume(i915);
/* Always reload a context for powersaving. */
if (i915_gem_switch_to_kernel_context(i915))
goto err_wedged;
out_unlock:
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
if (!i915_terminally_wedged(&i915->gpu_error)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
i915_gem_set_wedged(i915);
}
goto out_unlock;
}
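/*
 * (Re)enable hardware control of bit-6 swizzling for tiled surfaces; a no-op
 * on pre-gen5 hardware or when no swizzling is in use.
 */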
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
if (IS_GEN(dev_priv, 5))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN(dev_priv, 6))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (IS_GEN(dev_priv, 7))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (IS_GEN(dev_priv, 8))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
I915_WRITE(RING_TAIL(base), 0);
I915_WRITE(RING_START(base), 0);
}
static void init_unused_rings(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev_priv)) {
init_unused_ring(dev_priv, PRB1_BASE);
init_unused_ring(dev_priv, SRB0_BASE);
init_unused_ring(dev_priv, SRB1_BASE);
init_unused_ring(dev_priv, SRB2_BASE);
init_unused_ring(dev_priv, SRB3_BASE);
} else if (IS_GEN(dev_priv, 2)) {
init_unused_ring(dev_priv, SRB0_BASE);
init_unused_ring(dev_priv, SRB1_BASE);
} else if (IS_GEN(dev_priv, 3)) {
init_unused_ring(dev_priv, PRB1_BASE);
init_unused_ring(dev_priv, PRB2_BASE);
}
}
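/*
 * Re-run the per-engine hardware initialisation; used when bringing the GPU
 * back up, as only then can we replay the pending requests.
 */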
static int __i915_gem_restart_engines(void *data)
{
struct drm_i915_private *i915 = data;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
for_each_engine(engine, i915, id) {
err = engine->init_hw(engine);
if (err) {
DRM_ERROR("Failed to restart %s (%d)\n",
engine->name, err);
return err;
}
}
return 0;
}
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
int ret;
dev_priv->gt.last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HASWELL(dev_priv))
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
intel_gt_apply_workarounds(dev_priv);
/* ...and determine whether they are sticking. */
intel_gt_verify_workarounds(dev_priv, "init");
i915_gem_init_swizzling(dev_priv);
/*
* At least 830 can leave some of the unused rings
* "active" (ie. head != tail) after resume which
* will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
if (i915_terminally_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
goto out;
}
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
ret = intel_wopcm_init_hw(&dev_priv->wopcm);
if (ret) {
DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(dev_priv);
if (ret) {
DRM_ERROR("Enabling uc failed (%d)\n", ret);
goto out;
}
intel_mocs_init_l3cc_table(dev_priv);
/* Only when the HW is re-initialised can we replay the requests */
ret = __i915_gem_restart_engines(dev_priv);
if (ret)
goto cleanup_uc;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return 0;
cleanup_uc:
intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
/*
* As we reset the GPU during very early sanitisation, the current
* register state on the GPU should reflect its default values.
* We load a context onto the hw (with restore-inhibit), then switch
* over to a second context to save that default register state. We
* can then prime every new context with that state so they all start
* from the same default HW values.
*/
ctx = i915_gem_context_create_kernel(i915, 0);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
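/*
* Submit a trivial request on every engine so that each one loads the
* fresh context and runs its init_context hook.
*/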
for_each_engine(engine, i915, id) {
struct i915_request *rq;
rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ctx;
}
err = 0;
if (engine->init_context)
err = engine->init_context(rq);
i915_request_add(rq);
if (err)
goto err_active;
}
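/* Switch back to the kernel context so the default state gets saved out. */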
err = i915_gem_switch_to_kernel_context(i915);
if (err)
goto err_active;
if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
i915_gem_set_wedged(i915);
err = -EIO; /* Caller will declare us wedged */
goto err_active;
}
assert_kernel_context_is_current(i915);
/*
* Immediately park the GPU so that we enable powersaving and
* treat it as idle. The next time we issue a request, we will
* unpark and start using the engine->pinned_default_state, otherwise
* it is in limbo and an early reset may fail.
*/
__i915_gem_park(i915);
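/*
* For each engine, unbind the saved context image from the GGTT and
* keep a reference to its backing object as the engine's default state.
*/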
for_each_engine(engine, i915, id) {
struct i915_vma *state;
void *vaddr;
GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
state = to_intel_context(ctx, engine)->state;
if (!state)
continue;
/*
* As we will hold a reference to the logical state, it will
* not be torn down with the context, and importantly the
* object will hold onto its vma (making it possible for a
* stray GTT write to corrupt our defaults). Unmap the vma
* from the GTT to prevent such accidents and reclaim the
* space.
*/
err = i915_vma_unbind(state);
if (err)
goto err_active;
err = i915_gem_object_set_to_cpu_domain(state->obj, false);
if (err)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
/* Check we can acquire the image of the context state */
vaddr = i915_gem_object_pin_map(engine->default_state,
I915_MAP_FORCE_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_active;
}
i915_gem_object_unpin_map(engine->default_state);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
unsigned int found = intel_engines_has_context_isolation(i915);
/*
* Make sure that classes with multiple engine instances all
* share the same basic configuration.
*/
for_each_engine(engine, i915, id) {
unsigned int bit = BIT(engine->uabi_class);
unsigned int expected = engine->default_state ? bit : 0;
if ((found & bit) != expected) {
DRM_ERROR("mismatching default context state for class %d on engine %s\n",
engine->uabi_class, engine->name);
}
}
}
out_ctx:
i915_gem_context_set_closed(ctx);
i915_gem_context_put(ctx);
return err;
err_active:
/*
* If we have to abandon now, we expect the engines to be idle
* and ready to be torn-down. First try to flush any remaining
* request, ensure we are pointing at the kernel context and
* then remove it.
*/
if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
goto out_ctx;
if (WARN_ON(i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT)))
goto out_ctx;
i915_gem_contexts_lost(i915);
goto out_ctx;
}
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
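/* Prefer stolen memory for the scratch page, falling back to an internal object. */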
obj = i915_gem_object_create_stolen(i915, size);
if (!obj)
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
}
ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
goto err_unref;
i915->gt.scratch = vma;
return 0;
err_unref:
i915_gem_object_put(obj);
return ret;
}
static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
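/* Pick the submission backend: execlists (logical ring contexts) or legacy ringbuffer. */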
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->gt.resume = intel_lr_context_resume;
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
} else {
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
}
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
ret = intel_uc_init_misc(dev_priv);
if (ret)
return ret;
ret = intel_wopcm_init(&dev_priv->wopcm);
if (ret)
goto err_uc_misc;
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If
* we hold the forcewake during initialisation these problems
* just magically go away.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
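/* gen2 uses a larger 256KiB scratch area; everything else needs only a single page. */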
ret = i915_gem_init_scratch(dev_priv,
IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
ret = i915_gem_contexts_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_scratch;
}
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_context;
}
intel_init_gt_powersave(dev_priv);
ret = intel_uc_init(dev_priv);
if (ret)
goto err_pm;
ret = i915_gem_init_hw(dev_priv);
if (ret)
goto err_uc_init;
/*
* Despite its name, intel_init_clock_gating applies display
* clock gating workarounds, GT mmio workarounds and the occasional
* GT power context workaround. Worse, sometimes it includes a context
* register workaround which we need to apply before we record the
* default HW state for all contexts.
*
* FIXME: break up the workarounds and apply them at the right time!
*/
intel_init_clock_gating(dev_priv);
ret = __intel_engines_record_defaults(dev_priv);
if (ret)
goto err_init_hw;
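/*
* Fault injection points: exercise both the ordinary unwind path
* (-ENODEV) and the wedged-but-KMS-alive path (-EIO) below.
*/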
if (i915_inject_load_failure()) {
ret = -ENODEV;
goto err_init_hw;
}
if (i915_inject_load_failure()) {
ret = -EIO;
goto err_init_hw;
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
/*
* Unwinding is complicated by the fact that we want to handle -EIO to
* mean disable GPU submission but keep KMS alive. We want to mark the
* HW as irreversibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
err_init_hw:
mutex_unlock(&dev_priv->drm.struct_mutex);
WARN_ON(i915_gem_suspend(dev_priv));
i915_gem_suspend_late(dev_priv);
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
err_pm:
if (ret != -EIO) {
intel_cleanup_gt_powersave(dev_priv);
i915_gem_cleanup_engines(dev_priv);
}
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
err_scratch:
i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_uc_misc:
intel_uc_fini_misc(dev_priv);
if (ret != -EIO)
i915_gem_cleanup_userptr(dev_priv);
if (ret == -EIO) {
mutex_lock(&dev_priv->drm.struct_mutex);
/*
* Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
* for all other failures, such as an allocation failure, bail.
*/
if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
i915_load_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
}
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_gem_restore_gtt_mappings(dev_priv);
i915_gem_restore_fences(dev_priv);
intel_init_clock_gating(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
i915_gem_drain_freed_objects(dev_priv);
return ret;
}
void i915_gem_fini(struct drm_i915_private *dev_priv)
{
i915_gem_suspend_late(dev_priv);
intel_disable_gt_powersave(dev_priv);
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_wa_list_free(&dev_priv->gt_wa_list);
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->contexts.list));
}
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
i915_gem_sanitize(i915);
}
void
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
dev_priv->gt.cleanup_engine(engine);
}
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
int i;
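/* Determine how many hardware fence registers this platform provides. */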
if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
dev_priv->num_fence_regs = 32;
else if (INTEL_GEN(dev_priv) >= 4 ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
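/* When running under a vGPU, the hypervisor reports how many fences we may use. */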
if (intel_vgpu_active(dev_priv))
dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num));
/* Initialize fence registers to zero */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
fence->i915 = dev_priv;
fence->id = i;
list_add_tail(&fence->link, &dev_priv->mm.fence_list);
}
i915_gem_restore_fences(dev_priv);
i915_gem_detect_bit_6_swizzle(dev_priv);
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.object_stat_lock);
spin_lock_init(&i915->mm.obj_lock);
spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
INIT_LIST_HEAD(&i915->mm.unbound_list);
INIT_LIST_HEAD(&i915->mm.bound_list);
INIT_LIST_HEAD(&i915->mm.fence_list);
INIT_LIST_HEAD(&i915->mm.userfault_list);
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err = -ENOMEM;
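/* Create the slab caches backing GEM objects, vmas, luts, requests and scheduler state. */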
dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
if (!dev_priv->objects)
goto err_out;
dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
if (!dev_priv->vmas)
goto err_objects;
dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
if (!dev_priv->luts)
goto err_vmas;
dev_priv->requests = KMEM_CACHE(i915_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
if (!dev_priv->requests)
goto err_luts;
dev_priv->dependencies = KMEM_CACHE(i915_dependency,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT);
if (!dev_priv->dependencies)
goto err_requests;
dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
if (!dev_priv->priorities)
goto err_dependencies;
INIT_LIST_HEAD(&dev_priv->gt.timelines);
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
i915_gem_init__mm(dev_priv);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
mutex_init(&dev_priv->gpu_error.wedge_mutex);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
err = i915_gemfs_init(dev_priv);
if (err)
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
return 0;
err_dependencies:
kmem_cache_destroy(dev_priv->dependencies);
err_requests:
kmem_cache_destroy(dev_priv->requests);
err_luts:
kmem_cache_destroy(dev_priv->luts);
err_vmas:
kmem_cache_destroy(dev_priv->vmas);
err_objects:
kmem_cache_destroy(dev_priv->objects);
err_out:
return err;
}
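/*
 * For reference, KMEM_CACHE() from <linux/slab.h> is (roughly) shorthand for:
 *
 *	kmem_cache_create("drm_i915_gem_object",
 *			  sizeof(struct drm_i915_gem_object),
 *			  __alignof__(struct drm_i915_gem_object),
 *			  SLAB_HWCACHE_ALIGN, NULL);
 *
 * and the err_* labels above unwind in strict reverse order of allocation, so
 * each new cache only needs one extra "goto err_<previous>;" on failure plus
 * one matching kmem_cache_destroy() in the unwind chain.
 */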
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
WARN_ON(!list_empty(&dev_priv->gt.timelines));
kmem_cache_destroy(dev_priv->priorities);
kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->luts);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();
i915_gemfs_fini(dev_priv);
}
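/*
 * Generic pattern for reference (placeholder names, not this driver's exact
 * call chain): when frees are deferred with call_rcu(), or a cache uses
 * SLAB_TYPESAFE_BY_RCU, memory is only truly released after a grace period,
 * so an rcu_barrier() guarantees every queued callback has run before the
 * backing cache (or the module's code) is torn down:
 *
 *	call_rcu(&obj->rcu, free_object_rcu);	// deferred free
 *	...
 *	rcu_barrier();				// wait for every queued callback
 *	kmem_cache_destroy(example_cache);	// now safe to tear down
 */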
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
/* Discard all purgeable objects; let userspace recover those as
* required after resuming.
*/
i915_gem_shrink_all(dev_priv);
return 0;
}
int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
&i915->mm.unbound_list,
&i915->mm.bound_list,
NULL
}, **phase;
/*
* Called just before we write the hibernation image.
*
* We need to update the domain tracking to reflect that the CPU
* will be accessing all the pages to create and restore from the
* hibernation, and so upon restoration those pages will be in the
* CPU domain.
*
* To make sure the hibernation image contains the latest state,
* we update that state just before writing out the image.
*
* To try to reduce the size of the hibernation image, we manually shrink
* the objects as well; see i915_gem_freeze().
*/
i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
i915_gem_drain_freed_objects(i915);
mutex_lock(&i915->drm.struct_mutex);
for (phase = phases; *phase; phase++) {
list_for_each_entry(obj, *phase, mm.link)
WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
}
mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
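/*
 * The NULL-terminated "phases" array above is a common idiom for walking
 * several list heads with a single loop body.  Minimal generic sketch (all
 * names are placeholders):
 *
 *	struct list_head *phases[] = { &list_a, &list_b, NULL }, **phase;
 *
 *	for (phase = phases; *phase; phase++)
 *		list_for_each_entry(pos, *phase, link)
 *			do_something(pos);
 */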
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_request *request;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_link)
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
}
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
int ret;
DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
file->driver_priv = file_priv;
file_priv->dev_priv = i915;
file_priv->file = file;
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)
kfree(file_priv);
return ret;
}
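/*
 * i915_gem_open() and i915_gem_release() are the per-client bookends: open
 * allocates file->driver_priv, release detaches any outstanding requests from
 * the soon-to-be-freed file_priv.  They are reached through the DRM file
 * hooks, roughly as wired up in i915_drv.c (sketch only; handler names shown
 * from memory, not authoritative):
 *
 *	static struct drm_driver driver = {
 *		...
 *		.open = i915_driver_open,		// -> i915_gem_open()
 *		.postclose = i915_driver_postclose,	// -> i915_gem_release()
 *	};
 */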
/**
* i915_gem_track_fb - update frontbuffer tracking
* @old: current GEM buffer for the frontbuffer slots
* @new: new GEM buffer for the frontbuffer slots
* @frontbuffer_bits: bitmask of frontbuffer slots
*
* This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
* from @old and setting them in @new. Both @old and @new can be NULL.
*/
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
{
/* Control of individual bits within the mask is guarded by
* the owning plane->mutex, i.e. we can never see concurrent
* manipulation of individual bits. But since the bitfield as a whole
* is updated using RMW, we need to use atomics in order to update
* the bits.
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
BITS_PER_TYPE(atomic_t));
if (old) {
WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
}
if (new) {
WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
}
}
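/*
 * Minimal sketch of the lock-free RMW used above (the bit index is just a
 * placeholder): each frontbuffer slot is a bit in an atomic_t, so ownership
 * can migrate between objects without taking any modeset lock:
 *
 *	atomic_t bits = ATOMIC_INIT(0);
 *
 *	atomic_or(BIT(3), &bits);	// claim a slot
 *	atomic_andnot(BIT(3), &bits);	// release it again
 */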
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
const void *data, size_t size)
{
struct drm_i915_gem_object *obj;
struct file *file;
size_t offset;
int err;
obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
offset = 0;
do {
unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
struct page *page;
void *pgdata, *vaddr;
err = pagecache_write_begin(file, file->f_mapping,
offset, len, 0,
&page, &pgdata);
if (err < 0)
goto fail;
vaddr = kmap(page);
memcpy(vaddr, data, len);
kunmap(page);
err = pagecache_write_end(file, file->f_mapping,
offset, len, len,
page, pgdata);
if (err < 0)
goto fail;
size -= len;
data += len;
offset += len;
} while (size);
return obj;
fail:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
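/*
 * Hedged caller-side sketch (names are placeholders): this helper is handy
 * for copying a blob that already lives in kernel memory, e.g. firmware
 * contents, into a shmem-backed GEM object in one call:
 *
 *	obj = i915_gem_object_create_from_data(i915, blob, blob_size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */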
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n,
unsigned int *offset)
{
struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
struct scatterlist *sg;
unsigned int idx, count;
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
/* As we iterate forward through the sg, we record each entry in a
* radixtree for quick repeated (backwards) lookups. If we have seen
* this index previously, we will have an entry for it.
*
* Initial lookup is O(N), but this is amortized to O(1) for
* sequential page access (where each new request is consecutive
* to the previous one). Repeated lookups are O(lg(obj->base.size)),
* i.e. O(1) with a large constant!
*/
if (n < READ_ONCE(iter->sg_idx))
goto lookup;
mutex_lock(&iter->lock);
/* We prefer to reuse the last sg so that repeated lookups of this
* (or the subsequent) sg are fast - comparing against the last
* sg is faster than going through the radixtree.
*/
sg = iter->sg_pos;
idx = iter->sg_idx;
count = __sg_page_count(sg);
while (idx + count <= n) {
void *entry;
unsigned long i;
int ret;
/* If we cannot allocate and insert this entry, or the
* individual pages from this range, cancel updating the
* sg_idx so that on this lookup we are forced to linearly
* scan onwards, but on future lookups we will try the
* insertion again (in which case we need to be careful of
* the error return reporting that we have already inserted
* this index).
*/
ret = radix_tree_insert(&iter->radix, idx, sg);
if (ret && ret != -EEXIST)
goto scan;
entry = xa_mk_value(idx);
for (i = 1; i < count; i++) {
ret = radix_tree_insert(&iter->radix, idx + i, entry);
if (ret && ret != -EEXIST)
goto scan;
}
idx += count;
sg = ____sg_next(sg);
count = __sg_page_count(sg);
}
scan:
iter->sg_pos = sg;
iter->sg_idx = idx;
mutex_unlock(&iter->lock);
if (unlikely(n < idx)) /* insertion completed by another thread */
goto lookup;
/* In case we failed to insert the entry into the radixtree, we need
* to look beyond the current sg.
*/
while (idx + count <= n) {
idx += count;
sg = ____sg_next(sg);
count = __sg_page_count(sg);
}
*offset = n - idx;
return sg;
lookup:
rcu_read_lock();
sg = radix_tree_lookup(&iter->radix, n);
GEM_BUG_ON(!sg);
	/* If this index is in the middle of a multi-page sg entry,
* the radix tree will contain a value entry that points
* to the start of that range. We will return the pointer to
* the base page and the offset of this page within the
* sg entry's range.
*/
*offset = 0;
if (unlikely(xa_is_value(sg))) {
unsigned long base = xa_to_value(sg);
sg = radix_tree_lookup(&iter->radix, base);
GEM_BUG_ON(!sg);
*offset = n - base;
}
rcu_read_unlock();
return sg;
}
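/* Return the struct page backing page index @n; the object must be backed
 * by struct pages, cf. i915_gem_object_has_struct_page().
 */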
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
struct scatterlist *sg;
unsigned int offset;
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
sg = i915_gem_object_get_sg(obj, n, &offset);
return nth_page(sg_page(sg), offset);
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n)
{
struct page *page;
page = i915_gem_object_get_page(obj, n);
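	/* When the whole object is already tracked as dirty, every page is
	 * marked dirty again when the pages are released, so the per-page
	 * set_page_dirty() is only needed while obj->mm.dirty is unset.
	 */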
if (!obj->mm.dirty)
set_page_dirty(page);
return page;
}
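/* Return the DMA address of page index @n as mapped for the device in the
 * object's scatterlist.
 */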
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n)
{
struct scatterlist *sg;
unsigned int offset;
sg = i915_gem_object_get_sg(obj, n, &offset);
return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
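/*
 * Replace the object's shmemfs backing store with a single contiguous
 * physical allocation (i915_gem_phys_ops), as needed by hardware that reads
 * directly from physical addresses (e.g. the legacy cursor/overlay paths).
 * Acquiring pages through the new backend copies the existing contents
 * across before the old page set is released.
 */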
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
struct sg_table *pages;
int err;
if (align > obj->base.size)
return -EINVAL;
if (obj->ops == &i915_gem_phys_ops)
return 0;
if (obj->ops != &i915_gem_object_ops)
return -EINVAL;
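	/* The backing storage is about to change; drop all GTT bindings so
	 * that nothing keeps mapping the old pages.
	 */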
err = i915_gem_object_unbind(obj);
if (err)
return err;
mutex_lock(&obj->mm.lock);
if (obj->mm.madv != I915_MADV_WILLNEED) {
err = -EFAULT;
goto err_unlock;
}
if (obj->mm.quirked) {
err = -EFAULT;
goto err_unlock;
}
if (obj->mm.mapping) {
err = -EBUSY;
goto err_unlock;
}
pages = __i915_gem_object_unset_pages(obj);
obj->ops = &i915_gem_phys_ops;
err = ____i915_gem_object_get_pages(obj);
if (err)
goto err_xfer;
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
if (!IS_ERR_OR_NULL(pages))
i915_gem_object_ops.put_pages(obj, pages);
mutex_unlock(&obj->mm.lock);
return 0;
err_xfer:
obj->ops = &i915_gem_object_ops;
if (!IS_ERR_OR_NULL(pages)) {
unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
}
err_unlock:
mutex_unlock(&obj->mm.lock);
return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#include "selftests/i915_gem.c"
#endif