Diffstat (limited to 'drivers/gpu/drm/i915/i915_gpu_error.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c	203
1 file changed, 172 insertions, 31 deletions
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f020c0086fbc..0c38bfb60c9a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -187,64 +187,64 @@ i915_error_printer(struct drm_i915_error_state_buf *e)
}
/* single threaded page allocator with a reserved stash for emergencies */
-static void pool_fini(struct pagevec *pv)
+static void pool_fini(struct folio_batch *fbatch)
{
- pagevec_release(pv);
+ folio_batch_release(fbatch);
}
-static int pool_refill(struct pagevec *pv, gfp_t gfp)
+static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
- while (pagevec_space(pv)) {
- struct page *p;
+ while (folio_batch_space(fbatch)) {
+ struct folio *folio;
- p = alloc_page(gfp);
- if (!p)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
return -ENOMEM;
- pagevec_add(pv, p);
+ folio_batch_add(fbatch, folio);
}
return 0;
}
-static int pool_init(struct pagevec *pv, gfp_t gfp)
+static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
{
int err;
- pagevec_init(pv);
+ folio_batch_init(fbatch);
- err = pool_refill(pv, gfp);
+ err = pool_refill(fbatch, gfp);
if (err)
- pool_fini(pv);
+ pool_fini(fbatch);
return err;
}
-static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
+static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
- struct page *p;
+ struct folio *folio;
- p = alloc_page(gfp);
- if (!p && pagevec_count(pv))
- p = pv->pages[--pv->nr];
+ folio = folio_alloc(gfp, 0);
+ if (!folio && folio_batch_count(fbatch))
+ folio = fbatch->folios[--fbatch->nr];
- return p ? page_address(p) : NULL;
+ return folio ? folio_address(folio) : NULL;
}
-static void pool_free(struct pagevec *pv, void *addr)
+static void pool_free(struct folio_batch *fbatch, void *addr)
{
- struct page *p = virt_to_page(addr);
+ struct folio *folio = virt_to_folio(addr);
- if (pagevec_space(pv))
- pagevec_add(pv, p);
+ if (folio_batch_space(fbatch))
+ folio_batch_add(fbatch, folio);
else
- __free_page(p);
+ folio_put(folio);
}
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
struct i915_vma_compress {
- struct pagevec pool;
+ struct folio_batch pool;
struct z_stream_s zstream;
void *tmp;
};
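
(The hunk above converts the error-capture page pool from the removed pagevec API to folio_batch; both types carry a small fixed-size array of pointers, so the "reserved stash for emergencies" behaviour is unchanged. A minimal usage sketch of these pool helpers, assuming the definitions above; the GFP flags and error handling are illustrative, not taken from the patch:

	struct folio_batch pool;
	void *addr;

	/* Pre-fill the emergency stash; pool_init() drops it again on failure. */
	if (pool_init(&pool, GFP_KERNEL))
		return;

	/* Prefer a fresh page; fall back to the stash under memory pressure. */
	addr = pool_alloc(&pool, GFP_ATOMIC);
	if (addr) {
		/* ... write up to PAGE_SIZE bytes at addr ... */
		pool_free(&pool, addr);	/* restashed if there is room, else freed */
	}

	pool_fini(&pool);	/* release any folios still in the stash */
)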
@@ -381,7 +381,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
struct i915_vma_compress {
- struct pagevec pool;
+ struct folio_batch pool;
};
static bool compress_init(struct i915_vma_compress *c)
@@ -808,10 +808,15 @@ static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
for (ee = gt->engine; ee; ee = ee->next) {
const struct i915_vma_coredump *vma;
- if (ee->guc_capture_node)
- intel_guc_capture_print_engine_node(m, ee);
- else
+ if (gt->uc && gt->uc->guc.is_guc_capture) {
+ if (ee->guc_capture_node)
+ intel_guc_capture_print_engine_node(m, ee);
+ else
+ err_printf(m, " Missing GuC capture node for %s\n",
+ ee->engine->name);
+ } else {
error_print_engine(m, ee);
+ }
err_printf(m, " hung: %u\n", ee->hung);
err_printf(m, " engine reset count: %u\n", ee->reset_count);
@@ -1117,10 +1122,14 @@ i915_vma_coredump_create(const struct intel_gt *gt,
mutex_lock(&ggtt->error_mutex);
if (ggtt->vm.raw_insert_page)
ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
+ 0);
else
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
+ 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
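
(Both arms of the branch above now perform the same translation: i915_gem_get_pat_index() maps the driver's I915_CACHE_* level onto the platform's PAT table index, which is what the insert hooks take on newer hardware. A possible cleanup, sketched here rather than part of the patch, would hoist the common lookup:

	unsigned int pat_index =
		i915_gem_get_pat_index(gt->i915, I915_CACHE_NONE);

	if (ggtt->vm.raw_insert_page)
		ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot, pat_index, 0);
	else
		ggtt->vm.insert_page(&ggtt->vm, dma, slot, pat_index, 0);
)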
@@ -2162,7 +2171,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
* i915_capture_error_state - capture an error record for later analysis
* @gt: intel_gt which originated the hang
* @engine_mask: hung engines
- *
+ * @dump_flags: dump flags
*
* Should be called when an error is detected (either a hang or an error
* interrupt) to capture error state from the time of the error. Fills
@@ -2219,3 +2228,135 @@ void i915_disable_error_state(struct drm_i915_private *i915, int err)
i915->gpu_error.first_error = ERR_PTR(err);
spin_unlock_irq(&i915->gpu_error.lock);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_klog_error_capture(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask)
+{
+ static int g_count;
+ struct drm_i915_private *i915 = gt->i915;
+ struct i915_gpu_coredump *error;
+ intel_wakeref_t wakeref;
+ size_t buf_size = PAGE_SIZE * 128;
+ size_t pos_err;
+ char *buf, *ptr, *next;
+ int l_count = g_count++;
+ int line = 0;
+
+ /* Can't allocate memory during a reset */
+ if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
+ drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n",
+ l_count, line++);
+ return;
+ }
+
+ error = READ_ONCE(i915->gpu_error.first_error);
+ if (error) {
+ drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n",
+ l_count, line++);
+ i915_reset_error_state(i915);
+ }
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE);
+
+ if (IS_ERR(error)) {
+ drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error capture: %ld!\n",
+ l_count, line++, PTR_ERR(error));
+ return;
+ }
+
+ buf = kvmalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n",
+ l_count, line++);
+ i915_gpu_coredump_put(error);
+ return;
+ }
+
+ drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n",
+ l_count, line++, __builtin_return_address(0));
+
+ /* Largest string length safe to print via dmesg */
+# define MAX_CHUNK 800
+
+ pos_err = 0;
+ while (1) {
+ ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1);
+
+ if (got <= 0)
+ break;
+
+ buf[got] = 0;
+ pos_err += got;
+
+ ptr = buf;
+ while (got > 0) {
+ size_t count;
+ char tag[2];
+
+ next = strnchr(ptr, got, '\n');
+ if (next) {
+ count = next - ptr;
+ *next = 0;
+ tag[0] = '>';
+ tag[1] = '<';
+ } else {
+ count = got;
+ tag[0] = '}';
+ tag[1] = '{';
+ }
+
+ if (count > MAX_CHUNK) {
+ size_t pos;
+ char *ptr2 = ptr;
+
+ for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) {
+ char chr = ptr[pos];
+
+ ptr[pos] = 0;
+ drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n",
+ l_count, line++, ptr2);
+ ptr[pos] = chr;
+ ptr2 = ptr + pos;
+
+ /*
+ * If spewing large amounts of data via a serial console,
+ * this can be a very slow process. So be friendly and try
+ * not to cause 'softlockup on CPU' problems.
+ */
+ cond_resched();
+ }
+
+ if (ptr2 < (ptr + count))
+ drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
+ l_count, line++, tag[0], ptr2, tag[1]);
+ else if (tag[0] == '>')
+ drm_info(&i915->drm, "[Capture/%d.%d] ><\n",
+ l_count, line++);
+ } else {
+ drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
+ l_count, line++, tag[0], ptr, tag[1]);
+ }
+
+ ptr = next;
+ got -= count;
+ if (next) {
+ ptr++;
+ got--;
+ }
+
+ /* As above. */
+ cond_resched();
+ }
+
+ if (got)
+ drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n",
+ l_count, line++, got);
+ }
+
+ kvfree(buf);
+
+ drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
+}
+#endif
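
(The new intel_klog_error_capture() splits the capture into dmesg-safe pieces of at most MAX_CHUNK bytes, tagging complete lines with >...< and forced mid-line splits with }...{ so a post-processing script can stitch the dump back together. A sketch of a call site, assuming a CONFIG_DRM_I915_DEBUG_GEM build and the driver's ALL_ENGINES mask; the placement in the hang path is illustrative, not taken from this patch:

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	/* Dump the error capture to dmesg before the reset discards it. */
	intel_klog_error_capture(gt, ALL_ENGINES);
#endif
)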