// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_topology.h"

#include <linux/bitmap.h>

#include "regs/xe_gt_regs.h"
#include "xe_gt.h"
#include "xe_mmio.h"

#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
#define XE_MAX_EU_FUSE_BITS (32 * XE_MAX_EU_FUSE_REGS)

static void
load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
{
	va_list argp;
	u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {};
	int i;

	if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))
		numregs = XE_MAX_DSS_FUSE_REGS;

	va_start(argp, numregs);
	for (i = 0; i < numregs; i++)
		fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
	va_end(argp);

	bitmap_from_arr32(mask, fuse_val, numregs * 32);
}
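
/*
 * Illustrative sketch, not driver code: bitmap_from_arr32() lays the
 * fuse words out with register 0 in mask bits 0-31, register 1 in bits
 * 32-63, and so on.  With hypothetical fuse values fuse_val[0] ==
 * 0x0000ffff (DSS 0-15 present) and fuse_val[1] == 0x00000001 (DSS 32
 * present), and numregs == 2, the resulting mask has bits 0-15 and
 * bit 32 set.
 */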

static void
load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
	u32 val = 0;
	int i;

	BUILD_BUG_ON(XE_MAX_EU_FUSE_REGS > 1);

	/*
	 * Pre-Xe_HP platforms inverted the bit meaning (disable instead
	 * of enable).
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		reg_val = ~reg_val & XELP_EU_MASK;

	/* On PVC, one bit = one EU */
	if (GRAPHICS_VERx100(xe) == 1260) {
		val = reg_val;
	} else {
		/* All other platforms, one bit = 2 EU */
		for (i = 0; i < fls(reg_val); i++)
			if (reg_val & BIT(i))
				val |= 0x3 << 2 * i;
	}

	bitmap_from_arr32(mask, &val, XE_MAX_EU_FUSE_BITS);
}
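
/*
 * Worked example with a hypothetical fuse value, not driver code: on a
 * non-PVC platform each fuse bit describes two EUs, so reg_val ==
 * 0b0101 (EU pairs 0 and 2 enabled) expands to
 *
 *	val = (0x3 << 0) | (0x3 << 4) == 0b00110011
 *
 * i.e. EUs 0, 1, 4 and 5 are reported present in the per-DSS mask.
 */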

static void
get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
{
	if (GRAPHICS_VER(xe) >= 20) {
		*geometry_regs = 3;
		*compute_regs = 3;
	} else if (GRAPHICS_VERx100(xe) == 1260) {
		*geometry_regs = 0;
		*compute_regs = 2;
	} else if (GRAPHICS_VERx100(xe) >= 1250) {
		*geometry_regs = 1;
		*compute_regs = 1;
	} else {
		*geometry_regs = 1;
		*compute_regs = 0;
	}
}
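
/*
 * For reference, the cases above map to platforms as follows
 * (GRAPHICS_VERx100() encodes, e.g., version 12.60 as 1260):
 *
 *	version 20+ (Xe2 and later):	3 geometry regs, 3 compute regs
 *	12.60 (PVC):			0 geometry regs, 2 compute regs
 *	12.50+ (Xe_HP/Xe_HPG):		1 geometry reg,  1 compute reg
 *	earlier (Xe_LP):		1 geometry reg,  0 compute regs
 */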

void
xe_gt_topology_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct drm_printer p = drm_debug_printer("GT topology");
	int num_geometry_regs, num_compute_regs;

	get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);

	/*
	 * Register counts returned shouldn't exceed the number of registers
	 * passed as parameters below.
	 */
	drm_WARN_ON(&xe->drm, num_geometry_regs > 3);
	drm_WARN_ON(&xe->drm, num_compute_regs > 3);

	load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
		      num_geometry_regs,
		      XELP_GT_GEOMETRY_DSS_ENABLE,
		      XE2_GT_GEOMETRY_DSS_1,
		      XE2_GT_GEOMETRY_DSS_2);
	load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
		      XEHP_GT_COMPUTE_DSS_ENABLE,
		      XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
		      XE2_GT_COMPUTE_DSS_2);
	load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);

	xe_gt_topology_dump(gt, &p);
}

void
xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
{
	drm_printf(p, "dss mask (geometry): %*pb\n", XE_MAX_DSS_FUSE_BITS,
		   gt->fuse_topo.g_dss_mask);
	drm_printf(p, "dss mask (compute):  %*pb\n", XE_MAX_DSS_FUSE_BITS,
		   gt->fuse_topo.c_dss_mask);

	drm_printf(p, "EU mask per DSS:     %*pb\n", XE_MAX_EU_FUSE_BITS,
		   gt->fuse_topo.eu_mask_per_dss);

}
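
/*
 * Example of the dump format with hypothetical fuse values: %*pb prints
 * a bitmap as comma-separated 32-bit hex words, most significant word
 * first, so with XE_MAX_DSS_FUSE_REGS == 3 a geometry mask with DSS
 * 0-15 present would dump as
 *
 *	dss mask (geometry): 00000000,00000000,0000ffff
 */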

/*
 * Used to obtain the index of the first DSS.  Can start searching from the
 * beginning of a specific DSS group (e.g., gslice, cslice, etc.) if
 * groupsize and groupnum are non-zero.
 */
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
{
	return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
}
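
/*
 * Usage sketch with hypothetical numbers, not driver code: a groupsize
 * of 16 and a groupnum of 1 starts the search at bit 16 and returns the
 * first present DSS of the second group.  find_next_bit() returns the
 * bitmap size (XE_MAX_DSS_FUSE_BITS) when no bit is set at or beyond
 * the starting offset, so callers must range-check the result rather
 * than treat it as a valid DSS index.
 */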

bool xe_dss_mask_empty(const xe_dss_mask_t mask)
{
	return bitmap_empty(mask, XE_MAX_DSS_FUSE_BITS);
}

/**
 * xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
 * @gt: GT to check
 * @quad: Which quadrant of the DSS space to check
 *
 * Since Xe_HP platforms can have up to four CCS engines, those engines
 * are each logically associated with a quarter of the possible DSS.  If there
 * are no DSS present in one of the four quadrants of the DSS space, the
 * corresponding CCS engine is also not available for use.
 *
 * Returns false if all DSS in a quadrant of the GT are fused off, else true.
 */
bool xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad)
{
	struct xe_device *xe = gt_to_xe(gt);
	xe_dss_mask_t all_dss;
	int g_dss_regs, c_dss_regs, dss_per_quad, quad_first;

	bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
		  XE_MAX_DSS_FUSE_BITS);

	get_num_dss_regs(xe, &g_dss_regs, &c_dss_regs);
	dss_per_quad = 32 * max(g_dss_regs, c_dss_regs) / 4;

	quad_first = xe_dss_mask_group_ffs(all_dss, dss_per_quad, quad);

	return quad_first < (quad + 1) * dss_per_quad;
}
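
/*
 * Worked example for a hypothetical Xe_HP-style part, not driver code:
 * with one geometry and one compute DSS register, dss_per_quad is
 * 32 * 1 / 4 = 8, so quadrant 2 spans DSS 16-23.  For a combined mask
 * of 0x00ff00ff, xe_dss_mask_group_ffs() returns 16, and 16 < 24, so
 * quadrant 2 has DSS present and its CCS engine is usable.
 */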