Source at commit cae94b0ad9d147152af77b971a7234faf20027a9
("drm/radeon/kms: allow R500 regs VAP_ALT_NUM_VERTICES and VAP_INDEX_OFFSET", Marek Olšák)

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   via MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
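		/* The read below acts as a posting read: it makes sure the
		 * invalidate request has reached the hardware before the
		 * bit is cleared again. */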
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
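	/* Sketch of the PTE layout as implied by the packing below (the flag
	 * meaning is an assumption, not taken from a register spec): the bus
	 * address is stored shifted right by 8, with address bits [39:32]
	 * landing in entry bits [31:24].  Pages are 4 KiB aligned, so the low
	 * entry bits are free and 0xc marks the entry for the GART walker. */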
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so no need
	 * for cpu_to_le32 on VRAM tables */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RV370 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
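	/* The GART aperture lives in the low 32 bits of the GPU address
	 * space on this ASIC, so the HI halves of START/END stay zero. */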
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
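	/* Each PACKET0(reg, n) header is followed by n+1 dwords written to
	 * consecutive registers starting at reg; with n == 0 every header
	 * below carries exactly one register value. */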
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
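	/* The CP writes the fence sequence number into the driver's scratch
	 * register, then raises a software interrupt so waiters re-check it. */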
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: rv380 one pipes ? */
	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(R300_RE_SCISSORS_TL, 0);
			WREG32(R300_RE_SCISSORS_BR, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
415 | DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", |
416 | tmp); |
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}

int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
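	/* A plausible reading of the ordering below (an inference, not from
	 * the docs): reset the downstream blocks (2D, GA) before the CP so
	 * the command processor cannot immediately re-busy a block that was
	 * just reset. */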
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & RADEON_RBBM_ACTIVE) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
456 | DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); |
	return 0;
}


/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	u32 tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
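	/* The channel-count field encodes the memory bus width:
	 * 0 -> 64-bit, 1 -> 128-bit, 2 -> 256-bit (per the mapping below). */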
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
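	/* Two-step sequence implied below: program the new width first, then
	 * set RECONFIG_NOW in a second write so the link controller latches
	 * and applies the change. */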
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	if (rdev->family < CHIP_R600)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	else
		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}

static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib->ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);
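	/* For every register write the checker either patches the IB in place
	 * with a relocated GPU offset or records state in 'track' so the
	 * draw-time checks in r100_cs_track_check() can validate buffers. */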

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
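		/* Consecutive per-buffer registers sit 4 bytes apart, so the
		 * color buffer index is the register offset divided by 4. */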
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_TXO_MACRO_TILE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_TXO_MICRO_TILE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
		tmp |= tile_flags;
		ib[idx] = tmp;
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2088:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = idx_value & 0x3FFC;
		break;
	case 0x4104:
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* Pass through. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
			break;
		}
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		break;
	case 0x4d1c:
		/* ZB_BW_CNTL */
		track->fastfill = !!(idx_value & (1 << 2));
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		goto fail;
	}
	return 0;
fail:
	printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
	       reg, idx);
	return -EINVAL;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* The number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* The number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
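	/* A command stream is a sequence of packets: type-0 packets write
	 * registers (validated against the reg_safe bitmap), type-2 packets
	 * are padding, and type-3 packets carry draw/state commands. */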
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program the MC; this should be a 32-bit limited address space */
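	/* FB_START and FB_TOP are programmed in 64 KiB units, hence the
	 * >> 16 on the byte addresses below. */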
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}
	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
1295 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
1296 | return r; |
1297 | } |
1298 | r = r100_wb_init(rdev); |
1299 | if (r) |
1300 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); |
1301 | r = r100_ib_init(rdev); |
1302 | if (r) { |
1303 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
		return r;
	}
	return 0;
}

int r300_resume(struct radeon_device *rdev)
{
	/* Make sure the GARTs are disabled */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r300_startup(rdev);
}

int r300_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop acceleration */
1440 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
1441 | r100_cp_fini(rdev); |
1442 | r100_wb_fini(rdev); |
1443 | r100_ib_fini(rdev); |
1444 | radeon_irq_kms_fini(rdev); |
1445 | if (rdev->flags & RADEON_IS_PCIE) |
1446 | rv370_pcie_gart_fini(rdev); |
1447 | if (rdev->flags & RADEON_IS_PCI) |
1448 | r100_pci_gart_fini(rdev); |
1449 | radeon_agp_fini(rdev); |
1450 | rdev->accel_working = false; |
1451 | } |
1452 | return 0; |
1453 | } |
1454 |