Update to DRM CVS as of 2006-04-09. The most notable new feature is the updated
Radeon memmap code, which with a new DDX driver and DRI drivers should fix
long-term stability issues with Radeons.  Also adds support for r200's
ATI_fragment_shader, r300 texrect support and texture caching fixes, i915
vblank support and bugfixes, and new PCI IDs.
This commit is contained in:
Eric Anholt 2006-04-09 20:45:45 +00:00
parent 43293c6937
commit 72ab0eff0f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=157617
21 changed files with 1266 additions and 770 deletions

View File

@ -9,9 +9,11 @@ for i in `ls *.[ch]`; do
done
cp /usr/src/drm/bsd-core/*.[ch] .
rm i810*.[ch]
rm -f i810*.[ch]
rm via*.[ch]
(cd /usr/src/drm/bsd-core/ && make drm_pciids.h)
# Replace drm_pciids.h with one with a $FreeBSD$
line=`grep \\\$FreeBSD drm_pciids.h.cvs`
rm -f drm_pciids.h

View File

@ -123,7 +123,7 @@ __FBSDID("$FreeBSD$");
#if defined(__linux__) || defined(__NetBSD__)
#define DRM_MAJOR 226
#endif
#define DRM_MAX_MINOR 255
#define DRM_MAX_MINOR 15
#endif
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */

View File

@ -45,16 +45,6 @@ __FBSDID("$FreeBSD$");
static int
drm_device_find_capability(drm_device_t *dev, int cap)
{
int ret;
if (dev->driver.device_is_agp != NULL) {
ret = (*dev->driver.device_is_agp)(dev);
if (ret != DRM_MIGHT_BE_AGP) {
return ret == 2;
}
}
#ifdef __FreeBSD__
#if __FreeBSD_version >= 700010
@ -104,7 +94,7 @@ int drm_device_is_agp(drm_device_t *dev)
* AGP, 2 = fall back to PCI capability
*/
ret = (*dev->driver.device_is_agp)(dev);
if (ret != 2)
if (ret != DRM_MIGHT_BE_AGP)
return ret;
}

View File

@ -6,94 +6,136 @@
Please contact dri-devel@lists.sf.net to add new cards to this list
*/
#define radeon_PCI_IDS \
{0x1002, 0x4136, CHIP_RS100|CHIP_IS_IGP, "ATI Radeon RS100 IGP 320M"}, \
{0x1002, 0x4137, CHIP_RS200|CHIP_IS_IGP, "ATI Radeon RS200 IGP"}, \
{0x1002, 0x4144, CHIP_R300, "ATI Radeon AD 9500 Pro"}, \
{0x1002, 0x3150, CHIP_RV380|CHIP_IS_MOBILITY, "ATI Radeon Mobility X600 M24"}, \
{0x1002, 0x3152, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X300 M24"}, \
{0x1002, 0x3154, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI FireGL M24 GL"}, \
{0x1002, 0x3E50, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV380 X600"}, \
{0x1002, 0x3E54, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI FireGL V3200 RV380"}, \
{0x1002, 0x4136, CHIP_RS100|CHIP_IS_IGP, "ATI Radeon RS100 IGP 320"}, \
{0x1002, 0x4137, CHIP_RS200|CHIP_IS_IGP, "ATI Radeon RS200 IGP 340"}, \
{0x1002, 0x4144, CHIP_R300, "ATI Radeon AD 9500"}, \
{0x1002, 0x4145, CHIP_R300, "ATI Radeon AE 9700 Pro"}, \
{0x1002, 0x4146, CHIP_R300, "ATI Radeon AF 9700 Pro"}, \
{0x1002, 0x4147, CHIP_R300, "ATI FireGL AG Z1/X1"}, \
{0x1002, 0x4146, CHIP_R300, "ATI Radeon AF R300 9600TX"}, \
{0x1002, 0x4147, CHIP_R300, "ATI FireGL AG Z1"}, \
{0x1002, 0x4148, CHIP_R350, "ATI Radeon AH 9800 SE"}, \
{0x1002, 0x4149, CHIP_R350, "ATI Radeon AI 9800"}, \
{0x1002, 0x414A, CHIP_R350, "ATI Radeon AJ 9800"}, \
{0x1002, 0x414B, CHIP_R350, "ATI FireGL AK X2"}, \
{0x1002, 0x4150, CHIP_RV350, "ATI Radeon AP 9600"}, \
{0x1002, 0x4151, CHIP_RV350, "ATI Radeon AQ 9600"}, \
{0x1002, 0x4152, CHIP_RV350, "ATI Radeon AR 9600"}, \
{0x1002, 0x4153, CHIP_RV350, "ATI Radeon AS 9600 AS"}, \
{0x1002, 0x4151, CHIP_RV350, "ATI Radeon AQ 9600 SE"}, \
{0x1002, 0x4152, CHIP_RV350, "ATI Radeon AR 9600 XT"}, \
{0x1002, 0x4153, CHIP_RV350, "ATI Radeon AS 9550"}, \
{0x1002, 0x4154, CHIP_RV350, "ATI FireGL AT T2"}, \
{0x1002, 0x4156, CHIP_RV350, "ATI FireGL AV T2"}, \
{0x1002, 0x4237, CHIP_RS250|CHIP_IS_IGP, "ATI Radeon RS250 IGP"}, \
{0x1002, 0x4155, CHIP_RV350, "ATI Radeon 9650"}, \
{0x1002, 0x4156, CHIP_RV350, "ATI FireGL AV RV360 T2"}, \
{0x1002, 0x4237, CHIP_RS200|CHIP_IS_IGP, "ATI Radeon RS250 IGP"}, \
{0x1002, 0x4242, CHIP_R200, "ATI Radeon BB R200 AIW 8500DV"}, \
{0x1002, 0x4243, CHIP_R200, "ATI Radeon BC R200"}, \
{0x1002, 0x4336, CHIP_RS100|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS100 Mobility U1"}, \
{0x1002, 0x4337, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS200 Mobility IGP 340M"}, \
{0x1002, 0x4437, CHIP_RS250|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS250 Mobility IGP"}, \
{0x1002, 0x4964, CHIP_R250, "ATI Radeon Id R250 9000"}, \
{0x1002, 0x4965, CHIP_R250, "ATI Radeon Ie R250 9000"}, \
{0x1002, 0x4966, CHIP_R250, "ATI Radeon If R250 9000"}, \
{0x1002, 0x4967, CHIP_R250, "ATI Radeon Ig R250 9000"}, \
{0x1002, 0x4A49, CHIP_R420, "ATI Radeon JI R420 X800PRO"}, \
{0x1002, 0x4A4B, CHIP_R420, "ATI Radeon JK R420 X800 XT"}, \
{0x1002, 0x4437, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS250 Mobility IGP"}, \
{0x1002, 0x4966, CHIP_RV250, "ATI Radeon If RV250 9000"}, \
{0x1002, 0x4967, CHIP_RV250, "ATI Radeon Ig RV250 9000"}, \
{0x1002, 0x4A48, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JH R420 X800"}, \
{0x1002, 0x4A49, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JI R420 X800 Pro"}, \
{0x1002, 0x4A4A, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JJ R420 X800 SE"}, \
{0x1002, 0x4A4B, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JK R420 X800 XT"}, \
{0x1002, 0x4A4C, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JL R420 X800"}, \
{0x1002, 0x4A4D, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL JM X3-256"}, \
{0x1002, 0x4A4E, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon JN R420 Mobility M18"}, \
{0x1002, 0x4A4F, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JO R420 X800 SE"}, \
{0x1002, 0x4A50, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JP R420 X800 XT PE"}, \
{0x1002, 0x4A54, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JT R420 AIW X800 VE"}, \
{0x1002, 0x4B49, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 XT"}, \
{0x1002, 0x4B4A, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 SE"}, \
{0x1002, 0x4B4B, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 Pro"}, \
{0x1002, 0x4B4C, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 XT PE"}, \
{0x1002, 0x4C57, CHIP_RV200|CHIP_IS_MOBILITY, "ATI Radeon LW RV200 Mobility 7500 M7"}, \
{0x1002, 0x4C58, CHIP_RV200|CHIP_IS_MOBILITY, "ATI Radeon LX RV200 Mobility FireGL 7800 M7"}, \
{0x1002, 0x4C59, CHIP_RV100|CHIP_IS_MOBILITY, "ATI Radeon LY RV100 Mobility M6"}, \
{0x1002, 0x4C5A, CHIP_RV100|CHIP_IS_MOBILITY, "ATI Radeon LZ RV100 Mobility M6"}, \
{0x1002, 0x4C64, CHIP_R250|CHIP_IS_MOBILITY, "ATI Radeon Ld R250 Mobility 9000 M9"}, \
{0x1002, 0x4C65, CHIP_R250|CHIP_IS_MOBILITY, "ATI Radeon Le R250 Mobility 9000 M9"}, \
{0x1002, 0x4C66, CHIP_R250|CHIP_IS_MOBILITY, "ATI Radeon Lf R250 Mobility 9000 M9"}, \
{0x1002, 0x4C67, CHIP_R250|CHIP_IS_MOBILITY, "ATI Radeon Lg R250 Mobility 9000 M9"}, \
{0x1002, 0x4C64, CHIP_RV250|CHIP_IS_MOBILITY, "ATI Radeon Ld RV250 Mobility 9000 M9"}, \
{0x1002, 0x4C66, CHIP_RV250, "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI"}, \
{0x1002, 0x4C67, CHIP_RV250|CHIP_IS_MOBILITY, "ATI Radeon Lg RV250 Mobility 9000 M9"}, \
{0x1002, 0x4E44, CHIP_R300, "ATI Radeon ND R300 9700 Pro"}, \
{0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro"}, \
{0x1002, 0x4E46, CHIP_RV350, "ATI Radeon NF RV350 9600"}, \
{0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro / 9700"}, \
{0x1002, 0x4E46, CHIP_R300, "ATI Radeon NF R300 9600TX"}, \
{0x1002, 0x4E47, CHIP_R300, "ATI Radeon NG R300 FireGL X1"}, \
{0x1002, 0x4E48, CHIP_R350, "ATI Radeon NH R350 9800 Pro"}, \
{0x1002, 0x4E49, CHIP_R350, "ATI Radeon NI R350 9800"}, \
{0x1002, 0x4E4A, CHIP_RV350, "ATI Radeon NJ RV350 9800 XT"}, \
{0x1002, 0x4E4B, CHIP_R350, "ATI Radeon NK R350 FireGL X2"}, \
{0x1002, 0x4E50, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV300 Mobility 9600 M10"}, \
{0x1002, 0x4E4A, CHIP_R350, "ATI Radeon NJ R360 9800 XT"}, \
{0x1002, 0x4E4B, CHIP_R350, "ATI FireGL NK X2"}, \
{0x1002, 0x4E50, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NP"}, \
{0x1002, 0x4E51, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NQ"}, \
{0x1002, 0x4E54, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon FireGL T2 128"}, \
{0x1002, 0x4E56, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon FireGL Mobility T2e"}, \
{0x1002, 0x4E52, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M11 NR"}, \
{0x1002, 0x4E53, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NS"}, \
{0x1002, 0x4E54, CHIP_RV350|CHIP_IS_MOBILITY, "ATI FireGL T2/T2e"}, \
{0x1002, 0x4E56, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon Mobility 9550"}, \
{0x1002, 0x5144, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QD R100"}, \
{0x1002, 0x5145, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QE R100"}, \
{0x1002, 0x5146, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QF R100"}, \
{0x1002, 0x5147, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QG R100"}, \
{0x1002, 0x5148, CHIP_R200, "ATI Radeon QH R200 8500"}, \
{0x1002, 0x5149, CHIP_R200, "ATI Radeon QI R200"}, \
{0x1002, 0x514A, CHIP_R200, "ATI Radeon QJ R200"}, \
{0x1002, 0x514B, CHIP_R200, "ATI Radeon QK R200"}, \
{0x1002, 0x514C, CHIP_R200, "ATI Radeon QL R200 8500 LE"}, \
{0x1002, 0x514D, CHIP_R200, "ATI Radeon QM R200 9100"}, \
{0x1002, 0x514E, CHIP_R200, "ATI Radeon QN R200 8500 LE"}, \
{0x1002, 0x514F, CHIP_R200, "ATI Radeon QO R200 8500 LE"}, \
{0x1002, 0x5157, CHIP_RV200, "ATI Radeon QW RV200 7500"}, \
{0x1002, 0x5158, CHIP_RV200, "ATI Radeon QX RV200 7500"}, \
{0x1002, 0x5159, CHIP_RV100, "ATI Radeon QY RV100 7000/VE"}, \
{0x1002, 0x515A, CHIP_RV100, "ATI Radeon QZ RV100 7000/VE"}, \
{0x1002, 0x515E, CHIP_RV100, "ATI ES1000 RN50"}, \
{0x1002, 0x5168, CHIP_R200, "ATI Radeon Qh R200"}, \
{0x1002, 0x5169, CHIP_R200, "ATI Radeon Qi R200"}, \
{0x1002, 0x516A, CHIP_R200, "ATI Radeon Qj R200"}, \
{0x1002, 0x516B, CHIP_R200, "ATI Radeon Qk R200"}, \
{0x1002, 0x516C, CHIP_R200, "ATI Radeon Ql R200"}, \
{0x1002, 0x5460, CHIP_RV350, "ATI Radeon X300"}, \
{0x1002, 0x554F, CHIP_R350, "ATI Radeon X800"}, \
{0x1002, 0x5834, CHIP_RS300|CHIP_IS_IGP, "ATI Radeon RS300 IGP"}, \
{0x1002, 0x5460, CHIP_RV380|CHIP_IS_MOBILITY, "ATI Radeon Mobility X300 M22"}, \
{0x1002, 0x5462, CHIP_RV380|CHIP_IS_MOBILITY, "ATI Radeon Mobility X600 SE M24C"}, \
{0x1002, 0x5464, CHIP_RV380|CHIP_IS_MOBILITY, "ATI FireGL M22 GL 5464"}, \
{0x1002, 0x5548, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800"}, \
{0x1002, 0x5549, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 Pro"}, \
{0x1002, 0x554A, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 XT PE"}, \
{0x1002, 0x554B, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 SE"}, \
{0x1002, 0x554C, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800 XTP"}, \
{0x1002, 0x554D, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800 XL"}, \
{0x1002, 0x554E, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800 SE"}, \
{0x1002, 0x554F, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800"}, \
{0x1002, 0x5550, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL V7100 R423"}, \
{0x1002, 0x5551, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL V5100 R423 UQ"}, \
{0x1002, 0x5552, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL unknown R423 UR"}, \
{0x1002, 0x5554, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL unknown R423 UT"}, \
{0x1002, 0x564A, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
{0x1002, 0x564B, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
{0x1002, 0x564F, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X700 XL M26"}, \
{0x1002, 0x5652, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
{0x1002, 0x5653, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
{0x1002, 0x5834, CHIP_RS300|CHIP_IS_IGP, "ATI Radeon RS300 9100 IGP"}, \
{0x1002, 0x5835, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS300 Mobility IGP"}, \
{0x1002, 0x5836, CHIP_RS300|CHIP_IS_IGP, "ATI Radeon RS300 IGP"}, \
{0x1002, 0x5837, CHIP_RS300|CHIP_IS_IGP, "ATI Radeon RS300 IGP"}, \
{0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
{0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9250"}, \
{0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5962, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5963, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5964, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
{0x1002, 0x5968, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5965, CHIP_RV280, "ATI FireMV 2200 PCI"}, \
{0x1002, 0x5969, CHIP_RV100, "ATI ES1000 RN50"}, \
{0x1002, 0x596A, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x596B, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5b60, CHIP_RV350, "ATI Radeon RV370 X300SE"}, \
{0x1002, 0x5b60, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV370 X300 SE"}, \
{0x1002, 0x5b62, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV370 X600 Pro"}, \
{0x1002, 0x5b63, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV370 X550"}, \
{0x1002, 0x5b64, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI FireGL V3100 (RV370) 5B64"}, \
{0x1002, 0x5b65, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI FireMV 2200 PCIE (RV370) 5B65"}, \
{0x1002, 0x5c61, CHIP_RV280|CHIP_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
{0x1002, 0x5c62, CHIP_RV280, "ATI Radeon RV280"}, \
{0x1002, 0x5c63, CHIP_RV280|CHIP_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
{0x1002, 0x5c64, CHIP_RV280, "ATI Radeon RV280"}, \
{0x1002, 0x5d4d, CHIP_R350, "ATI Radeon R480"}, \
{0x1002, 0x5e4b, CHIP_R420, "ATI Radeon RV410 X700PRO"}, \
{0x1002, 0x5d48, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility Radeon X800 XT M28"}, \
{0x1002, 0x5d49, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility FireGL V5100 M28"}, \
{0x1002, 0x5d4a, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility Radeon X800 M28"}, \
{0x1002, 0x5d4c, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850"}, \
{0x1002, 0x5d4d, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 XT PE"}, \
{0x1002, 0x5d4e, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 SE"}, \
{0x1002, 0x5d4f, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 Pro"}, \
{0x1002, 0x5d50, CHIP_R420|CHIP_NEW_MEMMAP, "ATI unknown Radeon / FireGL R480"}, \
{0x1002, 0x5d52, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 XT"}, \
{0x1002, 0x5d57, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 XT"}, \
{0x1002, 0x5e48, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI FireGL V5000 RV410"}, \
{0x1002, 0x5e4a, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 XT"}, \
{0x1002, 0x5e4b, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 Pro"}, \
{0x1002, 0x5e4c, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
{0x1002, 0x5e4d, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700"}, \
{0x1002, 0x5e4f, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
{0x1002, 0x7834, CHIP_RS300|CHIP_IS_IGP|CHIP_NEW_MEMMAP, "ATI Radeon RS350 9000/9100 IGP"}, \
{0x1002, 0x7835, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon RS350 Mobility IGP"}, \
{0, 0, 0, NULL}
#define r128_PCI_IDS \
@ -245,6 +287,7 @@
{0x8086, 0x2582, 0, "Intel i915G"}, \
{0x8086, 0x2592, 0, "Intel i915GM"}, \
{0x8086, 0x2772, 0, "Intel i945G"}, \
{0x8086, 0x27A2, 0, "Intel i945GM"}, \
{0, 0, 0, NULL}
#define imagine_PCI_IDS \

View File

@ -347,18 +347,20 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
int i;
RING_LOCALS;
if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
return DRM_ERR(EINVAL);
BEGIN_LP_RING(((dwords+1)&~1));
for (i = 0; i < dwords;) {
int cmd, sz;
if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
return DRM_ERR(EINVAL);
/* printk("%d/%d ", i, dwords); */
if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
return DRM_ERR(EINVAL);
BEGIN_LP_RING(sz);
OUT_RING(cmd);
while (++i, --sz) {
@ -368,9 +370,13 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
}
OUT_RING(cmd);
}
ADVANCE_LP_RING();
}
if (dwords & 1)
OUT_RING(0);
ADVANCE_LP_RING();
return 0;
}
@ -404,6 +410,22 @@ static int i915_emit_box(drm_device_t * dev,
return 0;
}
/*
 * Emit a "breadcrumb" marker into the ring buffer: bump the submission
 * counter, publish it in the SAREA as last_enqueue, and have the hardware
 * store the new counter value into the status page so completion can be
 * observed later via READ_BREADCRUMB.
 */
static void i915_emit_breadcrumb(drm_device_t *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
/* Record this submission's sequence number in the shared area. */
dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20); /* byte offset 20 = dword index 5, matching READ_BREADCRUMB */
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
drm_i915_cmdbuffer_t * cmd)
{
@ -432,6 +454,7 @@ static int i915_dispatch_cmdbuffer(drm_device_t * dev,
return ret;
}
i915_emit_breadcrumb( dev );
return 0;
}
@ -476,15 +499,7 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
}
}
dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
i915_emit_breadcrumb( dev );
return 0;
}
@ -667,8 +682,11 @@ static int i915_getparam(DRM_IOCTL_ARGS)
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
default:
DRM_ERROR("Unkown parameter %d\n", param.param);
DRM_ERROR("Unknown parameter %d\n", param.param);
return DRM_ERR(EINVAL);
}
@ -756,7 +774,8 @@ drm_ioctl_desc_t i915_ioctls[] = {
[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}
[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

View File

@ -77,6 +77,30 @@ typedef struct _drm_i915_sarea {
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
int width, height; /* screen size in pixels */
drm_handle_t front_handle;
int front_offset;
int front_size;
drm_handle_t back_handle;
int back_offset;
int back_size;
drm_handle_t depth_handle;
int depth_offset;
int depth_size;
drm_handle_t tex_handle;
int tex_offset;
int tex_size;
int log_tex_granularity;
int pitch;
int rotation; /* 0, 90, 180 or 270 */
int rotated_offset;
int rotated_size;
int rotated_pitch;
int virtualX, virtualY;
} drm_i915_sarea_t;
/* Flags for perf_boxes
@ -102,6 +126,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@ -115,6 +140,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@ -154,6 +181,7 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
typedef struct drm_i915_getparam {
int param;
@ -193,4 +221,11 @@ typedef struct drm_i915_mem_init_heap {
int start;
} drm_i915_mem_init_heap_t;
/* Argument structure for the DRM_I915_DESTROY_HEAP ioctl.  Allows the
 * memory manager to be torn down and re-initialized (e.g. on rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
int region; /* which heap/region to tear down */
} drm_i915_mem_destroy_heap_t;
#endif /* _I915_DRM_H_ */

View File

@ -40,15 +40,17 @@ __FBSDID("$FreeBSD$");
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20041217"
#define DRIVER_DATE "20060119"
/* Interface history:
*
* 1.1: Original.
* 1.2: Add Power Management
* 1.3: Add vblank support
* 1.4: Fix cmdbuffer path, add heap destroy
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
#define DRIVER_MINOR 4
#define DRIVER_PATCHLEVEL 0
typedef struct _drm_i915_ring_buffer {
@ -115,6 +117,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_irq_emit(DRM_IOCTL_ARGS);
extern int i915_irq_wait(DRM_IOCTL_ARGS);
extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(drm_device_t * dev);
extern void i915_driver_irq_postinstall(drm_device_t * dev);
@ -124,6 +127,7 @@ extern void i915_driver_irq_uninstall(drm_device_t * dev);
extern int i915_mem_alloc(DRM_IOCTL_ARGS);
extern int i915_mem_free(DRM_IOCTL_ARGS);
extern int i915_mem_init_heap(DRM_IOCTL_ARGS);
extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(drm_device_t * dev,
DRMFILE filp, struct mem_block *heap);
@ -265,4 +269,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5])
#endif

View File

@ -34,9 +34,11 @@ __FBSDID("$FreeBSD$");
#include "dev/drm/i915_drm.h"
#include "dev/drm/i915_drv.h"
#define USER_INT_FLAG 0x2
#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)
#define MAX_NOPID ((u32)~0)
#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5])
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
@ -45,7 +47,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
u16 temp;
temp = I915_READ16(I915REG_INT_IDENTITY_R);
temp &= USER_INT_FLAG;
temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG);
DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
@ -53,7 +56,17 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
DRM_WAKEUP(&dev_priv->irq_queue);
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
if (temp & USER_INT_FLAG)
DRM_WAKEUP(&dev_priv->irq_queue);
if (temp & VSYNC_PIPEA_FLAG) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
}
return IRQ_HANDLED;
}
@ -104,6 +117,26 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
return ret;
}
/*
 * Sleep (up to 3 * DRM_HZ) until the vblank counter reaches *sequence,
 * then write back the counter value actually observed.  Returns 0 on
 * success, or a DRM error code if the driver is uninitialized or the
 * wait fails/times out.
 */
int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
/* The unsigned subtraction copes with counter wraparound: the wait is
 * satisfied once vbl_received is at, or no more than 2^23 counts past,
 * the requested sequence number.
 */
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1<<23)));
*sequence = cur_vblank;
return ret;
}
/* Needs the lock as it touches the ring.
*/
int i915_irq_emit(DRM_IOCTL_ARGS)
@ -167,17 +200,21 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG);
I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | VSYNC_PIPEA_FLAG);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
/*
 * Shut down i915 interrupt delivery on driver teardown: mask and disable
 * all interrupt sources, then clear whatever is still pending.
 */
void i915_driver_irq_uninstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 temp;
if (!dev_priv)
return;
/* Mask everything: status reporting, interrupt mask, and enables. */
I915_WRITE16(I915REG_HWSTAM, 0xffff);
I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/* Write back the pending identity bits — presumably acknowledges any
 * outstanding interrupts, mirroring the handler's ack sequence;
 * NOTE(review): confirm against the chipset register documentation.
 */
temp = I915_READ16(I915REG_INT_IDENTITY_R);
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}

View File

@ -368,3 +368,33 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS)
return init_heap(heap, initheap.start, initheap.size);
}
/*
 * Ioctl handler for DRM_I915_DESTROY_HEAP: look up the heap named by the
 * user-supplied region id and tear it down via i915_mem_takedown(), so
 * the memory manager can be re-initialized (e.g. on rotate).  Returns 0
 * on success, EINVAL if the driver is uninitialized, or EFAULT if the
 * region is unknown or the heap was never set up.
 */
int i915_mem_destroy_heap( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_mem_destroy_heap_t destroyheap;
struct mem_block **heap;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
return DRM_ERR(EINVAL);
}
/* Copy the ioctl argument (region selector) in from userspace. */
DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data,
sizeof(destroyheap) );
heap = get_heap( dev_priv, destroyheap.region );
if (!heap) {
DRM_ERROR("get_heap failed");
return DRM_ERR(EFAULT);
}
/* A valid region whose heap was never initialized is also an error. */
if (!*heap) {
DRM_ERROR("heap not initialized?");
return DRM_ERR(EFAULT);
}
i915_mem_takedown( heap );
return 0;
}

View File

@ -41,11 +41,11 @@ __FBSDID("$FreeBSD$");
#define DRIVER_NAME "mga"
#define DRIVER_DESC "Matrox G200/G400"
#define DRIVER_DATE "20051102"
#define DRIVER_DATE "20060319"
#define DRIVER_MAJOR 3
#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 1
#define DRIVER_PATCHLEVEL 2
typedef struct drm_mga_primary_buffer {
u8 *start;

View File

@ -40,7 +40,6 @@ __FBSDID("$FreeBSD$");
#include "dev/drm/radeon_drv.h"
#include "dev/drm/r300_reg.h"
#define R300_SIMULTANEOUS_CLIPRECTS 4
/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
@ -52,14 +51,12 @@ static const int r300_cliprect_cntl[4] = {
0xFFFE
};
/**
* Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
* buffer, starting with index n.
*/
static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
int n)
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
drm_clip_rect_t box;
int nr;
@ -73,38 +70,47 @@ static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
DRM_DEBUG("%i cliprects\n", nr);
if (nr) {
BEGIN_RING(6 + nr*2);
OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
BEGIN_RING(6 + nr * 2);
OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
for(i = 0; i < nr; ++i) {
if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
for (i = 0; i < nr; ++i) {
if (DRM_COPY_FROM_USER_UNCHECKED
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return DRM_ERR(EFAULT);
}
box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x1 =
(box.x1 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y1 =
(box.y1 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x2 =
(box.x2 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y2 =
(box.y2 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
(box.y2 << R300_CLIPRECT_Y_SHIFT));
}
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
/* TODO/SECURITY: Force scissors to a safe value, otherwise the
* client might be able to trample over memory.
* The impact should be very limited, but I'd rather be safe than
* sorry.
*/
OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
OUT_RING( 0 );
OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
* client might be able to trample over memory.
* The impact should be very limited, but I'd rather be safe than
* sorry.
*/
OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
OUT_RING(0);
OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
ADVANCE_RING();
} else {
} else {
/* Why we allow zero cliprect rendering:
* There are some commands in a command buffer that must be submitted
* even when there are no cliprects, e.g. DMA buffer discard
@ -121,28 +127,27 @@ static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
* can't produce any fragments.
*/
BEGIN_RING(2);
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
ADVANCE_RING();
}
}
return 0;
}
u8 r300_reg_flags[0x10000>>2];
static u8 r300_reg_flags[0x10000 >> 2];
void r300_init_reg_flags(void)
{
int i;
memset(r300_reg_flags, 0, 0x10000>>2);
#define ADD_RANGE_MARK(reg, count,mark) \
memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
r300_reg_flags[i]|=(mark);
#define MARK_SAFE 1
#define MARK_CHECK_OFFSET 2
#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
#define MARK_SAFE 1
#define MARK_CHECK_OFFSET 2
#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
@ -159,6 +164,7 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
ADD_RANGE(R300_TX_CNTL, 1);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
@ -196,27 +202,28 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
ADD_RANGE(0x4E10, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
ADD_RANGE(0x4F10, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1);
ADD_RANGE(0x4F30, 2);
ADD_RANGE(0x4F44, 1);
ADD_RANGE(0x4F54, 1);
ADD_RANGE(R300_TX_FILTER_0, 16);
ADD_RANGE(R300_TX_UNK1_0, 16);
ADD_RANGE(R300_TX_FILTER1_0, 16);
ADD_RANGE(R300_TX_SIZE_0, 16);
ADD_RANGE(R300_TX_FORMAT_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE(R300_TX_PITCH_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
ADD_RANGE(R300_TX_UNK4_0, 16);
ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */
@ -227,33 +234,43 @@ void r300_init_reg_flags(void)
}
static __inline__ int r300_check_range(unsigned reg, int count)
static __inline__ int r300_check_range(unsigned reg, int count)
{
int i;
if(reg & ~0xffff)return -1;
for(i=(reg>>2);i<(reg>>2)+count;i++)
if(r300_reg_flags[i]!=MARK_SAFE)return 1;
if (reg & ~0xffff)
return -1;
for (i = (reg >> 2); i < (reg >> 2) + count; i++)
if (r300_reg_flags[i] != MARK_SAFE)
return 1;
return 0;
}
/* we expect offsets passed to the framebuffer to be either within video memory or
within AGP space */
static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
/*
* we expect offsets passed to the framebuffer to be either within video
* memory or within AGP space
*/
static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
u32 offset)
{
/* we realy want to check against end of video aperture
but this value is not being kept.
This code is correct for now (does the same thing as the
code that sets MC_FB_LOCATION) in radeon_cp.c */
if((offset>=dev_priv->fb_location) &&
(offset<dev_priv->gart_vm_start))return 0;
if((offset>=dev_priv->gart_vm_start) &&
(offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
but this value is not being kept.
This code is correct for now (does the same thing as the
code that sets MC_FB_LOCATION) in radeon_cp.c */
if (offset >= dev_priv->fb_location &&
offset < (dev_priv->fb_location + dev_priv->fb_size))
return 0;
if (offset >= dev_priv->gart_vm_start &&
offset < (dev_priv->gart_vm_start + dev_priv->gart_size))
return 0;
return 1;
}
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
dev_priv,
drm_radeon_kcmd_buffer_t
* cmdbuf,
drm_r300_cmd_header_t
header)
{
int reg;
int sz;
@ -263,35 +280,40 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t*
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if((sz>64)||(sz<0)){
DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
if ((sz > 64) || (sz < 0)) {
DRM_ERROR
("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
reg, sz);
return DRM_ERR(EINVAL);
}
for(i=0;i<sz;i++){
values[i]=((int __user*)cmdbuf->buf)[i];
switch(r300_reg_flags[(reg>>2)+i]){
}
for (i = 0; i < sz; i++) {
values[i] = ((int *)cmdbuf->buf)[i];
switch (r300_reg_flags[(reg >> 2) + i]) {
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
if(r300_check_offset(dev_priv, (u32)values[i])){
DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
if (r300_check_offset(dev_priv, (u32) values[i])) {
DRM_ERROR
("Offset failed range check (reg=%04x sz=%d)\n",
reg, sz);
return DRM_ERR(EINVAL);
}
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
DRM_ERROR("Register %04x failed check as flag=%02x\n",
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
return DRM_ERR(EINVAL);
}
}
BEGIN_RING(1+sz);
OUT_RING( CP_PACKET0( reg, sz-1 ) );
OUT_RING_TABLE( values, sz );
}
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
OUT_RING_TABLE(values, sz);
ADVANCE_RING();
cmdbuf->buf += sz*4;
cmdbuf->bufsz -= sz*4;
cmdbuf->buf += sz * 4;
cmdbuf->bufsz -= sz * 4;
return 0;
}
@ -302,9 +324,9 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t*
*
* Note that checks are performed on contents and addresses of the registers
*/
static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int reg;
int sz;
@ -316,39 +338,40 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
if (!sz)
return 0;
if (sz*4 > cmdbuf->bufsz)
if (sz * 4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
if (reg+sz*4 >= 0x10000){
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
return DRM_ERR(EINVAL);
}
if(r300_check_range(reg, sz)){
if (reg + sz * 4 >= 0x10000) {
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
sz);
return DRM_ERR(EINVAL);
}
if (r300_check_range(reg, sz)) {
/* go and check everything */
return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
}
return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
header);
}
/* the rest of the data is safe to emit, whatever the values the user passed */
BEGIN_RING(1+sz);
OUT_RING( CP_PACKET0( reg, sz-1 ) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz);
ADVANCE_RING();
cmdbuf->buf += sz*4;
cmdbuf->bufsz -= sz*4;
cmdbuf->buf += sz * 4;
cmdbuf->bufsz -= sz * 4;
return 0;
}
/**
* Uploads user-supplied vertex program instructions or parameters onto
* the graphics card.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int sz;
@ -360,104 +383,156 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
if (!sz)
return 0;
if (sz*16 > cmdbuf->bufsz)
if (sz * 16 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
BEGIN_RING(5+sz*4);
BEGIN_RING(5 + sz * 4);
/* Wait for VAP to come to senses.. */
/* there is no need to emit it multiple times, (only once before VAP is programmed,
but this optimization is for later */
OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
ADVANCE_RING();
cmdbuf->buf += sz*16;
cmdbuf->bufsz -= sz*16;
cmdbuf->buf += sz * 16;
cmdbuf->bufsz -= sz * 16;
return 0;
}
/**
* Emit a clear packet from userspace.
* Called by r300_emit_packet3.
*/
static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
RING_LOCALS;
if (8*4 > cmdbuf->bufsz)
if (8 * 4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
BEGIN_RING(10);
OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
(1<<R300_PRIM_NUM_VERTICES_SHIFT) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
(1 << R300_PRIM_NUM_VERTICES_SHIFT));
OUT_RING_TABLE((int *)cmdbuf->buf, 8);
ADVANCE_RING();
cmdbuf->buf += 8*4;
cmdbuf->bufsz -= 8*4;
cmdbuf->buf += 8 * 4;
cmdbuf->bufsz -= 8 * 4;
return 0;
}
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
u32 header)
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
u32 header)
{
int count, i,k;
#define MAX_ARRAY_PACKET 64
int count, i, k;
#define MAX_ARRAY_PACKET 64
u32 payload[MAX_ARRAY_PACKET];
u32 narrays;
RING_LOCALS;
count=(header>>16) & 0x3fff;
if((count+1)>MAX_ARRAY_PACKET){
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
count = (header >> 16) & 0x3fff;
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return DRM_ERR(EINVAL);
}
memset(payload, 0, MAX_ARRAY_PACKET*4);
memcpy(payload, cmdbuf->buf+4, (count+1)*4);
}
memset(payload, 0, MAX_ARRAY_PACKET * 4);
memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
/* carefully check packet contents */
narrays=payload[0];
k=0;
i=1;
while((k<narrays) && (i<(count+1))){
i++; /* skip attribute field */
if(r300_check_offset(dev_priv, payload[i])){
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
narrays = payload[0];
k = 0;
i = 1;
while ((k < narrays) && (i < (count + 1))) {
i++; /* skip attribute field */
if (r300_check_offset(dev_priv, payload[i])) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
return DRM_ERR(EINVAL);
}
}
k++;
i++;
if(k==narrays)break;
if (k == narrays)
break;
/* have one more to process, they come in pairs */
if(r300_check_offset(dev_priv, payload[i])){
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
if (r300_check_offset(dev_priv, payload[i])) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
return DRM_ERR(EINVAL);
}
}
k++;
i++;
}
i++;
}
/* do the counts match what we expect ? */
if((k!=narrays) || (i!=(count+1))){
DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
if ((k != narrays) || (i != (count + 1))) {
DRM_ERROR
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
k, i, narrays, count + 1);
return DRM_ERR(EINVAL);
}
}
/* all clear, output packet */
BEGIN_RING(count+2);
BEGIN_RING(count + 2);
OUT_RING(header);
OUT_RING_TABLE(payload, count+1);
OUT_RING_TABLE(payload, count + 1);
ADVANCE_RING();
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
return 0;
}
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd = (u32 *) cmdbuf->buf;
int count, ret;
RING_LOCALS;
count=(cmd[0]>>16) & 0x3fff;
if (cmd[0] & 0x8000) {
u32 offset;
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
ret = r300_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
return DRM_ERR(EINVAL);
}
}
if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[3] << 10;
ret = r300_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
return DRM_ERR(EINVAL);
}
}
}
BEGIN_RING(count+2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
@ -466,8 +541,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
return 0;
}
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 header;
int count;
@ -476,36 +551,40 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
if (4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
/* Fixme !! This simply emits a packet without much checking.
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
header = *(u32 __user*)cmdbuf->buf;
header = *(u32 *) cmdbuf->buf;
/* Is it packet 3 ? */
if( (header>>30)!=0x3 ) {
if ((header >> 30) != 0x3) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
}
}
count=(header>>16) & 0x3fff;
count = (header >> 16) & 0x3fff;
/* Check again now that we know how much data to expect */
if ((count+2)*4 > cmdbuf->bufsz){
DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
(count+2)*4, cmdbuf->bufsz);
if ((count + 2) * 4 > cmdbuf->bufsz) {
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
(count + 2) * 4, cmdbuf->bufsz);
return DRM_ERR(EINVAL);
}
}
/* Is it a packet type we know about ? */
switch(header & 0xff00){
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
switch (header & 0xff00) {
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */
@ -513,32 +592,30 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
default:
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
}
}
BEGIN_RING(count+2);
BEGIN_RING(count + 2);
OUT_RING(header);
OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
return 0;
}
/**
* Emit a rendering packet3 from userspace.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int n;
int ret;
char __user* orig_buf = cmdbuf->buf;
char *orig_buf = cmdbuf->buf;
int orig_bufsz = cmdbuf->bufsz;
/* This is a do-while-loop so that we run the interior at least once,
@ -553,16 +630,16 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
cmdbuf->buf = orig_buf;
cmdbuf->bufsz = orig_bufsz;
}
}
switch(header.packet3.packet) {
switch (header.packet3.packet) {
case R300_CMD_PACKET3_CLEAR:
DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
ret = r300_emit_clear(dev_priv, cmdbuf);
if (ret) {
DRM_ERROR("r300_emit_clear failed\n");
return ret;
}
}
break;
case R300_CMD_PACKET3_RAW:
@ -571,18 +648,18 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
if (ret) {
DRM_ERROR("r300_emit_raw_packet3 failed\n");
return ret;
}
}
break;
default:
DRM_ERROR("bad packet3 type %i at %p\n",
header.packet3.packet,
cmdbuf->buf - sizeof(header));
header.packet3.packet,
cmdbuf->buf - sizeof(header));
return DRM_ERR(EINVAL);
}
}
n += R300_SIMULTANEOUS_CLIPRECTS;
} while(n < cmdbuf->nbox);
} while (n < cmdbuf->nbox);
return 0;
}
@ -601,21 +678,20 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
/**
* Emit the sequence to pacify R300.
*/
static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
RING_LOCALS;
BEGIN_RING(6);
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
OUT_RING( 0xa );
OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
OUT_RING( 0x3 );
OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
OUT_RING( 0x0 );
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(0xa);
OUT_RING(CP_PACKET0(0x4f18, 0));
OUT_RING(0x3);
OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
OUT_RING(0x0);
ADVANCE_RING();
}
/**
* Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
@ -631,20 +707,76 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
buf->used = 0;
}
static int r300_scratch(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
u32 *ref_age_base;
u32 i, buf_idx, h_pending;
RING_LOCALS;
if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) {
return DRM_ERR(EINVAL);
}
if (header.scratch.reg >= 5) {
return DRM_ERR(EINVAL);
}
dev_priv->scratch_ages[header.scratch.reg] ++;
ref_age_base = *(u32 **)cmdbuf->buf;
cmdbuf->buf += sizeof(uint64_t);
cmdbuf->bufsz -= sizeof(uint64_t);
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = *(u32 *)cmdbuf->buf;
buf_idx *= 2; /* 8 bytes per buf */
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
return DRM_ERR(EINVAL);
}
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
return DRM_ERR(EINVAL);
}
if (h_pending == 0) {
return DRM_ERR(EINVAL);
}
h_pending--;
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
return DRM_ERR(EINVAL);
}
cmdbuf->buf += sizeof(buf_idx);
cmdbuf->bufsz -= sizeof(buf_idx);
}
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
ADVANCE_RING();
return 0;
}
/**
* Parses and validates a user-supplied command buffer and emits appropriate
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
int r300_do_cp_cmdbuf(drm_device_t* dev,
DRMFILE filp,
drm_file_t* filp_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
int r300_do_cp_cmdbuf(drm_device_t *dev,
DRMFILE filp,
drm_file_t *filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;
@ -658,9 +790,9 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
if (ret)
goto cleanup;
}
}
while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
int idx;
drm_r300_cmd_header_t header;
@ -669,14 +801,14 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
switch(header.header.cmd_type) {
case R300_CMD_PACKET0:
switch (header.header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
ret = r300_emit_packet0(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
}
}
break;
case R300_CMD_VPU:
@ -685,7 +817,7 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
}
}
break;
case R300_CMD_PACKET3:
@ -694,26 +826,26 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
}
}
break;
case R300_CMD_END3D:
DRM_DEBUG("R300_CMD_END3D\n");
/* TODO:
Ideally userspace driver should not need to issue this call,
i.e. the drm driver should issue it automatically and prevent
lockups.
In practice, we do not understand why this call is needed and what
it does (except for some vague guesses that it has to do with cache
coherence) and so the user space driver does it.
Once we are sure which uses prevent lockups the code could be moved
into the kernel and the userspace driver will not
need to use this command.
/* TODO:
Ideally userspace driver should not need to issue this call,
i.e. the drm driver should issue it automatically and prevent
lockups.
Note that issuing this command does not hurt anything
except, possibly, performance */
In practice, we do not understand why this call is needed and what
it does (except for some vague guesses that it has to do with cache
coherence) and so the user space driver does it.
Once we are sure which uses prevent lockups the code could be moved
into the kernel and the userspace driver will not
need to use this command.
Note that issuing this command does not hurt anything
except, possibly, performance */
r300_pacify(dev_priv);
break;
@ -725,7 +857,7 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
RING_LOCALS;
BEGIN_RING(header.delay.count);
for(i=0;i<header.delay.count;i++)
for (i = 0; i < header.delay.count; i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
@ -733,53 +865,63 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
idx = header.dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
idx = header.dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
buf = dma->buflist[idx];
if (buf->filp != filp || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
}
buf = dma->buflist[idx];
if (buf->filp != filp || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
emit_dispatch_age = 1;
r300_discard_buffer(dev, buf);
break;
break;
case R300_CMD_WAIT:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_WAIT\n");
if(header.wait.flags==0)break; /* nothing to do */
if (header.wait.flags == 0)
break; /* nothing to do */
{
RING_LOCALS;
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
OUT_RING( (header.wait.flags & 0xf)<<14 );
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING((header.wait.flags & 0xf) << 14);
ADVANCE_RING();
}
break;
case R300_CMD_SCRATCH:
DRM_DEBUG("R300_CMD_SCRATCH\n");
ret = r300_scratch(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_scratch failed\n");
goto cleanup;
}
break;
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
ret = DRM_ERR(EINVAL);
goto cleanup;
}
}
}
DRM_DEBUG("END\n");
cleanup:
cleanup:
r300_pacify(dev_priv);
/* We emit the vertex buffer age here, outside the pacifier "brackets"
@ -795,10 +937,9 @@ cleanup:
BEGIN_RING(2);
RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
ADVANCE_RING();
}
}
COMMIT_RING();
return ret;
}

View File

@ -457,6 +457,9 @@ I am fairly certain that they are correct unless stated otherwise in comments.
/* END */
/* gap */
/* Zero to flush caches. */
#define R300_TX_CNTL 0x4100
/* The upper enable bits are guessed, based on fglrx reported limits. */
#define R300_TX_ENABLE 0x4104
# define R300_TX_ENABLE_0 (1 << 0)
@ -715,8 +718,22 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
# define R300_TX_MAX_ANISO_MASK (14 << 21)
#define R300_TX_UNK1_0 0x4440
#define R300_TX_FILTER1_0 0x4440
# define R300_CHROMA_KEY_MODE_DISABLE 0
# define R300_CHROMA_KEY_FORCE 1
# define R300_CHROMA_KEY_BLEND 2
# define R300_MC_ROUND_NORMAL (0<<2)
# define R300_MC_ROUND_MPEG4 (1<<2)
# define R300_LOD_BIAS_MASK 0x1fff
# define R300_EDGE_ANISO_EDGE_DIAG (0<<13)
# define R300_EDGE_ANISO_EDGE_ONLY (1<<13)
# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14)
# define R300_MC_COORD_TRUNCATE_MPEG (1<<14)
# define R300_TX_TRI_PERF_0_8 (0<<15)
# define R300_TX_TRI_PERF_1_8 (1<<15)
# define R300_TX_TRI_PERF_1_4 (2<<15)
# define R300_TX_TRI_PERF_3_8 (3<<15)
# define R300_ANISO_THRESHOLD_MASK (7<<17)
#define R300_TX_SIZE_0 0x4480
# define R300_TX_WIDTHMASK_SHIFT 0
@ -726,6 +743,8 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_UNK23 (1 << 23)
# define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
# define R300_TX_SIZE_MASK (15 << 26)
# define R300_TX_SIZE_PROJECTED (1<<30)
# define R300_TX_SIZE_TXPITCH_EN (1<<31)
#define R300_TX_FORMAT_0 0x44C0
/* The interpretation of the format word by Wladimir van der Laan */
/* The X, Y, Z and W refer to the layout of the components.
@ -755,6 +774,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
/* 0x16 - some 16 bit green format.. ?? */
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
/* gap */
/* Floating point formats */
@ -804,16 +824,19 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_YUV_MODE 0x00800000
#define R300_TX_PITCH_0 0x4500 /* obvious missing in gap */
#define R300_TX_OFFSET_0 0x4540
/* BEGIN: Guess from R200 */
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R300_TXO_MACRO_TILE (1 << 2)
# define R300_TXO_MICRO_TILE (1 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
/* END */
#define R300_TX_UNK4_0 0x4580
#define R300_TX_CHROMA_KEY_0 0x4580 /* 32 bit chroma key */
#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
/* END */
@ -871,7 +894,9 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
# define R300_PFS_NODE_TEX_END_SHIFT 17
# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
# define R300_PFS_NODE_LAST_NODE (1 << 22)
/*# define R300_PFS_NODE_LAST_NODE (1 << 22) */
# define R300_PFS_NODE_OUTPUT_COLOR (1 << 22)
# define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23)
/* TEX
// As far as I can tell, texture instructions cannot write into output
@ -890,6 +915,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
*/
# define R300_FPITX_OPCODE_SHIFT 15
# define R300_FPITX_OP_TEX 1
# define R300_FPITX_OP_KIL 2
# define R300_FPITX_OP_TXP 3
# define R300_FPITX_OP_TXB 4
@ -965,9 +991,11 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI1_SRC2C_CONST (1 << 17)
# define R300_FPI1_DSTC_SHIFT 18
# define R300_FPI1_DSTC_MASK (31 << 18)
# define R300_FPI1_DSTC_REG_MASK_SHIFT 23
# define R300_FPI1_DSTC_REG_X (1 << 23)
# define R300_FPI1_DSTC_REG_Y (1 << 24)
# define R300_FPI1_DSTC_REG_Z (1 << 25)
# define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26
# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
@ -986,6 +1014,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI3_DSTA_MASK (31 << 18)
# define R300_FPI3_DSTA_REG (1 << 23)
# define R300_FPI3_DSTA_OUTPUT (1 << 24)
# define R300_FPI3_DSTA_DEPTH (1 << 27)
#define R300_PFS_INSTR0_0 0x48C0
# define R300_FPI0_ARGC_SRC0C_XYZ 0
@ -1039,7 +1068,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI0_OUTC_FRC (9 << 23)
# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
# define R300_FPI0_OUTC_SAT (1 << 30)
# define R300_FPI0_UNKNOWN_31 (1 << 31)
# define R300_FPI0_INSERT_NOP (1 << 31)
#define R300_PFS_INSTR2_0 0x49C0
# define R300_FPI2_ARGA_SRC0C_X 0

View File

@ -947,7 +947,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
if (dev_priv->microcode_version==UCODE_R200) {
if (dev_priv->microcode_version == UCODE_R200) {
DRM_INFO("Loading R200 Microcode\n");
for (i = 0; i < 256; i++) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
@ -955,13 +955,13 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
R200_cp_microcode[i][0]);
}
} else if (dev_priv->microcode_version==UCODE_R300) {
} else if (dev_priv->microcode_version == UCODE_R300) {
DRM_INFO("Loading R300 Microcode\n");
for ( i = 0 ; i < 256 ; i++ ) {
RADEON_WRITE( RADEON_CP_ME_RAM_DATAH,
R300_cp_microcode[i][1] );
RADEON_WRITE( RADEON_CP_ME_RAM_DATAL,
R300_cp_microcode[i][0] );
for (i = 0; i < 256; i++) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
R300_cp_microcode[i][1]);
RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
R300_cp_microcode[i][0]);
}
} else {
for (i = 0; i < 256; i++) {
@ -1121,26 +1121,33 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
{
u32 ring_start, cur_read_ptr;
u32 tmp;
/* Initialize the memory controller */
RADEON_WRITE(RADEON_MC_FB_LOCATION,
((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16));
/* Initialize the memory controller. With new memory map, the fb location
* is not changed, it should have been properly initialized already. Part
* of the problem is that the code below is bogus, assuming the GART is
* always appended to the fb which is not necessarily the case
*/
if (!dev_priv->new_memmap)
RADEON_WRITE(RADEON_MC_FB_LOCATION,
((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
ring_start = (dev_priv->cp_ring->offset
- dev->agp->base + dev_priv->gart_vm_start);
- dev->agp->base
+ dev_priv->gart_vm_start);
} else
#endif
ring_start = (dev_priv->cp_ring->offset
- (unsigned long)dev->sg->virtual + dev_priv->gart_vm_start);
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
@ -1155,8 +1162,6 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
/* set RADEON_AGP_BASE here instead of relying on X from user space */
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
dev_priv->ring_rptr->offset
- dev->agp->base + dev_priv->gart_vm_start);
@ -1166,7 +1171,8 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
drm_sg_mem_t *entry = dev->sg;
unsigned long tmp_ofs, page_ofs;
tmp_ofs = dev_priv->ring_rptr->offset - (unsigned long)dev->sg->virtual;
tmp_ofs = dev_priv->ring_rptr->offset -
(unsigned long)dev->sg->virtual;
page_ofs = tmp_ofs >> PAGE_SHIFT;
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
@ -1175,6 +1181,17 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
entry->handle + tmp_ofs);
}
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(RADEON_CP_RB_CNTL,
dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
#else
RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
#endif
/* Start with assuming that writeback doesn't work */
dev_priv->writeback_works = 0;
/* Initialize the scratch register pointer. This will cause
* the scratch register values to be written out to memory
* whenever they are updated.
@ -1191,7 +1208,38 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
/* Writeback doesn't seem to work everywhere, test it first */
/* Turn on bus mastering */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
dev_priv->sarea_priv->last_dispatch);
dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
radeon_do_wait_for_idle(dev_priv);
/* Sync everything up */
RADEON_WRITE(RADEON_ISYNC_CNTL,
(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI));
}
static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
{
u32 tmp;
/* Writeback doesn't seem to work everywhere, test it here and possibly
* enable it if it appears to work
*/
DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
@ -1204,46 +1252,15 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
if (tmp < dev_priv->usec_timeout) {
dev_priv->writeback_works = 1;
DRM_DEBUG("writeback test succeeded, tmp=%d\n", tmp);
DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
} else {
dev_priv->writeback_works = 0;
DRM_DEBUG("writeback test failed\n");
DRM_INFO("writeback test failed\n");
}
if (radeon_no_wb == 1) {
dev_priv->writeback_works = 0;
DRM_DEBUG("writeback forced off\n");
DRM_INFO("writeback forced off\n");
}
dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
dev_priv->sarea_priv->last_dispatch);
dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(RADEON_CP_RB_CNTL,
dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
#else
RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
#endif
radeon_do_wait_for_idle(dev_priv);
/* Turn on bus mastering */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
/* Sync everything up */
RADEON_WRITE(RADEON_ISYNC_CNTL,
(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI));
}
/* Enable or disable PCI-E GART on the chip */
@ -1253,19 +1270,26 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
if (on) {
DRM_DEBUG("programming pcie %08X %08lX %08X\n",
dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr,
dev_priv->gart_vm_start,
(long)dev_priv->gart_info.bus_addr,
dev_priv->gart_size);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, dev_priv->gart_info.bus_addr);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, dev_priv->gart_vm_start
+ dev_priv->gart_size - 1);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
dev_priv->gart_info.bus_addr);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
dev_priv->gart_vm_start +
dev_priv->gart_size - 1);
RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, RADEON_PCIE_TX_GART_EN);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
RADEON_PCIE_TX_GART_EN);
} else {
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, (tmp & ~RADEON_PCIE_TX_GART_EN) | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
tmp & ~RADEON_PCIE_TX_GART_EN);
}
}
@ -1274,8 +1298,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp;
if (dev_priv->flags & CHIP_IS_PCIE)
{
if (dev_priv->flags & CHIP_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
}
@ -1309,8 +1332,17 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
/* if we require new memory map but we don't have it fail */
if ((dev_priv->flags & CHIP_NEW_MEMMAP) && !dev_priv->new_memmap)
{
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
{
DRM_DEBUG("Forcing AGP card to PCI mode\n");
@ -1333,14 +1365,13 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version=UCODE_R200;
dev_priv->microcode_version = UCODE_R200;
break;
case RADEON_INIT_R300_CP:
dev_priv->microcode_version=UCODE_R300;
dev_priv->microcode_version = UCODE_R300;
break;
default:
dev_priv->microcode_version=UCODE_R100;
break;
dev_priv->microcode_version = UCODE_R100;
}
dev_priv->do_boxes = 0;
@ -1390,8 +1421,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
*/
dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
(dev_priv->color_fmt << 10) |
(dev_priv->microcode_version == UCODE_R100 ?
RADEON_ZBLOCK16 : 0));
(dev_priv->microcode_version ==
UCODE_R100 ? RADEON_ZBLOCK16 : 0));
dev_priv->depth_clear.rb3d_zstencilcntl =
(dev_priv->depth_fmt |
@ -1491,6 +1522,9 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION)
& 0xffff) << 16;
dev_priv->fb_size =
((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
- dev_priv->fb_location;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
((dev_priv->front_offset
@ -1505,8 +1539,46 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+ dev_priv->fb_location) >> 10));
dev_priv->gart_size = init->gart_size;
dev_priv->gart_vm_start = dev_priv->fb_location
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
/* New let's set the memory map ... */
if (dev_priv->new_memmap) {
u32 base = 0;
DRM_INFO("Setting GART location based on new memory map\n");
/* If using AGP, try to locate the AGP aperture at the same
* location in the card and on the bus, though we have to
* align it down.
*/
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
base = dev->agp->base;
/* Check if valid */
if ((base + dev_priv->gart_size) > dev_priv->fb_location &&
base < (dev_priv->fb_location + dev_priv->fb_size)) {
DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
dev->agp->base);
base = 0;
}
}
#endif
/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
if (base == 0) {
base = dev_priv->fb_location + dev_priv->fb_size;
if (((base + dev_priv->gart_size) & 0xfffffffful)
< base)
base = dev_priv->fb_location
- dev_priv->gart_size;
}
dev_priv->gart_vm_start = base & 0xffc00000u;
if (dev_priv->gart_vm_start != base)
DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
base, dev_priv->gart_vm_start);
} else {
DRM_INFO("Setting GART location based on old memory map\n");
dev_priv->gart_vm_start = dev_priv->fb_location +
RADEON_READ(RADEON_CONFIG_APER_SIZE);
}
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP)
@ -1516,8 +1588,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
else
#endif
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
@ -1543,24 +1615,33 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
{
/* if we have an offset set from userspace */
if (dev_priv->pcigart_offset) {
dev_priv->gart_info.bus_addr = dev_priv->pcigart_offset + dev_priv->fb_location;
dev_priv->gart_info.mapping.offset = dev_priv->gart_info.bus_addr;
dev_priv->gart_info.mapping.size = RADEON_PCIGART_TABLE_SIZE;
drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr = dev_priv->gart_info.mapping.handle;
dev_priv->gart_info.bus_addr =
dev_priv->pcigart_offset + dev_priv->fb_location;
dev_priv->gart_info.mapping.offset =
dev_priv->gart_info.bus_addr;
dev_priv->gart_info.mapping.size =
RADEON_PCIGART_TABLE_SIZE;
dev_priv->gart_info.is_pcie = !!(dev_priv->flags & CHIP_IS_PCIE);
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB;
DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", dev_priv->gart_info.addr, dev_priv->pcigart_offset);
}
else {
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;
dev_priv->gart_info.is_pcie =
!!(dev_priv->flags & CHIP_IS_PCIE);
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_FB;
DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
dev_priv->gart_info.addr,
dev_priv->pcigart_offset);
} else {
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_MAIN;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
if (dev_priv->flags & CHIP_IS_PCIE)
{
DRM_ERROR("Cannot use PCI Express without GART in FB memory\n");
if (dev_priv->flags & CHIP_IS_PCIE) {
DRM_ERROR
("Cannot use PCI Express without GART in FB memory\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1582,6 +1663,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->last_buf = 0;
radeon_do_engine_reset(dev);
radeon_test_writeback(dev_priv);
return 0;
}
@ -1684,9 +1766,9 @@ int radeon_cp_init(DRM_IOCTL_ARGS)
DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data,
sizeof(init));
if(init.func == RADEON_INIT_R300_CP)
if (init.func == RADEON_INIT_R300_CP)
r300_init_reg_flags();
switch (init.func) {
case RADEON_INIT_CP:
case RADEON_INIT_R200_CP:
@ -1775,7 +1857,6 @@ void radeon_do_release(drm_device_t * dev)
int i, ret;
if (dev_priv) {
if (dev_priv->cp_running) {
/* Stop the cp */
while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
@ -1799,11 +1880,13 @@ void radeon_do_release(drm_device_t * dev)
if (dev_priv->mmio) /* remove this after permanent addmaps */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
if (dev_priv->mmio) {/* remove all surfaces */
if (dev_priv->mmio) { /* remove all surfaces */
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
RADEON_WRITE(RADEON_SURFACE0_INFO + 16*i, 0);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*i, 0);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*i, 0);
RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
16 * i, 0);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
16 * i, 0);
}
}
@ -2107,11 +2190,13 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
case CHIP_RV200:
case CHIP_R200:
case CHIP_R300:
case CHIP_R350:
case CHIP_R420:
case CHIP_RV410:
dev_priv->flags |= CHIP_HAS_HIERZ;
break;
default:
/* all other chips have no hierarchical z buffer */
/* all other chips have no hierarchical z buffer */
break;
}
@ -2123,7 +2208,6 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG("%s card detected\n",
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
return ret;
}

View File

@ -214,10 +214,10 @@ typedef union {
* The interface has not been stabilized, so some of these may be removed
* and eventually reordered before stabilization.
*/
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
@ -225,6 +225,7 @@ typedef union {
# define R300_WAIT_3D 0x2
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
#define R300_CMD_SCRATCH 8
typedef union {
unsigned int u;
@ -242,20 +243,23 @@ typedef union {
} packet3;
struct {
unsigned char cmd_type, packet;
unsigned short count; /* amount of packet2 to emit */
unsigned short count; /* amount of packet2 to emit */
} delay;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
unsigned char cmd_type, flags, pad0, pad1;
} wait;
struct {
unsigned char cmd_type, reg, n_bufs, flags;
} scratch;
} drm_r300_cmd_header_t;
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
#define RADEON_STENCIL 0x8
#define RADEON_STENCIL 0x8
#define RADEON_CLEAR_FASTZ 0x80000000
#define RADEON_USE_HIERZ 0x40000000
#define RADEON_USE_COMP_ZBUF 0x20000000
@ -627,6 +631,11 @@ typedef struct drm_radeon_indirect {
int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2
/* 1.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
@ -643,6 +652,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
typedef struct drm_radeon_getparam {
int param;
@ -693,7 +703,9 @@ typedef struct drm_radeon_setparam {
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
/* 1.14: Clients can allocate/free a surface
*/

View File

@ -41,7 +41,7 @@ __FBSDID("$FreeBSD$");
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
#define DRIVER_DATE "20050911"
#define DRIVER_DATE "20060225"
/* Interface history:
*
@ -86,32 +86,42 @@ __FBSDID("$FreeBSD$");
* 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
* texture filtering on r200
* 1.17- Add initial support for R300 (3D).
* 1.18- Add support for GL_ATI_fragment_shader, new packets R200_EMIT_PP_AFS_0/1,
R200_EMIT_PP_TXCTLALL_0-5 (replaces R200_EMIT_PP_TXFILTER_0-5, 2 more regs)
and R200_EMIT_ATF_TFACTOR (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
* 1.18- Add support for GL_ATI_fragment_shader, new packets
* R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
* R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
* (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
* 1.19- Add support for gart table in FB memory and PCIE r300
* 1.20- Add support for r300 texrect
* 1.21- Add support for card type getparam
* 1.22- Add support for texture cache flushes (R300_TX_CNTL)
* 1.23- Add new radeon memory map work from benh
* 1.24- Add general-purpose packet for manipulating scratch registers (r300)
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 19
#define DRIVER_MINOR 24
#define DRIVER_PATCHLEVEL 0
/*
* Radeon chip families
*/
enum radeon_family {
CHIP_R100,
CHIP_RS100,
CHIP_RV100,
CHIP_RS100,
CHIP_RV200,
CHIP_R200,
CHIP_RS200,
CHIP_R250,
CHIP_RS250,
CHIP_R200,
CHIP_RV250,
CHIP_RS300,
CHIP_RV280,
CHIP_R300,
CHIP_RS300,
CHIP_R350,
CHIP_RV350,
CHIP_RV380,
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
CHIP_LAST,
};
@ -131,11 +141,13 @@ enum radeon_chip_flags {
CHIP_IS_IGP = 0x00020000UL,
CHIP_SINGLE_CRTC = 0x00040000UL,
CHIP_IS_AGP = 0x00080000UL,
CHIP_HAS_HIERZ = 0x00100000UL,
CHIP_HAS_HIERZ = 0x00100000UL,
CHIP_IS_PCIE = 0x00200000UL,
CHIP_NEW_MEMMAP = 0x00400000UL,
};
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
DRM_READ32( (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR))
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
typedef struct drm_radeon_freelist {
@ -197,6 +209,8 @@ typedef struct drm_radeon_private {
drm_radeon_sarea_t *sarea_priv;
u32 fb_location;
u32 fb_size;
int new_memmap;
int gart_size;
u32 gart_vm_start;
@ -269,6 +283,9 @@ typedef struct drm_radeon_private {
unsigned long pcigart_offset;
drm_ati_pcigart_info gart_info;
u32 scratch_ages[5];
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
@ -278,6 +295,13 @@ typedef struct drm_radeon_buf_priv {
u32 age;
} drm_radeon_buf_priv_t;
typedef struct drm_radeon_kcmd_buffer {
int bufsz;
char *buf;
int nbox;
drm_clip_rect_t __user *boxes;
} drm_radeon_kcmd_buffer_t;
extern int radeon_no_wb;
extern drm_ioctl_desc_t radeon_ioctls[];
extern int radeon_max_ioctl;
@ -331,10 +355,9 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(void);
extern int r300_do_cp_cmdbuf( drm_device_t* dev,
DRMFILE filp,
drm_file_t* filp_priv,
drm_radeon_cmd_buffer_t* cmdbuf );
extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
drm_file_t* filp_priv,
drm_radeon_kcmd_buffer_t* cmdbuf);
/* Flags for stats.boxes
*/
@ -350,7 +373,6 @@ extern int r300_do_cp_cmdbuf( drm_device_t* dev,
#define RADEON_AGP_COMMAND 0x0f60
#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
# define RADEON_AGP_ENABLE (1<<8)
#define RADEON_AUX_SCISSOR_CNTL 0x26f0
# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
@ -366,6 +388,7 @@ extern int r300_do_cp_cmdbuf( drm_device_t* dev,
# define RADEON_PLL_WR_EN (1 << 7)
#define RADEON_CLOCK_CNTL_INDEX 0x0008
#define RADEON_CONFIG_APER_SIZE 0x0108
#define RADEON_CONFIG_MEMSIZE 0x00f8
#define RADEON_CRTC_OFFSET 0x0224
#define RADEON_CRTC_OFFSET_CNTL 0x0228
# define RADEON_CRTC_TILE_EN (1 << 15)

View File

@ -48,29 +48,59 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
u32 off = *offset;
struct drm_radeon_driver_file_fields *radeon_priv;
if (off >= dev_priv->fb_location &&
off < (dev_priv->gart_vm_start + dev_priv->gart_size))
/* Hrm ... the story of the offset ... So this function converts
* the various ideas of what userland clients might have for an
* offset in the card address space into an offset into the card
* address space :) So with a sane client, it should just keep
* the value intact and just do some boundary checking. However,
* not all clients are sane. Some older clients pass us 0 based
* offsets relative to the start of the framebuffer and some may
* assume the AGP aperture it appended to the framebuffer, so we
* try to detect those cases and fix them up.
*
* Note: It might be a good idea here to make sure the offset lands
* in some "allowed" area to protect things like the PCIE GART...
*/
/* First, the best case, the offset already lands in either the
* framebuffer or the GART mapped space
*/
if ((off >= dev_priv->fb_location &&
off < (dev_priv->fb_location + dev_priv->fb_size)) ||
(off >= dev_priv->gart_vm_start &&
off < (dev_priv->gart_vm_start + dev_priv->gart_size)))
return 0;
radeon_priv = filp_priv->driver_priv;
/* Ok, that didn't happen... now check if we have a zero based
* offset that fits in the framebuffer + gart space, apply the
* magic offset we get from SETPARAM or calculated from fb_location
*/
if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
radeon_priv = filp_priv->driver_priv;
off += radeon_priv->radeon_fb_delta;
}
off += radeon_priv->radeon_fb_delta;
/* Finally, assume we aimed at a GART offset if beyond the fb */
if (off > (dev_priv->fb_location + dev_priv->fb_size))
off = off - (dev_priv->fb_location + dev_priv->fb_size) +
dev_priv->gart_vm_start;
DRM_DEBUG("offset fixed up to 0x%x\n", off);
if (off < dev_priv->fb_location ||
off >= (dev_priv->gart_vm_start + dev_priv->gart_size))
return DRM_ERR(EINVAL);
*offset = off;
return 0;
/* Now recheck and fail if out of bounds */
if ((off >= dev_priv->fb_location &&
off < (dev_priv->fb_location + dev_priv->fb_size)) ||
(off >= dev_priv->gart_vm_start &&
off < (dev_priv->gart_vm_start + dev_priv->gart_size))) {
DRM_DEBUG("offset fixed up to 0x%x\n", off);
*offset = off;
return 0;
}
return DRM_ERR(EINVAL);
}
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
dev_priv,
drm_file_t * filp_priv,
int id, u32 __user * data)
int id, u32 *data)
{
switch (id) {
@ -235,8 +265,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
dev_priv,
drm_file_t * filp_priv,
drm_radeon_cmd_buffer_t *
drm_file_t *filp_priv,
drm_radeon_kcmd_buffer_t *
cmdbuf,
unsigned int *cmdsz)
{
@ -523,7 +553,7 @@ static struct {
{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
@ -565,7 +595,7 @@ static struct {
{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
@ -813,68 +843,73 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
/* hyper z clear */
/* no docs available, based on reverse engeneering by Stephane Marchesin */
if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) && (flags & RADEON_CLEAR_FASTZ)) {
if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
&& (flags & RADEON_CLEAR_FASTZ)) {
int i;
int depthpixperline = dev_priv->depth_fmt==RADEON_DEPTH_FORMAT_16BIT_INT_Z?
(dev_priv->depth_pitch / 2): (dev_priv->depth_pitch / 4);
int depthpixperline =
dev_priv->depth_fmt ==
RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch /
2) : (dev_priv->
depth_pitch / 4);
u32 clearmask;
u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
((clear->depth_mask & 0xff) << 24);
((clear->depth_mask & 0xff) << 24);
/* Make sure we restore the 3D state next time.
* we haven't touched any "normal" state - still need this?
*/
dev_priv->sarea_priv->ctx_owner = 0;
if ((dev_priv->flags & CHIP_HAS_HIERZ) && (flags & RADEON_USE_HIERZ)) {
/* FIXME : reverse engineer that for Rx00 cards */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
/* pattern seems to work for r100, though get slight
rendering errors with glxgears. If hierz is not enabled for r100,
only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
other ones are ignored, and the same clear mask can be used. That's
very different behaviour than R200 which needs different clear mask
and different number of tiles to clear if hierz is enabled or not !?!
*/
clearmask = (0xff<<22)|(0xff<<6)| 0x003f003f;
}
else {
/* clear mask : chooses the clearing pattern.
rv250: could be used to clear only parts of macrotiles
(but that would get really complicated...)?
bit 0 and 1 (either or both of them ?!?!) are used to
not clear tile (or maybe one of the bits indicates if the tile is
compressed or not), bit 2 and 3 to not clear tile 1,...,.
Pattern is as follows:
| 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
bits -------------------------------------------------
| 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
rv100: clearmask covers 2x8 4x1 tiles, but one clear still
covers 256 pixels ?!?
*/
if ((dev_priv->flags & CHIP_HAS_HIERZ)
&& (flags & RADEON_USE_HIERZ)) {
/* FIXME : reverse engineer that for Rx00 cards */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
/* pattern seems to work for r100, though get slight
rendering errors with glxgears. If hierz is not enabled for r100,
only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
other ones are ignored, and the same clear mask can be used. That's
very different behaviour than R200 which needs different clear mask
and different number of tiles to clear if hierz is enabled or not !?!
*/
clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
} else {
/* clear mask : chooses the clearing pattern.
rv250: could be used to clear only parts of macrotiles
(but that would get really complicated...)?
bit 0 and 1 (either or both of them ?!?!) are used to
not clear tile (or maybe one of the bits indicates if the tile is
compressed or not), bit 2 and 3 to not clear tile 1,...,.
Pattern is as follows:
| 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
bits -------------------------------------------------
| 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
rv100: clearmask covers 2x8 4x1 tiles, but one clear still
covers 256 pixels ?!?
*/
clearmask = 0x0;
}
BEGIN_RING( 8 );
BEGIN_RING(8);
RADEON_WAIT_UNTIL_2D_IDLE();
OUT_RING_REG( RADEON_RB3D_DEPTHCLEARVALUE,
tempRB3D_DEPTHCLEARVALUE);
OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
tempRB3D_DEPTHCLEARVALUE);
/* what offset is this exactly ? */
OUT_RING_REG( RADEON_RB3D_ZMASKOFFSET, 0 );
OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
/* need ctlstat, otherwise get some strange black flickering */
OUT_RING_REG( RADEON_RB3D_ZCACHE_CTLSTAT, RADEON_RB3D_ZC_FLUSH_ALL );
OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
RADEON_RB3D_ZC_FLUSH_ALL);
ADVANCE_RING();
for (i = 0; i < nbox; i++) {
int tileoffset, nrtilesx, nrtilesy, j;
/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
if ((dev_priv->flags&CHIP_HAS_HIERZ) && !(dev_priv->microcode_version==UCODE_R200)) {
if ((dev_priv->flags & CHIP_HAS_HIERZ)
&& !(dev_priv->microcode_version == UCODE_R200)) {
/* FIXME : figure this out for r200 (when hierz is enabled). Or
maybe r200 actually doesn't need to put the low-res z value into
the tile cache like r100, but just needs to clear the hi-level z-buffer?
@ -882,59 +917,74 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
R100 seems to operate on 2x1 8x8 tiles, but...
odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
problematic with resolutions which are not 64 pix aligned? */
tileoffset = ((pbox[i].y1 >> 3) * depthpixperline + pbox[i].x1) >> 6;
nrtilesx = ((pbox[i].x2 & ~63) - (pbox[i].x1 & ~63)) >> 4;
nrtilesy = (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
tileoffset =
((pbox[i].y1 >> 3) * depthpixperline +
pbox[i].x1) >> 6;
nrtilesx =
((pbox[i].x2 & ~63) -
(pbox[i].x1 & ~63)) >> 4;
nrtilesy =
(pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
for (j = 0; j <= nrtilesy; j++) {
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
BEGIN_RING(4);
OUT_RING(CP_PACKET3
(RADEON_3D_CLEAR_ZMASK, 2));
/* first tile */
OUT_RING( tileoffset * 8 );
OUT_RING(tileoffset * 8);
/* the number of tiles to clear */
OUT_RING( nrtilesx + 4 );
OUT_RING(nrtilesx + 4);
/* clear mask : chooses the clearing pattern. */
OUT_RING( clearmask );
OUT_RING(clearmask);
ADVANCE_RING();
tileoffset += depthpixperline >> 6;
}
}
else if (dev_priv->microcode_version==UCODE_R200) {
} else if (dev_priv->microcode_version == UCODE_R200) {
/* works for rv250. */
/* find first macro tile (8x2 4x4 z-pixels on rv250) */
tileoffset = ((pbox[i].y1 >> 3) * depthpixperline + pbox[i].x1) >> 5;
nrtilesx = (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
nrtilesy = (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
tileoffset =
((pbox[i].y1 >> 3) * depthpixperline +
pbox[i].x1) >> 5;
nrtilesx =
(pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
nrtilesy =
(pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
for (j = 0; j <= nrtilesy; j++) {
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
BEGIN_RING(4);
OUT_RING(CP_PACKET3
(RADEON_3D_CLEAR_ZMASK, 2));
/* first tile */
/* judging by the first tile offset needed, could possibly
directly address/clear 4x4 tiles instead of 8x2 * 4x4
macro tiles, though would still need clear mask for
right/bottom if truely 4x4 granularity is desired ? */
OUT_RING( tileoffset * 16 );
OUT_RING(tileoffset * 16);
/* the number of tiles to clear */
OUT_RING( nrtilesx + 1 );
OUT_RING(nrtilesx + 1);
/* clear mask : chooses the clearing pattern. */
OUT_RING( clearmask );
OUT_RING(clearmask);
ADVANCE_RING();
tileoffset += depthpixperline >> 5;
}
}
else { /* rv 100 */
} else { /* rv 100 */
/* rv100 might not need 64 pix alignment, who knows */
/* offsets are, hmm, weird */
tileoffset = ((pbox[i].y1 >> 4) * depthpixperline + pbox[i].x1) >> 6;
nrtilesx = ((pbox[i].x2 & ~63) - (pbox[i].x1 & ~63)) >> 4;
nrtilesy = (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
tileoffset =
((pbox[i].y1 >> 4) * depthpixperline +
pbox[i].x1) >> 6;
nrtilesx =
((pbox[i].x2 & ~63) -
(pbox[i].x1 & ~63)) >> 4;
nrtilesy =
(pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
for (j = 0; j <= nrtilesy; j++) {
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
OUT_RING( tileoffset * 128 );
BEGIN_RING(4);
OUT_RING(CP_PACKET3
(RADEON_3D_CLEAR_ZMASK, 2));
OUT_RING(tileoffset * 128);
/* the number of tiles to clear */
OUT_RING( nrtilesx + 4 );
OUT_RING(nrtilesx + 4);
/* clear mask : chooses the clearing pattern. */
OUT_RING( clearmask );
OUT_RING(clearmask);
ADVANCE_RING();
tileoffset += depthpixperline >> 6;
}
@ -942,18 +992,19 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
}
/* TODO don't always clear all hi-level z tiles */
if ((dev_priv->flags & CHIP_HAS_HIERZ) && (dev_priv->microcode_version==UCODE_R200)
&& (flags & RADEON_USE_HIERZ))
/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
if ((dev_priv->flags & CHIP_HAS_HIERZ)
&& (dev_priv->microcode_version == UCODE_R200)
&& (flags & RADEON_USE_HIERZ))
/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
{
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_HIZ, 2 ) );
OUT_RING( 0x0 ); /* First tile */
OUT_RING( 0x3cc0 );
OUT_RING( (0xff<<22)|(0xff<<6)| 0x003f003f);
BEGIN_RING(4);
OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
OUT_RING(0x0); /* First tile */
OUT_RING(0x3cc0);
OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
ADVANCE_RING();
}
}
@ -1031,7 +1082,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
if (flags & RADEON_USE_COMP_ZBUF) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
RADEON_Z_DECOMPRESSION_ENABLE;
RADEON_Z_DECOMPRESSION_ENABLE;
}
if (flags & RADEON_USE_HIERZ) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
@ -1088,7 +1139,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
rb3d_cntl = depth_clear->rb3d_cntl;
if (flags & RADEON_DEPTH) {
@ -1107,7 +1158,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
if (flags & RADEON_USE_COMP_ZBUF) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
RADEON_Z_DECOMPRESSION_ENABLE;
RADEON_Z_DECOMPRESSION_ENABLE;
}
if (flags & RADEON_USE_HIERZ) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
@ -1569,8 +1620,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
/* we got tiled coordinates, untile them */
image->x *= 2;
}
}
else microtile = 0;
} else
microtile = 0;
DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
@ -1614,6 +1665,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
dwords = size / 4;
#define RADEON_COPY_MT(_buf, _data, _width) \
do { \
if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
return DRM_ERR(EFAULT); \
} \
} while(0)
if (microtile) {
/* texture micro tiling in use, minimum texture width is thus 16 bytes.
however, we cannot use blitter directly for texture width < 64 bytes,
@ -1625,101 +1684,58 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
from user space. */
if (tex->height == 1) {
if (tex_width >= 64 || tex_width <= 16) {
if (DRM_COPY_FROM_USER(buffer, data,
tex_width * sizeof(u32))) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
(int)(tex_width * sizeof(u32)));
} else if (tex_width == 32) {
if (DRM_COPY_FROM_USER(buffer, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
if (DRM_COPY_FROM_USER(buffer + 8, data + 16, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, 16);
RADEON_COPY_MT(buffer + 8,
data + 16, 16);
}
} else if (tex_width >= 64 || tex_width == 16) {
if (DRM_COPY_FROM_USER(buffer, data,
dwords * sizeof(u32))) {
DRM_ERROR("EFAULT on data, %d dwords\n",
dwords);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
(int)(dwords * sizeof(u32)));
} else if (tex_width < 16) {
for (i = 0; i < tex->height; i++) {
if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, tex_width);
buffer += 4;
data += tex_width;
}
} else if (tex_width == 32) {
/* TODO: make sure this works when not fitting in one buffer
(i.e. 32bytes x 2048...) */
/* TODO: make sure this works when not fitting in one buffer
(i.e. 32bytes x 2048...) */
for (i = 0; i < tex->height; i += 2) {
if (DRM_COPY_FROM_USER(buffer, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, 16);
data += 16;
if (DRM_COPY_FROM_USER(buffer + 8, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 8, data, 16);
data += 16;
if (DRM_COPY_FROM_USER(buffer + 4, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 4, data, 16);
data += 16;
if (DRM_COPY_FROM_USER(buffer + 12, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 12, data, 16);
data += 16;
buffer += 16;
}
}
}
else {
} else {
if (tex_width >= 32) {
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
if (DRM_COPY_FROM_USER(buffer, data,
dwords * sizeof(u32))) {
DRM_ERROR("EFAULT on data, %d dwords\n",
dwords);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
(int)(dwords * sizeof(u32)));
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for (i = 0; i < tex->height; i++) {
if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, tex_width);
buffer += 8;
data += tex_width;
}
}
}
#undef RADEON_COPY_MT
buf->filp = filp;
buf->used = size;
offset = dev_priv->gart_buffers_offset + buf->offset;
@ -1732,8 +1748,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS );
RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
OUT_RING((spitch << 22) | (offset >> 10));
OUT_RING((texpitch << 22) | (tex->offset >> 10));
OUT_RING(0);
@ -1781,33 +1796,35 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
ADVANCE_RING();
}
static void radeon_apply_surface_regs(int surf_index, drm_radeon_private_t *dev_priv)
static void radeon_apply_surface_regs(int surf_index,
drm_radeon_private_t *dev_priv)
{
if (!dev_priv->mmio)
return;
radeon_do_cp_idle(dev_priv);
RADEON_WRITE(RADEON_SURFACE0_INFO + 16*surf_index,
dev_priv->surfaces[surf_index].flags);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*surf_index,
dev_priv->surfaces[surf_index].lower);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*surf_index,
dev_priv->surfaces[surf_index].upper);
RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
dev_priv->surfaces[surf_index].flags);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
dev_priv->surfaces[surf_index].lower);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
dev_priv->surfaces[surf_index].upper);
}
/* Allocates a virtual surface
* doesn't always allocate a real surface, will stretch an existing
* doesn't always allocate a real surface, will stretch an existing
* surface when possible.
*
* Note that refcount can be at most 2, since during a free refcount=3
* might mean we have to allocate a new surface which might not always
* be available.
* For example : we allocate three contigous surfaces ABC. If B is
* For example : we allocate three contigous surfaces ABC. If B is
* freed, we suddenly need two surfaces to store A and C, which might
* not always be available.
*/
static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *dev_priv, DRMFILE filp)
static int alloc_surface(drm_radeon_surface_alloc_t *new,
drm_radeon_private_t *dev_priv, DRMFILE filp)
{
struct radeon_virt_surface *s;
int i;
@ -1819,34 +1836,37 @@ static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *
/* sanity check */
if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) != RADEON_SURF_ADDRESS_FIXED_MASK) ||
((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
RADEON_SURF_ADDRESS_FIXED_MASK)
|| ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
return -1;
/* make sure there is no overlap with existing surfaces */
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
if ((dev_priv->surfaces[i].refcount != 0) &&
(( (new_lower >= dev_priv->surfaces[i].lower) &&
(new_lower < dev_priv->surfaces[i].upper) ) ||
( (new_lower < dev_priv->surfaces[i].lower) &&
(new_upper > dev_priv->surfaces[i].lower) )) ){
return -1;}
(((new_lower >= dev_priv->surfaces[i].lower) &&
(new_lower < dev_priv->surfaces[i].upper)) ||
((new_lower < dev_priv->surfaces[i].lower) &&
(new_upper > dev_priv->surfaces[i].lower)))) {
return -1;
}
}
/* find a virtual surface */
for (i = 0; i < 2*RADEON_MAX_SURFACES; i++)
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
if (dev_priv->virt_surfaces[i].filp == 0)
break;
if (i == 2*RADEON_MAX_SURFACES) {
return -1;}
if (i == 2 * RADEON_MAX_SURFACES) {
return -1;
}
virt_surface_index = i;
/* try to reuse an existing surface */
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
/* extend before */
if ((dev_priv->surfaces[i].refcount == 1) &&
(new->flags == dev_priv->surfaces[i].flags) &&
(new_upper + 1 == dev_priv->surfaces[i].lower)) {
(new->flags == dev_priv->surfaces[i].flags) &&
(new_upper + 1 == dev_priv->surfaces[i].lower)) {
s = &(dev_priv->virt_surfaces[virt_surface_index]);
s->surface_index = i;
s->lower = new_lower;
@ -1861,8 +1881,8 @@ static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *
/* extend after */
if ((dev_priv->surfaces[i].refcount == 1) &&
(new->flags == dev_priv->surfaces[i].flags) &&
(new_lower == dev_priv->surfaces[i].upper + 1)) {
(new->flags == dev_priv->surfaces[i].flags) &&
(new_lower == dev_priv->surfaces[i].upper + 1)) {
s = &(dev_priv->virt_surfaces[virt_surface_index]);
s->surface_index = i;
s->lower = new_lower;
@ -1898,26 +1918,34 @@ static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *
return -1;
}
static int free_surface(DRMFILE filp, drm_radeon_private_t *dev_priv, int lower)
static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
int lower)
{
struct radeon_virt_surface *s;
int i;
/* find the virtual surface */
for(i = 0; i < 2*RADEON_MAX_SURFACES; i++) {
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
s = &(dev_priv->virt_surfaces[i]);
if (s->filp) {
if ((lower == s->lower) && (filp == s->filp)) {
if (dev_priv->surfaces[s->surface_index].lower == s->lower)
dev_priv->surfaces[s->surface_index].lower = s->upper;
if (dev_priv->surfaces[s->surface_index].
lower == s->lower)
dev_priv->surfaces[s->surface_index].
lower = s->upper;
if (dev_priv->surfaces[s->surface_index].upper == s->upper)
dev_priv->surfaces[s->surface_index].upper = s->lower;
if (dev_priv->surfaces[s->surface_index].
upper == s->upper)
dev_priv->surfaces[s->surface_index].
upper = s->lower;
dev_priv->surfaces[s->surface_index].refcount--;
if (dev_priv->surfaces[s->surface_index].refcount == 0)
dev_priv->surfaces[s->surface_index].flags = 0;
s->filp = 0;
radeon_apply_surface_regs(s->surface_index, dev_priv);
if (dev_priv->surfaces[s->surface_index].
refcount == 0)
dev_priv->surfaces[s->surface_index].
flags = 0;
s->filp = NULL;
radeon_apply_surface_regs(s->surface_index,
dev_priv);
return 0;
}
}
@ -1925,20 +1953,20 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t *dev_priv, int lower)
return 1;
}
static void radeon_surfaces_release(DRMFILE filp, drm_radeon_private_t *dev_priv)
static void radeon_surfaces_release(DRMFILE filp,
drm_radeon_private_t * dev_priv)
{
int i;
for( i = 0; i < 2*RADEON_MAX_SURFACES; i++)
{
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
if (dev_priv->virt_surfaces[i].filp == filp)
free_surface(filp, dev_priv, dev_priv->virt_surfaces[i].lower);
free_surface(filp, dev_priv,
dev_priv->virt_surfaces[i].lower);
}
}
/* ================================================================
* IOCTL functions
*/
static int radeon_surface_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
@ -1946,12 +1974,13 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS)
drm_radeon_surface_alloc_t alloc;
if (!dev_priv) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_surface_alloc_t __user *)data,
sizeof(alloc));
DRM_COPY_FROM_USER_IOCTL(alloc,
(drm_radeon_surface_alloc_t __user *) data,
sizeof(alloc));
if (alloc_surface(&alloc, dev_priv, filp) == -1)
return DRM_ERR(EINVAL);
@ -1966,12 +1995,12 @@ static int radeon_surface_free(DRM_IOCTL_ARGS)
drm_radeon_surface_free_t memfree;
if (!dev_priv) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *)data,
sizeof(memfree) );
DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data,
sizeof(memfree));
if (free_surface(filp, dev_priv, memfree.address))
return DRM_ERR(EINVAL);
@ -2097,7 +2126,7 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_file_t *filp_priv;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_radeon_sarea_t *sarea_priv;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_radeon_vertex_t vertex;
@ -2110,6 +2139,8 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
return DRM_ERR(EINVAL);
}
sarea_priv = dev_priv->sarea_priv;
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@ -2185,7 +2216,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_file_t *filp_priv;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_radeon_sarea_t *sarea_priv;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_radeon_indices_t elts;
@ -2198,6 +2229,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
sarea_priv = dev_priv->sarea_priv;
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
@ -2413,7 +2445,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_file_t *filp_priv;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_radeon_sarea_t *sarea_priv;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_radeon_vertex2_t vertex;
@ -2427,6 +2459,8 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
return DRM_ERR(EINVAL);
}
sarea_priv = dev_priv->sarea_priv;
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data,
@ -2515,7 +2549,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
drm_file_t * filp_priv,
drm_radeon_cmd_header_t header,
drm_radeon_cmd_buffer_t * cmdbuf)
drm_radeon_kcmd_buffer_t *cmdbuf)
{
int id = (int)header.packet.packet_id;
int sz, reg;
@ -2548,9 +2582,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
return 0;
}
static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
drm_radeon_cmd_header_t header,
drm_radeon_cmd_buffer_t * cmdbuf)
drm_radeon_kcmd_buffer_t *cmdbuf)
{
int sz = header.scalars.count;
int start = header.scalars.offset;
@ -2570,9 +2604,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
/* God this is ugly
*/
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
drm_radeon_cmd_header_t header,
drm_radeon_cmd_buffer_t * cmdbuf)
drm_radeon_kcmd_buffer_t *cmdbuf)
{
int sz = header.scalars.count;
int start = ((unsigned int)header.scalars.offset) + 0x100;
@ -2590,9 +2624,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
return 0;
}
static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
drm_radeon_cmd_header_t header,
drm_radeon_cmd_buffer_t * cmdbuf)
drm_radeon_kcmd_buffer_t *cmdbuf)
{
int sz = header.vectors.count;
int start = header.vectors.offset;
@ -2613,7 +2647,7 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
static int radeon_emit_packet3(drm_device_t * dev,
drm_file_t * filp_priv,
drm_radeon_cmd_buffer_t * cmdbuf)
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int cmdsz;
@ -2637,9 +2671,9 @@ static int radeon_emit_packet3(drm_device_t * dev,
return 0;
}
static int radeon_emit_packet3_cliprect(drm_device_t * dev,
drm_file_t * filp_priv,
drm_radeon_cmd_buffer_t * cmdbuf,
static int radeon_emit_packet3_cliprect(drm_device_t *dev,
drm_file_t *filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
int orig_nbox)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@ -2736,7 +2770,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
int idx;
drm_radeon_cmd_buffer_t cmdbuf;
drm_radeon_kcmd_buffer_t cmdbuf;
drm_radeon_cmd_header_t header;
int orig_nbox, orig_bufsz;
char *kbuf = NULL;
@ -2751,7 +2785,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf,
(drm_radeon_cmd_buffer_t __user *) data,
(drm_radeon_kcmd_buffer_t __user *) data,
sizeof(cmdbuf));
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@ -2770,7 +2804,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
if (kbuf == NULL)
return DRM_ERR(ENOMEM);
if (DRM_COPY_FROM_USER(kbuf, cmdbuf.buf, cmdbuf.bufsz)) {
if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
cmdbuf.bufsz)) {
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
return DRM_ERR(EFAULT);
}
@ -2778,19 +2813,20 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
}
orig_nbox = cmdbuf.nbox;
if(dev_priv->microcode_version == UCODE_R300) {
if (dev_priv->microcode_version == UCODE_R300) {
int temp;
temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
return temp;
}
/* microcode_version != r300 */
while (cmdbuf.bufsz >= sizeof(header)) {
header.i = *(int *)cmdbuf.buf;
cmdbuf.buf += sizeof(header);
cmdbuf.bufsz -= sizeof(header);
@ -2882,12 +2918,12 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
err:
err:
if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
return DRM_ERR(EINVAL);
@ -2937,7 +2973,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
case RADEON_PARAM_STATUS_HANDLE:
value = dev_priv->ring_rptr_offset;
break;
#ifndef __LP64__
#if BITS_PER_LONG == 32
/*
* This ioctl() doesn't work on 64-bit platforms because hw_lock is a
* pointer which can't fit into an int-sized variable. According to
@ -2955,6 +2991,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
case RADEON_PARAM_GART_TEX_HANDLE:
value = dev_priv->gart_textures_offset;
break;
case RADEON_PARAM_CARD_TYPE:
if (dev_priv->flags & CHIP_IS_PCIE)
value = RADEON_CARD_PCIE;
else if (dev_priv->flags & CHIP_IS_AGP)
value = RADEON_CARD_AGP;
else
value = RADEON_CARD_PCI;
break;
default:
return DRM_ERR(EINVAL);
}
@ -2992,13 +3037,12 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
break;
case RADEON_SETPARAM_SWITCH_TILING:
if (sp.value == 0) {
DRM_DEBUG( "color tiling disabled\n" );
DRM_DEBUG("color tiling disabled\n");
dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->sarea_priv->tiling_enabled = 0;
}
else if (sp.value == 1) {
DRM_DEBUG( "color tiling enabled\n" );
} else if (sp.value == 1) {
DRM_DEBUG("color tiling enabled\n");
dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
dev_priv->sarea_priv->tiling_enabled = 1;
@ -3007,6 +3051,9 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
case RADEON_SETPARAM_PCIGART_LOCATION:
dev_priv->pcigart_offset = sp.value;
break;
case RADEON_SETPARAM_NEW_MEMMAP:
dev_priv->new_memmap = sp.value;
break;
default:
DRM_DEBUG("Invalid parameter %d\n", sp.param);
return DRM_ERR(EINVAL);

View File

@ -300,8 +300,7 @@ static int savage_dma_init(drm_savage_private_t *dev_priv)
dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
(SAVAGE_DMA_PAGE_SIZE*4);
dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
dev_priv->nr_dma_pages,
DRM_MEM_DRIVER);
dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
if (dev_priv->dma_pages == NULL)
return DRM_ERR(ENOMEM);
@ -376,8 +375,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
if (cur + nr_pages < dev_priv->nr_dma_pages) {
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur*SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[cur].used;
cur*SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
if (n < rest)
rest = n;
dev_priv->dma_pages[cur].used += rest;
@ -385,7 +383,8 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
cur++;
} else {
dev_priv->dma_flush(dev_priv);
nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
nr_pages =
(n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
dev_priv->dma_pages[i].used = 0;
@ -443,8 +442,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
/* pad with noops */
if (pad) {
uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur * SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[cur].used;
cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
dev_priv->dma_pages[cur].used += pad;
while(pad != 0) {
*dma_ptr++ = BCI_CMD_WAIT;
@ -459,8 +457,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
(first * SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[first].flushed) * 4;
len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[cur].used -
dev_priv->dma_pages[first].flushed;
dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
DRM_DEBUG("phys_addr=%lx, len=%u\n",
phys_addr | dev_priv->dma_type, len);
@ -588,19 +585,19 @@ int savage_driver_firstopen(drm_device_t *dev)
* MTRRs. */
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x01000000;
dev_priv->mtrr[0].handle = drm_mtrr_add(
dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
DRM_MTRR_WC);
dev_priv->mtrr[0].handle =
drm_mtrr_add(dev_priv->mtrr[0].base,
dev_priv->mtrr[0].size, DRM_MTRR_WC);
dev_priv->mtrr[1].base = fb_base+0x02000000;
dev_priv->mtrr[1].size = 0x02000000;
dev_priv->mtrr[1].handle = drm_mtrr_add(
dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
DRM_MTRR_WC);
dev_priv->mtrr[1].handle =
drm_mtrr_add(dev_priv->mtrr[1].base,
dev_priv->mtrr[1].size, DRM_MTRR_WC);
dev_priv->mtrr[2].base = fb_base+0x04000000;
dev_priv->mtrr[2].size = 0x04000000;
dev_priv->mtrr[2].handle = drm_mtrr_add(
dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
DRM_MTRR_WC);
dev_priv->mtrr[2].handle =
drm_mtrr_add(dev_priv->mtrr[2].base,
dev_priv->mtrr[2].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08lx\n",
drm_get_resource_len(dev, 0));
@ -619,9 +616,9 @@ int savage_driver_firstopen(drm_device_t *dev)
* aperture. */
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x08000000;
dev_priv->mtrr[0].handle = drm_mtrr_add(
dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
DRM_MTRR_WC);
dev_priv->mtrr[0].handle =
drm_mtrr_add(dev_priv->mtrr[0].base,
dev_priv->mtrr[0].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08lx\n",
drm_get_resource_len(dev, 1));
@ -667,8 +664,7 @@ void savage_driver_lastclose(drm_device_t *dev)
if (dev_priv->mtrr[i].handle >= 0)
drm_mtrr_del(dev_priv->mtrr[i].handle,
dev_priv->mtrr[i].base,
dev_priv->mtrr[i].size,
DRM_MTRR_WC);
dev_priv->mtrr[i].size, DRM_MTRR_WC);
}
int savage_driver_unload(drm_device_t *dev)
@ -836,9 +832,10 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
color_tile_format = SAVAGE_BD_TILE_DEST;
depth_tile_format = SAVAGE_BD_TILE_DEST;
}
front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
back_stride = dev_priv-> back_pitch / (dev_priv->fb_bpp/8);
depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);
front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
depth_stride =
dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
@ -1071,7 +1068,8 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
return ret;
}
void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) {
void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
{
drm_device_dma_t *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
int i;

View File

@ -92,8 +92,9 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
addr &= ~7;
if (addr < dev_priv->texture_offset ||
addr >= dev_priv->texture_offset+dev_priv->texture_size) {
DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
unit, addr);
DRM_ERROR
("bad texAddr%d %08x (local addr out of range)\n",
unit, addr);
return DRM_ERR(EINVAL);
}
} else { /* AGP */
@ -106,8 +107,9 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
if (addr < dev_priv->agp_textures->offset ||
addr >= (dev_priv->agp_textures->offset +
dev_priv->agp_textures->size)) {
DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
unit, addr);
DRM_ERROR
("bad texAddr%d %08x (AGP addr out of range)\n",
unit, addr);
return DRM_ERR(EINVAL);
}
}
@ -148,8 +150,8 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
return savage_verify_texaddr(
dev_priv, 0, dev_priv->state.s3d.texaddr);
return savage_verify_texaddr(dev_priv, 0,
dev_priv->state.s3d.texaddr);
}
return 0;
@ -175,17 +177,17 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
/* if any texture regs were changed ... */
if (start <= SAVAGE_TEXDESCR_S4 &&
start+count > SAVAGE_TEXPALADDR_S4) {
start + count > SAVAGE_TEXPALADDR_S4) {
/* ... check texture state */
SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
ret |= savage_verify_texaddr(
dev_priv, 0, dev_priv->state.s4.texaddr0);
ret |= savage_verify_texaddr(dev_priv, 0,
dev_priv->state.s4.texaddr0);
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
ret |= savage_verify_texaddr(
dev_priv, 1, dev_priv->state.s4.texaddr1);
ret |= savage_verify_texaddr(dev_priv, 1,
dev_priv->state.s4.texaddr1);
}
return ret;
@ -231,7 +233,8 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_DRAWCTRL0_S4) {
if (start+count > SAVAGE_DRAWCTRL1_S4+1)
count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
count2 = count -
(SAVAGE_DRAWCTRL1_S4 + 1 - start);
if (start+count > SAVAGE_DRAWCTRL0_S4)
count = SAVAGE_DRAWCTRL0_S4 - start;
} else if (start <= SAVAGE_DRAWCTRL1_S4) {
@ -307,8 +310,9 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
DRM_ERROR
("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
}
break;
@ -319,8 +323,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
}
} else {
@ -328,8 +331,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
}
if (reorder) {
@ -383,7 +385,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
for (i = start+1; i+1 < start+count; i += 2)
BCI_WRITE((i + reorder[i % 3]) |
((i+1 + reorder[(i+1) % 3]) << 16));
((i + 1 +
reorder[(i + 1) % 3]) << 16));
if (i < start+count)
BCI_WRITE(i + reorder[i%3]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
@ -444,8 +447,9 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
DRM_ERROR
("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
}
break;
@ -556,16 +560,15 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
prim = SAVAGE_PRIM_TRILIST;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n",
n);
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
return DRM_ERR(EINVAL);
}
break;
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
n);
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
return DRM_ERR(EINVAL);
}
break;
@ -576,8 +579,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
}
} else {
@ -585,8 +587,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
}
if (reorder) {
@ -643,7 +644,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
for (i = 1; i+1 < count; i += 2)
BCI_WRITE(idx[i + reorder[i % 3]] |
(idx[i+1 + reorder[(i+1) % 3]] << 16));
(idx[i + 1 +
reorder[(i + 1) % 3]] << 16));
if (i < count)
BCI_WRITE(idx[i + reorder[i%3]]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
@ -677,8 +679,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const uint16_t *idx,
const uint32_t *vtxbuf,
unsigned int vb_size,
unsigned int vb_stride)
unsigned int vb_size, unsigned int vb_stride)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->idx.prim;
@ -697,16 +698,15 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
prim = SAVAGE_PRIM_TRILIST;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n",
n);
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
return DRM_ERR(EINVAL);
}
break;
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
n);
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
return DRM_ERR(EINVAL);
}
break;
@ -808,8 +808,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
BCI_CMD_SET_ROP(clear_cmd,0xCC);
nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
((flags & SAVAGE_BACK) ? 1 : 0) +
((flags & SAVAGE_DEPTH) ? 1 : 0);
((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
if (nbufs == 0)
return 0;
@ -979,8 +978,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
if (dma && dma->buflist) {
if (cmdbuf.dma_idx > dma->buf_count) {
DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
cmdbuf.dma_idx, dma->buf_count-1);
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
cmdbuf.dma_idx, dma->buf_count-1);
return DRM_ERR(EINVAL);
}
dmabuf = dma->buflist[cmdbuf.dma_idx];

View File

@ -152,7 +152,7 @@ int setDestroy(set_t * set)
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -77,7 +77,7 @@ int setDestroy(set_t * set);
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -93,7 +93,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
{
drm_sis_mem_t fb;
struct sis_memreq req;
drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
int retval = 0;
DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
@ -183,7 +183,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
drm_sis_mem_t fb;
PMemBlock block;
int retval = 0;