
initial import (froyo-x86, gingerbread-x86, android-x86-2.2, android-x86-2.2-r2)
author Owen Kwon <pinebud77@hotmail.com>
Fri, 25 Jun 2010 09:19:45 +0000 (18:19 +0900)
committer Owen Kwon <pinebud77@hotmail.com>
Fri, 25 Jun 2010 09:19:45 +0000 (18:19 +0900)
145 files changed:
.gitignore [new file with mode: 0644]
AndroidBoard.mk [new file with mode: 0755]
AndroidProducts.mk [new file with mode: 0644]
BoardConfig.mk [new file with mode: 0644]
firmware/msvdx_fw.bin [new file with mode: 0644]
firmware/sd8686.bin [new file with mode: 0755]
firmware/sd8686_helper.bin [new file with mode: 0755]
init.s5.rc [new file with mode: 0644]
init.s5.sh [new file with mode: 0755]
pointercal [new file with mode: 0644]
psb-kernel-source-4.41.1/.gitignore [new file with mode: 0644]
psb-kernel-source-4.41.1/Android.mk [new file with mode: 0644]
psb-kernel-source-4.41.1/Config.in [new file with mode: 0644]
psb-kernel-source-4.41.1/Doxyfile [new file with mode: 0644]
psb-kernel-source-4.41.1/GPLv2_License.txt [new file with mode: 0644]
psb-kernel-source-4.41.1/Kconfig [new file with mode: 0644]
psb-kernel-source-4.41.1/Makefile [new file with mode: 0644]
psb-kernel-source-4.41.1/Makefile.kernel [new file with mode: 0644]
psb-kernel-source-4.41.1/Module.markers [new file with mode: 0644]
psb-kernel-source-4.41.1/README.drm [new file with mode: 0644]
psb-kernel-source-4.41.1/ati_pcigart.c [new file with mode: 0644]
psb-kernel-source-4.41.1/build.sh [new file with mode: 0755]
psb-kernel-source-4.41.1/create_linux_pci_lists.sh [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/changelog [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/compat [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/control [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/copyright [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/dirs [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/dkms.conf.in [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/patches/00list [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/patches/use_udev.dpatch [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/postinst [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/postrm [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/prerm [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/psb-kernel-headers.dirs [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/psb-kernel-headers.install [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/psb-kernel-headers.postrm [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/psb-kernel-headers.preinst [new file with mode: 0644]
psb-kernel-source-4.41.1/debian/rules [new file with mode: 0644]
psb-kernel-source-4.41.1/drm.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drmP.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_agpsupport.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_auth.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_bo.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_bo_lock.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_bo_move.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_bufs.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_compat.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_compat.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_context.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_core.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_crtc.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_crtc.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_dma.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_drawable.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_drv.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_edid.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_edid.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_fb.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_fence.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_fops.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_hashtab.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_hashtab.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_internal.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_ioc32.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_ioctl.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_irq.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_lock.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_memory.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_memory.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_memory_debug.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_memory_debug.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_mm.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_modes.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_object.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_objects.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_os_linux.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_pci.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_pciids.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_pciids.txt [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_proc.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_regman.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_sarea.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_scatter.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_sman.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_sman.h [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_stub.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_sysfs.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_ttm.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_vm.c [new file with mode: 0644]
psb-kernel-source-4.41.1/drm_vm_nopage_compat.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_buffer.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_compat.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_dma.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_drm.h [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_drv.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_drv.h [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_fence.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_init.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_ioc32.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_irq.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_mem.c [new file with mode: 0644]
psb-kernel-source-4.41.1/i915_reg.h [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_crt.c [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_display.c [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_drv.h [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_fb.c [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_i2c.c [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_lvds.c [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_lvds.h [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_modes.c [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_sdvo.c [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_sdvo_regs.h [new file with mode: 0644]
psb-kernel-source-4.41.1/intel_setup.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_buffer.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_detear.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_detear.h [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_drm.h [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_drv.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_drv.h [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_fb.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_fence.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_gtt.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_i2c.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_irq.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_mmu.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_msvdx.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_msvdx.h [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_msvdxinit.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_reg.h [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_regman.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_reset.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_scene.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_scene.h [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_schedule.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_schedule.h [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_setup.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_sgx.c [new file with mode: 0644]
psb-kernel-source-4.41.1/psb_xhw.c [new file with mode: 0644]
s5.mk [new file with mode: 0644]
s5_defconfig [new file with mode: 0644]
s5_info [new file with mode: 0644]
system.prop [new file with mode: 0644]
ts.conf [new file with mode: 0644]
ts.env [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..9e3f011
--- /dev/null
@@ -0,0 +1,9 @@
+*.o.cmd
+.tmp_versions
+*.o
+*.ko
+*.mod.c
+*.ko.cmd
+Modules.symvers
+Module.symvers
+modules.order
diff --git a/AndroidBoard.mk b/AndroidBoard.mk
new file mode 100755 (executable)
index 0000000..8a519c2
--- /dev/null
@@ -0,0 +1,30 @@
+LOCAL_PATH := $(call my-dir)
+LOCAL_FIRMWARES := 
+TARGET_INITRD_SCRIPTS := $(LOCAL_PATH)/s5_info
+TARGET_PREBUILT_APPS := $(subst $(LOCAL_PATH)/,,$(wildcard $(LOCAL_PATH)/app/*))
+TARGET_KERNEL_CONFIG := $(LOCAL_PATH)/s5_defconfig
+
+#copy firmware files
+$(call add-prebuilt-targets,$(TARGET_OUT)/lib/firmware, firmware/msvdx_fw.bin)
+$(call add-prebuilt-targets,$(TARGET_OUT)/lib/firmware, firmware/sd8686.bin)
+$(call add-prebuilt-targets,$(TARGET_OUT)/lib/firmware, firmware/sd8686_helper.bin)
+
+#compile and add psb modules
+#this is not an Android Makefile format :(
+#ToDo : need to find a way to configure the path -_-;;
+KBUILD_OUTPUT := $(CURDIR)/$(TARGET_OUT_INTERMEDIATES)/kernel
+PSB_SRC_DIR := psb-kernel-source-4.41.1
+$(LOCAL_PATH)/drm.ko : kernel $(LOCAL_PATH)/psb.ko
+       cp $(TARGET_DEVICE_DIR)/$(PSB_SRC_DIR)/drm.ko $(TARGET_DEVICE_DIR)
+
+
+$(LOCAL_PATH)/psb.ko : kernel
+       $(hide) $(MAKE) -C$(TARGET_DEVICE_DIR)/$(PSB_SRC_DIR) \
+               LINUXDIR=$(KBUILD_OUTPUT) DRM_MODULES=psb
+       cp $(TARGET_DEVICE_DIR)/$(PSB_SRC_DIR)/psb.ko $(TARGET_DEVICE_DIR)
+
+$(call add-prebuilt-targets,$(TARGET_OUT)/lib/modules, drm.ko)
+$(call add-prebuilt-targets,$(TARGET_OUT)/lib/modules, psb.ko)
+
+include $(GENERIC_X86_ANDROID_MK)
+
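The two rules above bypass the Android build macros and drive the module's own makefile directly, as the comments admit. A rough manual equivalent of the psb.ko rule, for reference (the paths are assumptions; match KBUILD to your TARGET_OUT_INTERMEDIATES/kernel directory):

    # Build psb.ko (and drm.ko) out of tree against the Android kernel build.
    KBUILD=$OUT/obj/kernel                 # assumed intermediates path
    make -C $DEVICE_DIR/psb-kernel-source-4.41.1 LINUXDIR=$KBUILD DRM_MODULES=psb
    cp $DEVICE_DIR/psb-kernel-source-4.41.1/psb.ko $DEVICE_DIR/

The add-prebuilt-targets calls then package the resulting drm.ko and psb.ko into /system/lib/modules.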
diff --git a/AndroidProducts.mk b/AndroidProducts.mk
new file mode 100644 (file)
index 0000000..d12c16f
--- /dev/null
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# This file should set PRODUCT_MAKEFILES to a list of product makefiles
+# to expose to the build system.  LOCAL_DIR will already be set to
+# the directory containing this file.
+#
+# This file may not rely on the value of any variable other than
+# LOCAL_DIR; do not use any conditionals, and do not look up the
+# value of any variable that isn't set in this file or in a file that
+# it includes.
+#
+
+PRODUCT_MAKEFILES := \
+    $(LOCAL_DIR)/s5.mk
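PRODUCT_MAKEFILES only registers s5.mk with the build system; the product still has to be selected when building. A hypothetical invocation, assuming s5.mk sets PRODUCT_NAME := s5 (s5.mk itself is not shown in this excerpt):

    . build/envsetup.sh
    lunch s5-eng        # product "s5", build variant "eng"
    make -j4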
diff --git a/BoardConfig.mk b/BoardConfig.mk
new file mode 100644 (file)
index 0000000..c050f8b
--- /dev/null
@@ -0,0 +1,6 @@
+TARGET_HAS_THIRD_PARTY_APPS := true
+BOARD_WPA_SUPPLICANT_DRIVER := true
+BOARD_USES_TSLIB := true
+
+include $(GENERIC_X86_CONFIG_MK)
+
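BOARD_USES_TSLIB pulls in the tslib touchscreen stack, which the ts.conf, ts.env and pointercal files in this commit configure at runtime. A guess at the kind of environment ts.env sets up (its contents are not shown here; the TSLIB_* names are tslib's standard variables, the paths are assumptions):

    export TSLIB_TSDEVICE=/dev/input/event2      # assumed touchscreen event node
    export TSLIB_CONFFILE=/system/etc/ts.conf
    export TSLIB_CALIBFILE=/system/etc/pointercal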
diff --git a/firmware/msvdx_fw.bin b/firmware/msvdx_fw.bin
new file mode 100644 (file)
index 0000000..7ee9ce0
Binary files /dev/null and b/firmware/msvdx_fw.bin differ
diff --git a/firmware/sd8686.bin b/firmware/sd8686.bin
new file mode 100755 (executable)
index 0000000..4f5675c
Binary files /dev/null and b/firmware/sd8686.bin differ
diff --git a/firmware/sd8686_helper.bin b/firmware/sd8686_helper.bin
new file mode 100755 (executable)
index 0000000..f450ee6
Binary files /dev/null and b/firmware/sd8686_helper.bin differ
diff --git a/init.s5.rc b/init.s5.rc
new file mode 100644 (file)
index 0000000..d3bd900
--- /dev/null
@@ -0,0 +1,2 @@
+service s5hw /system/bin/sh /system/etc/init.s5.sh
+       oneshot
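Since the service is not marked disabled, init launches it automatically at boot, and oneshot keeps init from respawning the script once it exits. The standard init control properties still apply, e.g. from an adb shell:

    getprop init.svc.s5hw     # "running" while the script runs, then "stopped"
    setprop ctl.start s5hw    # re-run the oneshot service by hand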
diff --git a/init.s5.sh b/init.s5.sh
new file mode 100755 (executable)
index 0000000..2f7acf0
--- /dev/null
@@ -0,0 +1,22 @@
+#!/system/bin/sh
+
+# no sleep!
+echo s5NoSleep > /sys/power/wake_lock
+
+#turn on wifi
+/system/xbin/s5_onoff 0x7 0x1
+
+#turn on BT
+/system/xbin/s5_onoff 0x2 0x1
+#modprobe libertas_sdio                # moved to initrd
+
+#netcfg eth0 dhcp
+#setprop net.dns1 4.2.2.2
+
+## For wifi, we'll need this:
+
+# insmod ath_hal.ko
+# insmod wlan.ko
+# insmod wlan_scan_sta.ko
+# insmod ath_rate_sample.ko
+# insmod ath_pci.ko
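The wake_lock write near the top takes a named kernel wakelock so the device cannot suspend. Nothing in this script ever drops it; assuming the usual Android wakelock sysfs pair, releasing it would look like:

    echo s5NoSleep > /sys/power/wake_lock     # hold: suspend is blocked
    echo s5NoSleep > /sys/power/wake_unlock   # release: suspend allowed again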
diff --git a/pointercal b/pointercal
new file mode 100644 (file)
index 0000000..204844f
--- /dev/null
@@ -0,0 +1 @@
+77039 -493 -4216930 195 51908 -5652733 65536 1024 600#
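pointercal is tslib's calibration file: the first seven integers are the coefficients a0..a6 used by tslib's "linear" module, and the trailing pair looks like the 1024x600 screen size that newer ts_calibrate builds append. Assuming the standard linear-module semantics, raw touch coordinates map to screen coordinates as sketched here:

    # Apply the tslib linear transform by hand (semantics assumed from tslib):
    #   xs = (a2 + a0*x + a1*y) / a6,  ys = (a5 + a3*x + a4*y) / a6
    read A0 A1 A2 A3 A4 A5 A6 REST < pointercal
    X=500 Y=300     # sample raw coordinates
    echo $(( (A2 + A0*X + A1*Y) / A6 )) $(( (A5 + A3*X + A4*Y) / A6 ))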
diff --git a/psb-kernel-source-4.41.1/.gitignore b/psb-kernel-source-4.41.1/.gitignore
new file mode 100644 (file)
index 0000000..9e3f011
--- /dev/null
@@ -0,0 +1,9 @@
+*.o.cmd
+.tmp_versions
+*.o
+*.ko
+*.mod.c
+*.ko.cmd
+Modules.symvers
+Module.symvers
+modules.order
diff --git a/psb-kernel-source-4.41.1/Android.mk b/psb-kernel-source-4.41.1/Android.mk
new file mode 100644 (file)
index 0000000..5632356
--- /dev/null
@@ -0,0 +1,4 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+
diff --git a/psb-kernel-source-4.41.1/Config.in b/psb-kernel-source-4.41.1/Config.in
new file mode 100644 (file)
index 0000000..46ba48d
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# Drm device configuration
+#
+# This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+
+tristate '  3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX
+#tristate '  3dlabs GMX 2000' CONFIG_DRM_GAMMA
+tristate '  ATI Rage 128' CONFIG_DRM_R128
+tristate '  ATI Radeon' CONFIG_DRM_RADEON
+dep_tristate '  Intel I810' CONFIG_DRM_I810 $CONFIG_AGP
+dep_tristate '  Intel 830M/845G/852GM/855GM/865G' CONFIG_DRM_I830 $CONFIG_AGP
+dep_tristate '  Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP
+tristate '  SiS' CONFIG_DRM_SIS
+tristate '  Via Unichrome' CONFIG_DRM_VIA
+
diff --git a/psb-kernel-source-4.41.1/Doxyfile b/psb-kernel-source-4.41.1/Doxyfile
new file mode 100644 (file)
index 0000000..97efeaa
--- /dev/null
@@ -0,0 +1,1161 @@
+# Doxyfile 1.3.8
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = "Direct Rendering Module"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or 
+# if some version control system is used.
+
+PROJECT_NUMBER         = 
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
+# base path where the generated documentation will be put. 
+# If a relative path is entered, it will be relative to the location 
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = 
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 
+# 4096 sub-directories (in 2 levels) under the output directory of each output 
+# format and will distribute the generated files over these directories. 
+# Enabling this option can be useful when feeding doxygen a huge amount of source 
+# files, where putting all generated files in the same directory would otherwise 
+# cause performance problems for the file system.
+
+CREATE_SUBDIRS         = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
+# documentation generated by doxygen is written. Doxygen will use this 
+# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: 
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, 
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, 
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, 
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, 
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE        = English
+
+# This tag can be used to specify the encoding used in the generated output. 
+# The encoding is not always determined by the language that is chosen, 
+# but also whether or not the output is meant for Windows or non-Windows users. 
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES 
+# forces the Windows encoding (this is the default for the Windows binary), 
+# whereas setting the tag to NO uses a Unix-style encoding (the default for 
+# all platforms other than Windows).
+
+USE_WINDOWS_ENCODING   = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
+# include brief member descriptions after the members that are listed in 
+# the file and class documentation (similar to JavaDoc). 
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
+# the brief description of a member or function before the detailed description. 
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator 
+# that is used to form the text in various listings. Each string 
+# in this list, if found as the leading text of the brief description, will be 
+# stripped from the text and the result after processing the whole list, is used 
+# as the annotated text. Otherwise, the brief description is used as-is. If left 
+# blank, the following values are used ("$name" is automatically replaced with the 
+# name of the entity): "The $name class" "The $name widget" "The $name file" 
+# "is" "provides" "specifies" "contains" "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF       = 
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
+# Doxygen will generate a detailed section even if there is only a brief 
+# description.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited 
+# members of a class in the documentation of that class as if those members were 
+# ordinary class members. Constructors, destructors and assignment operators of 
+# the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
+# path before files name in the file list and in the header files. If set 
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
+# can be used to strip a user-defined part of the path. Stripping is 
+# only done if one of the specified strings matches the left-hand part of 
+# the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the 
+# path to strip.
+
+STRIP_FROM_PATH        = 
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of 
+# the path mentioned in the documentation of a class, which tells 
+# the reader which header file to include in order to use a class. 
+# If left blank only the name of the header file containing the class 
+# definition is used. Otherwise one should specify the include paths that 
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH    = 
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
+# (but less readable) file names. This can be useful is your file systems 
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like the Qt-style comments (thus requiring an 
+# explicit @brief command for a brief description.
+
+JAVADOC_AUTOBRIEF      = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
+# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
+# comments) as a brief description. This used to be the default behaviour. 
+# The new default is to treat a multi-line C++ comment block as a detailed 
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen 
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member 
+# documentation.
+
+DETAILS_AT_TOP         = YES
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
+# member inherits the documentation from any documented member that it 
+# re-implements.
+
+INHERIT_DOCS           = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
+# tag is set to YES, then doxygen will reuse the documentation of the first 
+# member in the group (if any) for the other members of the group. By default 
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that acts 
+# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
+# put the command \sideeffect (or @sideeffect) in the documentation, which 
+# will result in a user-defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                = 
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources 
+# only. Doxygen will then generate output that is more tailored for C. 
+# For instance, some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources 
+# only. Doxygen will then generate output that is more tailored for Java. 
+# For instance, namespaces will be presented as packages, qualified scopes 
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
+# the same type (for instance a group of public functions) to be put as a 
+# subgroup of that type (e.g. under the Public Functions section). Set it to 
+# NO to prevent subgrouping. Alternatively, this can be done per class using 
+# the \nosubgrouping command.
+
+SUBGROUPING            = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
+# documentation are documented, even if no documentation was available. 
+# Private class members and static file members will be hidden unless 
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
+# will be included in the documentation.
+
+EXTRACT_PRIVATE        = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file 
+# will be included in the documentation.
+
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
+# defined locally in source files will be included in the documentation. 
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local 
+# methods, which are defined in the implementation section but not in 
+# the interface are included in the documentation. 
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
+# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the 
+# various overviews, but no documentation section is generated. 
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
+# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these classes will be included in the various 
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
+# friend (class|struct|union) declarations. 
+# If set to NO (the default) these declarations will be included in the 
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
+# documentation blocks found inside the body of a function. 
+# If set to NO (the default) these blocks will be appended to the 
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation 
+# that is typed after a \internal command is included. If the tag is set 
+# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
+# file names in lower-case letters. If set to YES upper-case letters are also 
+# allowed. This is useful if you have classes or files whose names only differ 
+# in case and if your file system supports case sensitive file names. Windows 
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
+# will show members with their full class and namespace scopes in the 
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
+# will put a list of the files that are included by a file in the documentation 
+# of that file.
+
+SHOW_INCLUDE_FILES     = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
+# will sort the (detailed) documentation of file and class members 
+# alphabetically by member name. If set to NO the members will appear in 
+# declaration order.
+
+SORT_MEMBER_DOCS       = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
+# brief documentation of file, namespace and class members alphabetically 
+# by member name. If set to NO (the default) the members will appear in 
+# declaration order.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be 
+# sorted by fully-qualified names, including namespaces. If set to 
+# NO (the default), the class list will be sorted only by class name, 
+# not including the namespace part. 
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the 
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or 
+# disable (NO) the todo list. This list is created by putting \todo 
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or 
+# disable (NO) the test list. This list is created by putting \test 
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or 
+# disable (NO) the bug list. This list is created by putting \bug 
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
+# disable (NO) the deprecated list. This list is created by putting 
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional 
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       = 
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
+# the initial value of a variable or define consists of for it to appear in 
+# the documentation. If the initializer consists of more lines than specified 
+# here it will be hidden. Use a value of 0 to hide initializers completely. 
+# The appearance of the initializer of individual variables and defines in the 
+# documentation can be controlled using \showinitializer or \hideinitializer 
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
+# at the bottom of the documentation of classes and structs. If set to YES the 
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated 
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are 
+# generated by doxygen. Possible values are YES and NO. If left blank 
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = NO
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
+# potential errors in the documentation, such as not documenting some 
+# parameters in a documented function, or documenting parameters that 
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that 
+# doxygen can produce. The string should contain the $file, $line, and $text 
+# tags, which will be replaced by the file and line number from which the 
+# warning originated and the warning text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning 
+# and error messages should be written. If left blank the output is written 
+# to stderr.
+
+WARN_LOGFILE           = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain 
+# documented source files. You may enter file names like "myfile.cpp" or 
+# directories like "/usr/src/myproject". Separate the files or directories 
+# with spaces.
+
+INPUT                  = . \
+                         ../shared-core
+
+# If the value of the INPUT tag contains directories, you can use the 
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank the following patterns are tested: 
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp 
+# *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm
+
+FILE_PATTERNS          = *.c \
+                         *.h
+
+# The RECURSIVE tag can be used to turn specify whether or not subdirectories 
+# should be searched for input files as well. Possible values are YES and NO. 
+# If left blank NO is used.
+
+RECURSIVE              = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should 
+# excluded from the INPUT source files. This way you can easily exclude a 
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE                = 
+
+# The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories 
+# that are symbolic links (a Unix filesystem feature) are excluded from the input.
+
+EXCLUDE_SYMLINKS       = YES
+
+# If the value of the INPUT tag contains directories, you can use the 
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
+# certain files from those directories.
+
+EXCLUDE_PATTERNS       = 
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or 
+# directories that contain example code fragments that are included (see 
+# the \include command).
+
+EXAMPLE_PATH           = 
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank all files are included.
+
+EXAMPLE_PATTERNS       = 
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
+# searched for input files to be used with the \include or \dontinclude 
+# commands irrespective of the value of the RECURSIVE tag. 
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or 
+# directories that contain image that are included in the documentation (see 
+# the \image command).
+
+IMAGE_PATH             = 
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should 
+# invoke to filter for each input file. Doxygen will invoke the filter program 
+# by executing (via popen()) the command <filter> <input-file>, where <filter> 
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
+# input file. Doxygen will then use the output that the filter program writes 
+# to standard output.  If FILTER_PATTERNS is specified, this tag will be 
+# ignored.
+
+INPUT_FILTER           = 
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern 
+# basis.  Doxygen will compare the file name with each pattern and apply the 
+# filter if there is a match.  The filters are a list of the form: 
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further 
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER 
+# is applied to all files.
+
+FILTER_PATTERNS        = 
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
+# INPUT_FILTER) will be used to filter the input files when producing source 
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
+# be generated. Documented entities will be cross-referenced with these sources. 
+# Note: To get rid of all source code in the generated output, make sure also 
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body 
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
+# doxygen to hide any special comment blocks from generated source code 
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default) 
+# then for each documented function all documented 
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default) 
+# then for each documented function all documented entities 
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = YES
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
+# will generate a verbatim copy of the header file for each class for 
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
+# of all compounds will be generated. Enable this if the project 
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all 
+# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX          = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard header.
+
+HTML_HEADER            = 
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard footer.
+
+HTML_FOOTER            = 
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
+# style sheet that is used by each HTML page. It can be used to 
+# fine-tune the look of the HTML output. If the tag is left blank doxygen 
+# will generate a default style sheet. Note that doxygen will try to copy 
+# the style sheet file to the HTML output directory, so don't put your own 
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET        = 
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
+# files or namespaces will be aligned in HTML using tables. If set to 
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS     = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
+# will be generated that can be used as input for tools like the 
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) 
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can 
+# be used to specify the file name of the resulting .chm file. You 
+# can add a path in front of the file if the result should not be 
+# written to the html output directory.
+
+CHM_FILE               = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can 
+# be used to specify the location (absolute path including file name) of 
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run 
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
+# controls if a separate .chi index file is generated (YES) or that 
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
+# controls whether a binary table of contents is generated (YES) or a 
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members 
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
+# top of each HTML page. The value NO (the default) enables the index and 
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [1..20]) 
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that 
+# is generated for HTML Help). For this to work a browser that supports 
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, 
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are 
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW      = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
+# used to set the initial width (in pixels) of the frame in which the tree 
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
+# generate Latex output.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
+# generate index for LaTeX. If left blank `makeindex' will be used as the 
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
+# LaTeX documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used 
+# by the printer. Possible values are: a4, a4wide, letter, legal and 
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
+# the generated latex document. The header should contain everything until 
+# the first chapter. If it is left blank doxygen will generate a 
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
+# contain links (just like the HTML output) instead of page references 
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
+# plain latex in the generated Makefile. Set this option to YES to get a 
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. 
+# command to the generated LaTeX files. This will instruct LaTeX to keep 
+# running if errors occur, instead of asking the user for help. 
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
+# include the index chapters (such as File Index, Compound Index, etc.) 
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output 
+# The RTF output is optimized for Word 97 and may not look very pretty with 
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
+# RTF documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
+# will contain hyperlink fields. The RTF file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other 
+# programs which support those fields. 
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's 
+# config file, i.e. a series of assignments. You only have to provide 
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    = 
+
+# Set optional variables used in the generation of an rtf document. 
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
+# generate man pages
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will 
+# generate an XML file that captures the structure of 
+# the code including all documentation.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_SCHEMA             = 
+
+# The XML_DTD tag can be used to specify an XML DTD, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_DTD                = 
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will 
+# dump the program listings (including syntax highlighting 
+# and cross-referencing information) to the XML output. Note that 
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
+# generate an AutoGen Definitions (see autogen.sf.net) file 
+# that captures the structure of the code including all 
+# documentation. Note that this feature is still experimental 
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
+# generate a Perl module file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
+# nicely formatted so it can be parsed by a human reader.  This is useful 
+# if you want to understand what is going on.  On the other hand, if this 
+# tag is set to NO the size of the Perl module output will be much smaller 
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file 
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same 
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX = 
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF     = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files 
+# in the INCLUDE_PATH (see below) will be search if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that 
+# contain include files that are not input files but should be processed by 
+# the preprocessor.
+
+INCLUDE_PATH           = 
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
+INCLUDE_FILE_PATTERNS  = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed.
+
+PREDEFINED             = __KERNEL__ \
+                         DRM(x)=x \
+                         __OS_HAS_AGP=1 \
+                         __OS_HAS_MTRR=1
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      = DRMFILE \
+                         DRM_IOCTL_ARGS \
+                         DRM_IRQ_ARGS \
+                         DRM_TASKQUEUE_ARGS
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line, have an all uppercase name, and do not end with a semicolon. Such 
+# function macros are typically used for boiler-plate code, and will confuse the 
+# parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. 
+# Optionally an initial location of the external documentation 
+# can be added for each tagfile. The format of a tag file without 
+# this location is as follows: 
+#   TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: 
+#   TAGFILES = file1=loc1 "file2 = loc2" ... 
+# where "loc1" and "loc2" can be relative or absolute paths or 
+# URLs. If a location is present for each tag, the installdox tool 
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path).
+# If a tag file is not located in the directory in which doxygen 
+# is run, you must also specify the path to the tagfile here.
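+# For example (hypothetical tag file and location):
+#   TAGFILES = libdrm.tag=../libdrm/html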
+
+TAGFILES               = 
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       = 
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
+# in the class index. If set to NO only the inherited external classes 
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
+# in the modules index. If set to NO, only the current project's groups will 
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script 
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or 
+# super classes. Setting the tag to NO turns the diagrams off. Note that this 
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is 
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide 
+# inheritance and usage relations if the target is undocumented 
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
+# available from the path. This tool is part of Graphviz, a graph visualization 
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
+# have no effect if this option is set to NO (the default).
+
+HAVE_DOT               = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect implementation dependencies (inheritance, containment, and 
+# class reference variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
+# collaboration diagrams in a style similar to the OMG's Unified Modeling 
+# Language.
+
+UML_LOOK               = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the 
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
+# tags are set to YES then doxygen will generate a graph for each documented 
+# file showing the direct and indirect include dependencies of the file with 
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
+# documented header file showing the documented files that directly or 
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will 
+# generate a call dependency graph for every global function or class method. 
+# Note that enabling this option will significantly increase the time of a run. 
+# So in most cases it will be better to enable call graphs for selected 
+# functions only using the \callgraph command.
+
+CALL_GRAPH             = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
+# generated by dot. Possible values are png, jpg, or gif.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT       = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH               = 
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that 
+# contain dot files that are included in the documentation (see the 
+# \dotfile command).
+
+DOTFILE_DIRS           = 
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_WIDTH    = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT   = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
+# graphs generated by dot. A depth value of 3 means that only nodes reachable 
+# from the root by following a path via at most 3 edges will be shown. Nodes that 
+# lie farther from the root node will be omitted. Note that setting this option to 
+# 1 or 2 may greatly reduce the computation time needed for large code bases. Also 
+# note that a graph may be further truncated if the graph's image dimensions are 
+# not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH and MAX_DOT_GRAPH_HEIGHT). 
+# If 0 is used for the depth value (the default), the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
+# generate a legend page explaining the meaning of the various boxes and 
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
+# remove the intermediate dot files that are used to generate 
+# the various graphs.
+
+DOT_CLEANUP            = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be 
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE           = NO
diff --git a/psb-kernel-source-4.41.1/GPLv2_License.txt b/psb-kernel-source-4.41.1/GPLv2_License.txt
new file mode 100644 (file)
index 0000000..ce7ac8e
--- /dev/null
@@ -0,0 +1,341 @@
+\r
+                   GNU GENERAL PUBLIC LICENSE\r
+                      Version 2, June 1991\r
+\r
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.\r
+                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\r
+ Everyone is permitted to copy and distribute verbatim copies\r
+ of this license document, but changing it is not allowed.\r
+\r
+                           Preamble\r
+\r
+  The licenses for most software are designed to take away your\r
+freedom to share and change it.  By contrast, the GNU General Public\r
+License is intended to guarantee your freedom to share and change free\r
+software--to make sure the software is free for all its users.  This\r
+General Public License applies to most of the Free Software\r
+Foundation's software and to any other program whose authors commit to\r
+using it.  (Some other Free Software Foundation software is covered by\r
+the GNU Library General Public License instead.)  You can apply it to\r
+your programs, too.\r
+\r
+  When we speak of free software, we are referring to freedom, not\r
+price.  Our General Public Licenses are designed to make sure that you\r
+have the freedom to distribute copies of free software (and charge for\r
+this service if you wish), that you receive source code or can get it\r
+if you want it, that you can change the software or use pieces of it\r
+in new free programs; and that you know you can do these things.\r
+\r
+  To protect your rights, we need to make restrictions that forbid\r
+anyone to deny you these rights or to ask you to surrender the rights.\r
+These restrictions translate to certain responsibilities for you if you\r
+distribute copies of the software, or if you modify it.\r
+\r
+  For example, if you distribute copies of such a program, whether\r
+gratis or for a fee, you must give the recipients all the rights that\r
+you have.  You must make sure that they, too, receive or can get the\r
+source code.  And you must show them these terms so they know their\r
+rights.\r
+\r
+  We protect your rights with two steps: (1) copyright the software, and\r
+(2) offer you this license which gives you legal permission to copy,\r
+distribute and/or modify the software.\r
+\r
+  Also, for each author's protection and ours, we want to make certain\r
+that everyone understands that there is no warranty for this free\r
+software.  If the software is modified by someone else and passed on, we\r
+want its recipients to know that what they have is not the original, so\r
+that any problems introduced by others will not reflect on the original\r
+authors' reputations.\r
+\r
+  Finally, any free program is threatened constantly by software\r
+patents.  We wish to avoid the danger that redistributors of a free\r
+program will individually obtain patent licenses, in effect making the\r
+program proprietary.  To prevent this, we have made it clear that any\r
+patent must be licensed for everyone's free use or not licensed at all.\r
+\r
+  The precise terms and conditions for copying, distribution and\r
+modification follow.\r
+\f\r
+                   GNU GENERAL PUBLIC LICENSE\r
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r
+\r
+  0. This License applies to any program or other work which contains\r
+a notice placed by the copyright holder saying it may be distributed\r
+under the terms of this General Public License.  The "Program", below,\r
+refers to any such program or work, and a "work based on the Program"\r
+means either the Program or any derivative work under copyright law:\r
+that is to say, a work containing the Program or a portion of it,\r
+either verbatim or with modifications and/or translated into another\r
+language.  (Hereinafter, translation is included without limitation in\r
+the term "modification".)  Each licensee is addressed as "you".\r
+\r
+Activities other than copying, distribution and modification are not\r
+covered by this License; they are outside its scope.  The act of\r
+running the Program is not restricted, and the output from the Program\r
+is covered only if its contents constitute a work based on the\r
+Program (independent of having been made by running the Program).\r
+Whether that is true depends on what the Program does.\r
+\r
+  1. You may copy and distribute verbatim copies of the Program's\r
+source code as you receive it, in any medium, provided that you\r
+conspicuously and appropriately publish on each copy an appropriate\r
+copyright notice and disclaimer of warranty; keep intact all the\r
+notices that refer to this License and to the absence of any warranty;\r
+and give any other recipients of the Program a copy of this License\r
+along with the Program.\r
+\r
+You may charge a fee for the physical act of transferring a copy, and\r
+you may at your option offer warranty protection in exchange for a fee.\r
+\r
+  2. You may modify your copy or copies of the Program or any portion\r
+of it, thus forming a work based on the Program, and copy and\r
+distribute such modifications or work under the terms of Section 1\r
+above, provided that you also meet all of these conditions:\r
+\r
+    a) You must cause the modified files to carry prominent notices\r
+    stating that you changed the files and the date of any change.\r
+\r
+    b) You must cause any work that you distribute or publish, that in\r
+    whole or in part contains or is derived from the Program or any\r
+    part thereof, to be licensed as a whole at no charge to all third\r
+    parties under the terms of this License.\r
+\r
+    c) If the modified program normally reads commands interactively\r
+    when run, you must cause it, when started running for such\r
+    interactive use in the most ordinary way, to print or display an\r
+    announcement including an appropriate copyright notice and a\r
+    notice that there is no warranty (or else, saying that you provide\r
+    a warranty) and that users may redistribute the program under\r
+    these conditions, and telling the user how to view a copy of this\r
+    License.  (Exception: if the Program itself is interactive but\r
+    does not normally print such an announcement, your work based on\r
+    the Program is not required to print an announcement.)\r
+\f\r
+These requirements apply to the modified work as a whole.  If\r
+identifiable sections of that work are not derived from the Program,\r
+and can be reasonably considered independent and separate works in\r
+themselves, then this License, and its terms, do not apply to those\r
+sections when you distribute them as separate works.  But when you\r
+distribute the same sections as part of a whole which is a work based\r
+on the Program, the distribution of the whole must be on the terms of\r
+this License, whose permissions for other licensees extend to the\r
+entire whole, and thus to each and every part regardless of who wrote it.\r
+\r
+Thus, it is not the intent of this section to claim rights or contest\r
+your rights to work written entirely by you; rather, the intent is to\r
+exercise the right to control the distribution of derivative or\r
+collective works based on the Program.\r
+\r
+In addition, mere aggregation of another work not based on the Program\r
+with the Program (or with a work based on the Program) on a volume of\r
+a storage or distribution medium does not bring the other work under\r
+the scope of this License.\r
+\r
+  3. You may copy and distribute the Program (or a work based on it,\r
+under Section 2) in object code or executable form under the terms of\r
+Sections 1 and 2 above provided that you also do one of the following:\r
+\r
+    a) Accompany it with the complete corresponding machine-readable\r
+    source code, which must be distributed under the terms of Sections\r
+    1 and 2 above on a medium customarily used for software interchange; or,\r
+\r
+    b) Accompany it with a written offer, valid for at least three\r
+    years, to give any third party, for a charge no more than your\r
+    cost of physically performing source distribution, a complete\r
+    machine-readable copy of the corresponding source code, to be\r
+    distributed under the terms of Sections 1 and 2 above on a medium\r
+    customarily used for software interchange; or,\r
+\r
+    c) Accompany it with the information you received as to the offer\r
+    to distribute corresponding source code.  (This alternative is\r
+    allowed only for noncommercial distribution and only if you\r
+    received the program in object code or executable form with such\r
+    an offer, in accord with Subsection b above.)\r
+\r
+The source code for a work means the preferred form of the work for\r
+making modifications to it.  For an executable work, complete source\r
+code means all the source code for all modules it contains, plus any\r
+associated interface definition files, plus the scripts used to\r
+control compilation and installation of the executable.  However, as a\r
+special exception, the source code distributed need not include\r
+anything that is normally distributed (in either source or binary\r
+form) with the major components (compiler, kernel, and so on) of the\r
+operating system on which the executable runs, unless that component\r
+itself accompanies the executable.\r
+\r
+If distribution of executable or object code is made by offering\r
+access to copy from a designated place, then offering equivalent\r
+access to copy the source code from the same place counts as\r
+distribution of the source code, even though third parties are not\r
+compelled to copy the source along with the object code.\r
+\f\r
+  4. You may not copy, modify, sublicense, or distribute the Program\r
+except as expressly provided under this License.  Any attempt\r
+otherwise to copy, modify, sublicense or distribute the Program is\r
+void, and will automatically terminate your rights under this License.\r
+However, parties who have received copies, or rights, from you under\r
+this License will not have their licenses terminated so long as such\r
+parties remain in full compliance.\r
+\r
+  5. You are not required to accept this License, since you have not\r
+signed it.  However, nothing else grants you permission to modify or\r
+distribute the Program or its derivative works.  These actions are\r
+prohibited by law if you do not accept this License.  Therefore, by\r
+modifying or distributing the Program (or any work based on the\r
+Program), you indicate your acceptance of this License to do so, and\r
+all its terms and conditions for copying, distributing or modifying\r
+the Program or works based on it.\r
+\r
+  6. Each time you redistribute the Program (or any work based on the\r
+Program), the recipient automatically receives a license from the\r
+original licensor to copy, distribute or modify the Program subject to\r
+these terms and conditions.  You may not impose any further\r
+restrictions on the recipients' exercise of the rights granted herein.\r
+You are not responsible for enforcing compliance by third parties to\r
+this License.\r
+\r
+  7. If, as a consequence of a court judgment or allegation of patent\r
+infringement or for any other reason (not limited to patent issues),\r
+conditions are imposed on you (whether by court order, agreement or\r
+otherwise) that contradict the conditions of this License, they do not\r
+excuse you from the conditions of this License.  If you cannot\r
+distribute so as to satisfy simultaneously your obligations under this\r
+License and any other pertinent obligations, then as a consequence you\r
+may not distribute the Program at all.  For example, if a patent\r
+license would not permit royalty-free redistribution of the Program by\r
+all those who receive copies directly or indirectly through you, then\r
+the only way you could satisfy both it and this License would be to\r
+refrain entirely from distribution of the Program.\r
+\r
+If any portion of this section is held invalid or unenforceable under\r
+any particular circumstance, the balance of the section is intended to\r
+apply and the section as a whole is intended to apply in other\r
+circumstances.\r
+\r
+It is not the purpose of this section to induce you to infringe any\r
+patents or other property right claims or to contest validity of any\r
+such claims; this section has the sole purpose of protecting the\r
+integrity of the free software distribution system, which is\r
+implemented by public license practices.  Many people have made\r
+generous contributions to the wide range of software distributed\r
+through that system in reliance on consistent application of that\r
+system; it is up to the author/donor to decide if he or she is willing\r
+to distribute software through any other system and a licensee cannot\r
+impose that choice.\r
+\r
+This section is intended to make thoroughly clear what is believed to\r
+be a consequence of the rest of this License.\r
+\f\r
+  8. If the distribution and/or use of the Program is restricted in\r
+certain countries either by patents or by copyrighted interfaces, the\r
+original copyright holder who places the Program under this License\r
+may add an explicit geographical distribution limitation excluding\r
+those countries, so that distribution is permitted only in or among\r
+countries not thus excluded.  In such case, this License incorporates\r
+the limitation as if written in the body of this License.\r
+\r
+  9. The Free Software Foundation may publish revised and/or new versions\r
+of the General Public License from time to time.  Such new versions will\r
+be similar in spirit to the present version, but may differ in detail to\r
+address new problems or concerns.\r
+\r
+Each version is given a distinguishing version number.  If the Program\r
+specifies a version number of this License which applies to it and "any\r
+later version", you have the option of following the terms and conditions\r
+either of that version or of any later version published by the Free\r
+Software Foundation.  If the Program does not specify a version number of\r
+this License, you may choose any version ever published by the Free Software\r
+Foundation.\r
+\r
+  10. If you wish to incorporate parts of the Program into other free\r
+programs whose distribution conditions are different, write to the author\r
+to ask for permission.  For software which is copyrighted by the Free\r
+Software Foundation, write to the Free Software Foundation; we sometimes\r
+make exceptions for this.  Our decision will be guided by the two goals\r
+of preserving the free status of all derivatives of our free software and\r
+of promoting the sharing and reuse of software generally.\r
+\r
+                           NO WARRANTY\r
+\r
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\r
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN\r
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\r
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\r
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS\r
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE\r
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\r
+REPAIR OR CORRECTION.\r
+\r
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\r
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\r
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\r
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\r
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\r
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\r
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\r
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\r
+POSSIBILITY OF SUCH DAMAGES.\r
+\r
+                    END OF TERMS AND CONDITIONS\r
+\f\r
+           How to Apply These Terms to Your New Programs\r
+\r
+  If you develop a new program, and you want it to be of the greatest\r
+possible use to the public, the best way to achieve this is to make it\r
+free software which everyone can redistribute and change under these terms.\r
+\r
+  To do so, attach the following notices to the program.  It is safest\r
+to attach them to the start of each source file to most effectively\r
+convey the exclusion of warranty; and each file should have at least\r
+the "copyright" line and a pointer to where the full notice is found.\r
+\r
+    <one line to give the program's name and a brief idea of what it does.>\r
+    Copyright (C) <year>  <name of author>\r
+\r
+    This program is free software; you can redistribute it and/or modify\r
+    it under the terms of the GNU General Public License as published by\r
+    the Free Software Foundation; either version 2 of the License, or\r
+    (at your option) any later version.\r
+\r
+    This program is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
+    GNU General Public License for more details.\r
+\r
+    You should have received a copy of the GNU General Public License\r
+    along with this program; if not, write to the Free Software\r
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\r
+\r
+\r
+Also add information on how to contact you by electronic and paper mail.\r
+\r
+If the program is interactive, make it output a short notice like this\r
+when it starts in an interactive mode:\r
+\r
+    Gnomovision version 69, Copyright (C) year name of author\r
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\r
+    This is free software, and you are welcome to redistribute it\r
+    under certain conditions; type `show c' for details.\r
+\r
+The hypothetical commands `show w' and `show c' should show the appropriate\r
+parts of the General Public License.  Of course, the commands you use may\r
+be called something other than `show w' and `show c'; they could even be\r
+mouse-clicks or menu items--whatever suits your program.\r
+\r
+You should also get your employer (if you work as a programmer) or your\r
+school, if any, to sign a "copyright disclaimer" for the program, if\r
+necessary.  Here is a sample; alter the names:\r
+\r
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program\r
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.\r
+\r
+  <signature of Ty Coon>, 1 April 1989\r
+  Ty Coon, President of Vice\r
+\r
+This General Public License does not permit incorporating your program into\r
+proprietary programs.  If your program is a subroutine library, you may\r
+consider it more useful to permit linking proprietary applications with the\r
+library.  If this is what you want to do, use the GNU Library General\r
+Public License instead of this License.\r
diff --git a/psb-kernel-source-4.41.1/Kconfig b/psb-kernel-source-4.41.1/Kconfig
new file mode 100644 (file)
index 0000000..96015b1
--- /dev/null
@@ -0,0 +1,103 @@
+#
+# Drm device configuration
+#
+# This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+config DRM
+       bool "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
+       help
+         Kernel-level support for the Direct Rendering Infrastructure (DRI)
+         introduced in XFree86 4.0. If you say Y here, you need to select
+         the module that's right for your graphics card from the list below.
+         These modules provide support for synchronization, security, and
+         DMA transfers. Please see <http://dri.sourceforge.net/> for more
+         details.  You should also select and configure AGP
+         (/dev/agpgart) support.
+
+config DRM_TDFX
+       tristate "3dfx Banshee/Voodoo3+"
+       depends on DRM && PCI
+       help
+         Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
+         graphics card.  If M is selected, the module will be called tdfx.
+
+config DRM_R128
+       tristate "ATI Rage 128"
+       depends on DRM && PCI
+       help
+         Choose this option if you have an ATI Rage 128 graphics card.  If M
+         is selected, the module will be called r128.  AGP support for
+         this card is strongly suggested (unless you have a PCI version).
+
+config DRM_RADEON
+       tristate "ATI Radeon"
+       depends on DRM && PCI
+       help
+         Choose this option if you have an ATI Radeon graphics card.  There
+         are both PCI and AGP versions.  You don't need to choose this to
+         run the Radeon in plain VGA mode.  There is a product page at
+         <http://www.ati.com/na/pages/products/pc/radeon32/index.html>.
+         If M is selected, the module will be called radeon.
+
+config DRM_I810
+       tristate "Intel I810"
+       depends on DRM && AGP && AGP_INTEL
+       help
+         Choose this option if you have an Intel I810 graphics card.  If M is
+         selected, the module will be called i810.  AGP support is required
+         for this driver to work.
+
+choice
+       prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
+       depends on DRM && AGP && AGP_INTEL
+       optional
+
+config DRM_I915
+       tristate "i915 driver"
+       depends on DRM && AGP && AGP_INTEL
+       help
+         Choose this option if you have a system that has Intel 830M, 845G,
+         852GM, 855GM, 865G, 915G, 915GM, 945G, 945GM and 965G integrated 
+         graphics.  If M is selected, the module will be called i915.  
+         AGP support is required for this driver to work.
+       
+endchoice
+
+config DRM_MGA
+       tristate "Matrox g200/g400"
+       depends on DRM && (!X86_64 || BROKEN) && (!PPC || BROKEN)
+       help
+         Choose this option if you have a Matrox G200, G400, G450 or G550
+         graphics card.  If M is selected, the module will be called mga.
+
+config DRM_SIS
+       tristate "SiS video cards"
+       depends on DRM
+       help
+         Choose this option if you have a SiS 630 or compatible video 
+         chipset. If M is selected the module will be called sis.
+
+config DRM_VIA
+       tristate "Via unichrome video cards"
+       depends on DRM 
+       help
+         Choose this option if you have a Via unichrome or compatible video 
+         chipset. If M is selected the module will be called via.
+
+config DRM_MACH64
+       tristate "ATI Rage Pro (Mach64)"
+       depends on DRM && PCI
+       help
+         Choose this option if you have an ATI Rage Pro (mach64 chipset)
+         graphics card.  Example cards include:  3D Rage Pro, Xpert 98,
+         3D Rage LT Pro, 3D Rage XL/XC, and 3D Rage Mobility (P/M, M1).
+         Cards earlier than ATI Rage Pro (e.g. Rage II) are not supported.
+         If M is selected, the module will be called mach64.  AGP support for
+         this card is strongly suggested (unless you have a PCI version).
+
+config DRM_PSB
+       tristate "Intel Poulsbo"
+       depends on DRM && PCI && I2C_ALGOBIT
+       help
+         Choose this option if you have an Intel Poulsbo chipset.
diff --git a/psb-kernel-source-4.41.1/Makefile b/psb-kernel-source-4.41.1/Makefile
new file mode 100644 (file)
index 0000000..1684557
--- /dev/null
@@ -0,0 +1,374 @@
+# Makefile -- For the Direct Rendering Manager module (drm)
+#
+# Based on David Woodhouse's mtd build.
+#
+# Modified to handle the DRM requirements and builds on a wider range of
+# platforms in a flexible way by David Dawes.  It's not clear, however,
+# that this approach is simpler than the old one.
+#
+# The purpose of this Makefile is to handle setting up everything
+# needed for an out-of-kernel source build.  Makefile.kernel contains
+# everything required for in-kernel source builds.  It is included into
+# this file, so none of that should be duplicated here.
+#
+# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.linux,v 1.40 2003/08/17 17:12:25 dawes Exp $
+#
+
+#
+# By default, the build is done against the running linux kernel source.
+# To build against a different kernel source tree, set LINUXDIR:
+#
+#    make LINUXDIR=/path/to/kernel/source
+
+#
+# To build only some modules, either set DRM_MODULES to the list of modules,
+# or specify the modules as targets:
+#
+#    make r128.o radeon.o
+#
+# or:
+#
+#    make DRM_MODULES="r128 radeon"
+#
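+#
+# The two variables can be combined; for example (illustrative path), to
+# build only the psb module against a specific tree:
+#
+#    make LINUXDIR=/path/to/kernel/source DRM_MODULES="psb"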
+
+SHELL=/bin/sh
+
+.SUFFIXES:
+
+ifndef LINUXDIR
+RUNNING_REL := $(shell uname -r)
+
+LINUXDIR := $(shell if [ -e /lib/modules/$(RUNNING_REL)/source ]; then \
+                echo /lib/modules/$(RUNNING_REL)/source; \
+                else echo /lib/modules/$(RUNNING_REL)/build; fi)
+endif
+
+ifndef O
+O := $(shell if [ -e /lib/modules/$(RUNNING_REL)/build ]; then \
+                echo /lib/modules/$(RUNNING_REL)/build; \
+                else echo ""; fi)
+#O := $(LINUXDIR)
+endif
+
+ifdef ARCH
+MACHINE := $(ARCH)
+else
+MACHINE := $(shell uname -m)
+endif
+
+# Modules for all architectures
+MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
+               mach64.o nv.o nouveau.o psb.o xgi.o
+
+# Modules only for ix86 architectures
+ifneq (,$(findstring 86,$(MACHINE)))
+ARCHX86 := 1
+MODULE_LIST += i810.o i915.o
+endif
+
+ifneq (,$(findstring sparc64,$(MACHINE)))
+ARCHSPARC64 := 1
+#MODULE_LIST += ffb.o
+endif
+
+DRM_MODULES ?= $(MODULE_LIST)
+
+# These definitions are for handling dependencies in the out-of-kernel build.
+
+DRMHEADERS =    drmP.h drm_compat.h drm_os_linux.h drm.h drm_sarea.h
+COREHEADERS =   drm_core.h drm_sman.h drm_hashtab.h 
+
+TDFXHEADERS =   tdfx_drv.h $(DRMHEADERS)
+R128HEADERS =   r128_drv.h r128_drm.h $(DRMHEADERS)
+RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS)
+MGAHEADERS =    mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS)
+I810HEADERS =   i810_drv.h i810_drm.h $(DRMHEADERS)
+I915HEADERS =   i915_drv.h i915_drm.h $(DRMHEADERS)
+SISHEADERS=     sis_drv.h sis_drm.h drm_hashtab.h drm_sman.h $(DRMHEADERS)
+SAVAGEHEADERS=  savage_drv.h savage_drm.h $(DRMHEADERS)
+VIAHEADERS =   via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS)
+MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
+NVHEADERS =     nv_drv.h $(DRMHEADERS)
+FFBHEADERS =   ffb_drv.h $(DRMHEADERS)
+NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
+PSBHEADERS=    psb_drv.h psb_drm.h psb_reg.h psb_kreg.h psb_scene.h \
+       psb_schedule.h psb_detear.h $(DRMHEADERS)
+XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
+
+PROGS = dristat drmstat
+
+CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c .tmp_versions
+
+# VERSION is not defined from the initial invocation.  It is defined when
+# this Makefile is invoked from the kernel's root Makefile.
+
+ifndef VERSION
+
+ifdef RUNNING_REL
+
+# SuSE has the version.h and autoconf.h headers for the current kernel
+# in /boot as /boot/vmlinuz.version.h and /boot/vmlinuz.autoconf.h.
+# Check these first to see if they match the running kernel.
+
+BOOTVERSION_PREFIX = /boot/vmlinuz.
+
+V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
+       grep UTS_RELEASE $(BOOTVERSION_PREFIX)version.h | \
+       cut -d' ' -f3; fi)
+
+ifeq ($(V),"$(RUNNING_REL)")
+HEADERFROMBOOT := 1
+GETCONFIG := MAKEFILES=$(shell pwd)/.config
+HAVECONFIG := y
+endif
+
+# On Red Hat we need to check if there is a .config file in the kernel
+# source directory.  If there isn't, we need to check if there's a
+# matching file in the configs subdirectory.
+
+ifneq ($(HAVECONFIG),y)
+HAVECONFIG := $(shell if [ -e $(LINUXDIR)/.config ]; then echo y; fi)
+endif
+
+ifneq ($(HAVECONFIG),y)
+REL_BASE := $(shell echo $(RUNNING_REL) | sed 's/-.*//')
+REL_TYPE := $(shell echo $(RUNNING_REL) | sed 's/[0-9.-]//g')
+ifeq ($(REL_TYPE),)
+RHCONFIG := configs/kernel-$(REL_BASE)-$(MACHINE).config
+else
+RHCONFIG := configs/kernel-$(REL_BASE)-$(MACHINE)-$(REL_TYPE).config
+endif
+HAVECONFIG := $(shell if [ -e $(LINUXDIR)/$(RHCONFIG) ]; then echo y; fi)
+ifneq ($(HAVECONFIG),y)
+RHCONFIG :=
+endif
+endif
+
+ifneq ($(HAVECONFIG),y)
+ifneq ($(O),$(LINUXDIR))
+GETCONFIG += O=$(O)
+endif
+HAVECONFIG := $(shell if [ -e $(O)/.config ]; then echo y; fi)
+endif
+
+GETCONFIG += O=$(O)
+
+ifneq ($(HAVECONFIG),y)
+$(error Cannot find a kernel config file)
+endif
+
+endif
+
+CLEANCONFIG := $(shell if cmp -s $(LINUXDIR)/.config .config; then echo y; fi)
+ifeq ($(CLEANCONFIG),y)
+CLEANFILES += $(LINUXDIR)/.config .config $(LINUXDIR)/tmp_include_depends
+endif
+
+all: modules
+
+modules: includes
+       +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+
+ifeq ($(HEADERFROMBOOT),1)
+
+BOOTHEADERS = version.h autoconf.h
+BOOTCONFIG = .config
+
+CLEANFILES += $(BOOTHEADERS) $(BOOTCONFIG)
+
+includes:: $(BOOTHEADERS) $(BOOTCONFIG)
+
+version.h: $(BOOTVERSION_PREFIX)version.h
+       rm -f $@
+       ln -s $< $@
+
+autoconf.h: $(BOOTVERSION_PREFIX)autoconf.h
+       rm -f $@
+       ln -s $< $@
+
+.config: $(BOOTVERSION_PREFIX)config
+       rm -f $@
+       ln -s $< $@
+endif
+
+# This prepares an unused Red Hat kernel tree for the build.
+ifneq ($(RHCONFIG),)
+includes:: $(LINUXDIR)/.config $(LINUXDIR)/tmp_include_depends .config
+
+$(LINUXDIR)/.config: $(LINUXDIR)/$(RHCONFIG)
+       rm -f $@
+       ln -s $< $@
+
+.config: $(LINUXDIR)/$(RHCONFIG)
+       rm -f $@
+       ln -s $< $@
+
+$(LINUXDIR)/tmp_include_depends:
+       echo all: > $@
+endif
+
+# Make sure that the shared source files are linked into this directory.
+
+
+SHAREDDIR := .
+
+ifeq ($(shell if [ -d $(SHAREDDIR) ]; then echo y; fi),y)
+includes::  drm_pciids.h
+
+drm_pciids.h: $(SHAREDDIR)/drm_pciids.txt
+       sh ./create_linux_pci_lists.sh < $(SHAREDDIR)/drm_pciids.txt
+else
+includes::
+
+endif
+
+clean cleandir:
+       rm -rf $(CLEANFILES)
+
+$(MODULE_LIST)::
+       make DRM_MODULES=$@ modules
+
+# Build test utilities
+
+PRGCFLAGS = $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \
+           -D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \
+           -I. -I../../..
+
+DRMSTATLIBS = -L../../.. -L.. -ldrm -lxf86_os \
+             -L../../../../dummylib -ldummy -lm
+
+programs: $(PROGS)
+
+dristat: dristat.c
+       $(CC) $(PRGCFLAGS) $< -o $@
+
+drmstat: drmstat.c
+       $(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
+
+install:
+       make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
+
+else
+
+# Check for kernel versions that we don't support.
+
+BELOW26 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 6 ]; then \
+               echo y; fi)
+
+ifeq ($(BELOW26),y)
+$(error Only 2.6.x and later kernels are supported \
+       ($(VERSION).$(PATCHLEVEL).$(SUBLEVEL)))
+endif
+
+ifdef ARCHX86
+ifndef CONFIG_X86_CMPXCHG
+$(error CONFIG_X86_CMPXCHG needs to be enabled in the kernel)
+endif
+endif
+
+# This needs to go before all other include paths.
+CC += -I$(DRMSRCDIR)
+
+# Check for PAGE_AGP definition
+PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
+               grep -c PAGE_AGP)
+
+ifneq ($(PAGE_AGP),0)
+EXTRA_CFLAGS += -DHAVE_PAGE_AGP
+endif
+
+# Start with all modules turned off.
+CONFIG_DRM_GAMMA := n
+CONFIG_DRM_TDFX := n
+CONFIG_DRM_MGA := n
+CONFIG_DRM_I810 := n
+CONFIG_DRM_R128 := n
+CONFIG_DRM_RADEON := n
+CONFIG_DRM_I915 := n
+CONFIG_DRM_SIS := n
+CONFIG_DRM_FFB := n
+CONFIG_DRM_SAVAGE := n
+CONFIG_DRM_VIA := n
+CONFIG_DRM_MACH64 := n
+CONFIG_DRM_NV := n
+CONFIG_DRM_NOUVEAU := n
+CONFIG_DRM_PSB := n
+CONFIG_DRM_XGI := n
+
+# Enable module builds for the modules requested/supported.
+
+ifneq (,$(findstring tdfx,$(DRM_MODULES)))
+CONFIG_DRM_TDFX := m
+endif
+ifneq (,$(findstring r128,$(DRM_MODULES)))
+CONFIG_DRM_R128 := m
+endif
+ifneq (,$(findstring radeon,$(DRM_MODULES)))
+CONFIG_DRM_RADEON := m
+endif
+ifneq (,$(findstring sis,$(DRM_MODULES)))
+CONFIG_DRM_SIS := m
+endif
+ifneq (,$(findstring via,$(DRM_MODULES)))
+CONFIG_DRM_VIA := m
+endif
+ifneq (,$(findstring mach64,$(DRM_MODULES)))
+CONFIG_DRM_MACH64 := m
+endif
+ifneq (,$(findstring ffb,$(DRM_MODULES)))
+CONFIG_DRM_FFB := m
+endif
+ifneq (,$(findstring savage,$(DRM_MODULES)))
+CONFIG_DRM_SAVAGE := m
+endif
+ifneq (,$(findstring mga,$(DRM_MODULES)))
+CONFIG_DRM_MGA := m
+endif
+ifneq (,$(findstring nv,$(DRM_MODULES)))
+CONFIG_DRM_NV := m
+endif
+ifneq (,$(findstring nouveau,$(DRM_MODULES)))
+CONFIG_DRM_NOUVEAU := m
+endif
+ifneq (,$(findstring xgi,$(DRM_MODULES)))
+CONFIG_DRM_XGI := m
+endif
+
+# These require AGP support
+
+ifneq (,$(findstring i810,$(DRM_MODULES)))
+CONFIG_DRM_I810 := m
+endif
+ifneq (,$(findstring i915,$(DRM_MODULES)))
+CONFIG_DRM_I915 := m
+endif
+ifneq (,$(findstring psb,$(DRM_MODULES)))
+CONFIG_DRM_PSB := m
+endif
+include $(DRMSRCDIR)/Makefile.kernel
+
+# Dependencies
+$(drm-objs):   $(DRMHEADERS) $(COREHEADERS)
+$(tdfx-objs):  $(TDFXHEADERS)
+$(r128-objs):  $(R128HEADERS)
+$(mga-objs):   $(MGAHEADERS)
+$(i810-objs):  $(I810HEADERS)
+$(i915-objs):  $(I915HEADERS)
+$(radeon-objs):        $(RADEONHEADERS)
+$(sis-objs):   $(SISHEADERS)
+$(ffb-objs):   $(FFBHEADERS)
+$(savage-objs): $(SAVAGEHEADERS)
+$(via-objs):   $(VIAHEADERS)
+$(mach64-objs): $(MACH64HEADERS)
+$(nv-objs):     $(NVHEADERS)
+$(nouveau-objs): $(NOUVEAUHEADERS)
+$(psb-objs):   $(PSBHEADERS)
+$(xgi-objs):    $(XGIHEADERS)
+
+endif
+
diff --git a/psb-kernel-source-4.41.1/Makefile.kernel b/psb-kernel-source-4.41.1/Makefile.kernel
new file mode 100644 (file)
index 0000000..702bf28
--- /dev/null
@@ -0,0 +1,76 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+# Based on David Woodhouse's mtd build.
+#
+# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.18 2003/08/16 17:59:17 dawes Exp $
+#
+
+drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
+               drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
+               drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+               drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
+               drm_memory_debug.o ati_pcigart.o drm_sman.o \
+               drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+               drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \
+               drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o drm_vm_nopage_compat.o
+tdfx-objs   := tdfx_drv.o
+r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
+mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
+i810-objs   := i810_drv.o i810_dma.o
+i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
+               i915_buffer.o intel_setup.o intel_i2c.o i915_init.o intel_fb.o \
+               i915_compat.o
+nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
+               nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
+               nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \
+               nv04_timer.o \
+               nv04_mc.o nv40_mc.o nv50_mc.o \
+               nv04_fb.o nv10_fb.o nv40_fb.o \
+               nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+               nv04_graph.o nv10_graph.o nv20_graph.o \
+               nv40_graph.o nv50_graph.o \
+               nv04_instmem.o nv50_instmem.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_fence.o radeon_buffer.o
+sis-objs    := sis_drv.o sis_mm.o
+ffb-objs    := ffb_drv.o ffb_context.o
+savage-objs := savage_drv.o savage_bci.o savage_state.o
+via-objs    := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
+               via_video.o via_dmablit.o via_fence.o via_buffer.o
+mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
+nv-objs := nv_drv.o
+psb-objs    := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \
+               psb_buffer.o psb_gtt.o psb_setup.o psb_i2c.o psb_fb.o \
+               psb_schedule.o psb_scene.o psb_reset.o \
+               psb_regman.o psb_xhw.o psb_msvdx.o psb_msvdxinit.o \
+               psb_detear.o
+xgi-objs    := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
+               xgi_fence.o
+
+ifeq ($(CONFIG_COMPAT),y)
+drm-objs    += drm_ioc32.o
+radeon-objs += radeon_ioc32.o
+mga-objs    += mga_ioc32.o
+r128-objs   += r128_ioc32.o
+i915-objs   += i915_ioc32.o
+nouveau-objs += nouveau_ioc32.o
+xgi-objs    += xgi_ioc32.o
+endif
+
+obj-m                  += drm.o
+obj-$(CONFIG_DRM_TDFX) += tdfx.o
+obj-$(CONFIG_DRM_R128) += r128.o
+obj-$(CONFIG_DRM_RADEON)+= radeon.o
+obj-$(CONFIG_DRM_MGA)  += mga.o
+obj-$(CONFIG_DRM_I810) += i810.o
+obj-$(CONFIG_DRM_I915) += i915.o
+obj-$(CONFIG_DRM_SIS)   += sis.o
+obj-$(CONFIG_DRM_FFB)   += ffb.o
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
+obj-$(CONFIG_DRM_VIA)   += via.o
+obj-$(CONFIG_DRM_MACH64)+= mach64.o
+obj-$(CONFIG_DRM_NV)    += nv.o
+obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
+obj-$(CONFIG_DRM_PSB)   += psb.o
+obj-$(CONFIG_DRM_XGI)   += xgi.o
diff --git a/psb-kernel-source-4.41.1/Module.markers b/psb-kernel-source-4.41.1/Module.markers
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/psb-kernel-source-4.41.1/README.drm b/psb-kernel-source-4.41.1/README.drm
new file mode 100644 (file)
index 0000000..7bcd619
--- /dev/null
@@ -0,0 +1,25 @@
+************************************************************
+* For the very latest on DRI development, please see:      *
+*     http://dri.freedesktop.org/                          *
+************************************************************
+
+The Direct Rendering Manager (drm) is a device-independent kernel-level
+device driver that provides support for the XFree86 Direct Rendering
+Infrastructure (DRI).
+
+The DRM supports the Direct Rendering Infrastructure (DRI) in four major
+ways:
+
+    1. The DRM provides synchronized access to the graphics hardware via
+       the use of an optimized two-tiered lock.
+
+    2. The DRM enforces the DRI security policy for access to the graphics
+       hardware by only allowing authenticated X11 clients access to
+       restricted regions of memory.
+
+    3. The DRM provides a generic DMA engine, complete with multiple
+       queues and the ability to detect the need for an OpenGL context
+       switch.
+
+    4. The DRM is extensible via the use of small device-specific modules
+       that rely extensively on the API exported by the DRM module.
diff --git a/psb-kernel-source-4.41.1/ati_pcigart.c b/psb-kernel-source-4.41.1/ati_pcigart.c
new file mode 100644 (file)
index 0000000..5fafbb9
--- /dev/null
@@ -0,0 +1,419 @@
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+# define ATI_PCIGART_PAGE_SIZE         4096    /**< PCI GART page size */
+
+static __inline__ void insert_page_into_table(struct drm_ati_pcigart_info *info, u32 page_base, u32 *pci_gart)
+{
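+       /*
+        * GART entry encodings: IGP keeps the full page base with flag bits
+        * 0xc set, PCIE stores the page base shifted right by 8 with the
+        * same flag bits, and plain PCI stores the raw page base.
+        */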
+       switch(info->gart_reg_if) {
+       case DRM_ATI_GART_IGP:
+               *pci_gart = cpu_to_le32((page_base) | 0xc);
+               break;
+       case DRM_ATI_GART_PCIE:
+               *pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
+               break;
+       default:
+       case DRM_ATI_GART_PCI:
+               *pci_gart = cpu_to_le32(page_base);
+               break;
+       }
+}
+
+static __inline__ u32 get_page_base_from_table(struct drm_ati_pcigart_info *info, u32 *pci_gart)
+{
+       u32 retval;
+       switch(info->gart_reg_if) {
+       case DRM_ATI_GART_IGP:
+               retval = *pci_gart;
+               retval &= ~0xc;
+               break;
+       case DRM_ATI_GART_PCIE:
+               retval = *pci_gart;
+               retval &= ~0xc;
+               retval <<= 8;
+               break;
+       default:
+       case DRM_ATI_GART_PCI:
+               retval = *pci_gart;
+               break;
+       }
+       return retval;
+}
+
+
+
+static void *drm_ati_alloc_pcigart_table(int order)
+{
+       unsigned long address;
+       struct page *page;
+       int i;
+
+       DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
+
+       address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
+                                  order);
+       if (address == 0UL) {
+               return NULL;
+       }
+
+       page = virt_to_page(address);
+
+       for (i = 0; i < order; i++, page++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
+               get_page(page);
+#endif
+               SetPageReserved(page);
+       }
+
+       DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
+       return (void *)address;
+}
+
+static void drm_ati_free_pcigart_table(void *address, int order)
+{
+       struct page *page;
+       int i;
+       int num_pages = 1 << order;
+       DRM_DEBUG("%s\n", __FUNCTION__);
+
+       page = virt_to_page((unsigned long)address);
+
+       for (i = 0; i < num_pages; i++, page++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
+               __put_page(page);
+#endif
+               ClearPageReserved(page);
+       }
+
+       free_pages((unsigned long)address, order);
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long pages;
+       int i;
+       int order;
+       int num_pages, max_pages;
+
+       /* we need to support large memory configurations */
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               return 0;
+       }
+
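+       /* Round the table size up to whole pages, then convert to a page allocation order. */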
+       order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
+       num_pages = 1 << order;
+
+       if (gart_info->bus_addr) {
+               if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+                       pci_unmap_single(dev->pdev, gart_info->bus_addr,
+                                        num_pages * PAGE_SIZE,
+                                        PCI_DMA_TODEVICE);
+               }
+
+               max_pages = (gart_info->table_size / sizeof(u32));
+               pages = (entry->pages <= max_pages)
+                 ? entry->pages : max_pages;
+
+               for (i = 0; i < pages; i++) {
+                       if (!entry->busaddr[i])
+                               break;
+                       pci_unmap_single(dev->pdev, entry->busaddr[i],
+                                        PAGE_SIZE, PCI_DMA_TODEVICE);
+               }
+
+               if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+                       gart_info->bus_addr = 0;
+       }
+
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
+           && gart_info->addr) {
+
+               drm_ati_free_pcigart_table(gart_info->addr, order);
+               gart_info->addr = NULL;
+       }
+
+       return 1;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
+
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+       struct drm_sg_mem *entry = dev->sg;
+       void *address = NULL;
+       unsigned long pages;
+       u32 *pci_gart, page_base, bus_address = 0;
+       int i, j, ret = 0;
+       int order;
+       int max_pages;
+       int num_pages;
+
+       if (!entry) {
+               DRM_ERROR("no scatter/gather memory!\n");
+               goto done;
+       }
+
+       if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+               DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+               order = drm_order((gart_info->table_size +
+                                  (PAGE_SIZE-1)) / PAGE_SIZE);
+               num_pages = 1 << order;
+               address = drm_ati_alloc_pcigart_table(order);
+               if (!address) {
+                       DRM_ERROR("cannot allocate PCI GART page!\n");
+                       goto done;
+               }
+
+               if (!dev->pdev) {
+                       DRM_ERROR("PCI device unknown!\n");
+                       goto done;
+               }
+
+               bus_address = pci_map_single(dev->pdev, address,
+                                            num_pages * PAGE_SIZE,
+                                            PCI_DMA_TODEVICE);
+               if (bus_address == 0) {
+                       DRM_ERROR("unable to map PCIGART pages!\n");
+                       order = drm_order((gart_info->table_size +
+                                          (PAGE_SIZE-1)) / PAGE_SIZE);
+                       drm_ati_free_pcigart_table(address, order);
+                       address = NULL;
+                       goto done;
+               }
+       } else {
+               address = gart_info->addr;
+               bus_address = gart_info->bus_addr;
+               DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
+                         bus_address, (unsigned long)address);
+       }
+
+       pci_gart = (u32 *) address;
+
+       max_pages = (gart_info->table_size / sizeof(u32));
+       pages = (entry->pages <= max_pages)
+           ? entry->pages : max_pages;
+
+       memset(pci_gart, 0, max_pages * sizeof(u32));
+
+       for (i = 0; i < pages; i++) {
+               /* we need to support large memory configurations */
+               entry->busaddr[i] = pci_map_single(dev->pdev,
+                                                  page_address(entry->
+                                                               pagelist[i]),
+                                                  PAGE_SIZE, PCI_DMA_TODEVICE);
+               if (entry->busaddr[i] == 0) {
+                       DRM_ERROR("unable to map PCIGART pages!\n");
+                       drm_ati_pcigart_cleanup(dev, gart_info);
+                       address = NULL;
+                       bus_address = 0;
+                       goto done;
+               }
+               page_base = (u32) entry->busaddr[i];
+
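+               /* One kernel page may span several 4 KiB GART pages; write one entry per chunk. */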
+               for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+                       insert_page_into_table(gart_info, page_base, pci_gart);
+                       pci_gart++;
+                       page_base += ATI_PCIGART_PAGE_SIZE;
+               }
+       }
+
+       ret = 1;
+
+#if defined(__i386__) || defined(__x86_64__)
+       wbinvd();
+#else
+       mb();
+#endif
+
+      done:
+       gart_info->addr = address;
+       gart_info->bus_addr = bus_address;
+       return ret;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_init);
+
+static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
+{
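+       /* An unbind needs a cache-flush adjustment unless the backend was bound cached. */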
+       return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+static int ati_pcigart_populate(struct drm_ttm_backend *backend,
+                               unsigned long num_pages,
+                               struct page **pages)
+{
+       ati_pcigart_ttm_backend_t *atipci_be =
+               container_of(backend, ati_pcigart_ttm_backend_t, backend);
+
+       DRM_ERROR("%ld\n", num_pages);
+       atipci_be->pages = pages;
+       atipci_be->num_pages = num_pages;
+       atipci_be->populated = 1;
+       return 0;
+}
+
+static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
+                               struct drm_bo_mem_reg *bo_mem)
+{
+       ati_pcigart_ttm_backend_t *atipci_be =
+               container_of(backend, ati_pcigart_ttm_backend_t, backend);
+        off_t j;
+       int i;
+       struct drm_ati_pcigart_info *info = atipci_be->gart_info;
+       u32 *pci_gart;
+       u32 page_base;
+       unsigned long offset = bo_mem->mm_node->start;
+       pci_gart = info->addr;
+
+       DRM_ERROR("Offset is %08lX\n", bo_mem->mm_node->start);
+        j = offset;
+        while (j < (offset + atipci_be->num_pages)) {
+               if (get_page_base_from_table(info, pci_gart+j))
+                       return -EBUSY;
+                j++;
+        }
+
+       for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
+               struct page *cur_page = atipci_be->pages[i];
+
+               /* write the page's physical address into its GART entry */
+               page_base = page_to_phys(cur_page);
+               insert_page_into_table(info, page_base, pci_gart + j);
+       }
+
+#if defined(__i386__) || defined(__x86_64__)
+       wbinvd();
+#else
+       mb();
+#endif
+
+       atipci_be->gart_flush_fn(atipci_be->dev);
+
+       atipci_be->bound = 1;
+       atipci_be->offset = offset;
+       DRM_DEBUG("\n");
+       return 0;
+}
+
+static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
+{
+       ati_pcigart_ttm_backend_t *atipci_be =
+               container_of(backend, ati_pcigart_ttm_backend_t, backend);
+       struct drm_ati_pcigart_info *info = atipci_be->gart_info;       
+       unsigned long offset = atipci_be->offset;
+       int i;
+       off_t j;
+       u32 *pci_gart = info->addr;
+
+       DRM_DEBUG("\n");
+
+       if (atipci_be->bound != 1)
+               return -EINVAL;
+
+       for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
+               *(pci_gart + j) = 0;
+       }
+       atipci_be->gart_flush_fn(atipci_be->dev);
+       atipci_be->bound = 0;
+       atipci_be->offset = 0;
+       return 0;
+}
+
+static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
+{
+       ati_pcigart_ttm_backend_t *atipci_be =
+               container_of(backend, ati_pcigart_ttm_backend_t, backend);
+
+       DRM_DEBUG("\n");        
+       if (atipci_be->pages) {
+               backend->func->unbind(backend);
+               atipci_be->pages = NULL;
+       }
+       atipci_be->num_pages = 0;
+}
+
+static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
+{
+       ati_pcigart_ttm_backend_t *atipci_be;
+       if (backend) {
+               DRM_DEBUG("\n");
+               atipci_be = container_of(backend, ati_pcigart_ttm_backend_t, backend);
+               if (atipci_be) {
+                       if (atipci_be->pages) {
+                               backend->func->clear(backend);
+                       }
+                       drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
+               }
+       }
+}
+
+static struct drm_ttm_backend_func ati_pcigart_ttm_backend = {
+       .needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
+       .populate = ati_pcigart_populate,
+       .clear = ati_pcigart_clear_ttm,
+       .bind = ati_pcigart_bind_ttm,
+       .unbind = ati_pcigart_unbind_ttm,
+       .destroy = ati_pcigart_destroy_ttm,
+};
+
+struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev,
+                                            struct drm_ati_pcigart_info *info,
+                                            void (*gart_flush_fn)(struct drm_device *dev))
+{
+       ati_pcigart_ttm_backend_t *atipci_be;
+
+       atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM);
+       if (!atipci_be)
+               return NULL;
+       
+       atipci_be->populated = 0;
+       atipci_be->backend.func = &ati_pcigart_ttm_backend;
+       atipci_be->gart_info = info;
+       atipci_be->gart_flush_fn = gart_flush_fn;
+       atipci_be->dev = dev;
+
+       return &atipci_be->backend;
+}
+EXPORT_SYMBOL(ati_pcigart_init_ttm);
diff --git a/psb-kernel-source-4.41.1/build.sh b/psb-kernel-source-4.41.1/build.sh
new file mode 100755 (executable)
index 0000000..af62546
--- /dev/null
@@ -0,0 +1 @@
+LINUXDIR=/home/pinebud/android/android-x86/out/target/product/s5/obj/kernel DRM_MODULES=psb make
diff --git a/psb-kernel-source-4.41.1/create_linux_pci_lists.sh b/psb-kernel-source-4.41.1/create_linux_pci_lists.sh
new file mode 100644 (file)
index 0000000..bb0e687
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/bash
+#
+# Script to output Linux compatible pci ids file
+#  - Copyright Dave Airlie 2004 (airlied@linux.ie)
+#
+OUTFILE=drm_pciids.h
+
+finished=0
+
+cat > $OUTFILE <<EOF
+/*
+   This file is auto-generated from the drm_pciids.txt in the DRM CVS
+   Please contact dri-devel@lists.sf.net to add new cards to this list
+*/
+EOF
+
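+# Reads drm_pciids.txt-style records from stdin:
+#   <vendor> <device> <attribs> <name>, where a "[NAME]" line starts a new
+#   #define block and a blank line terminates the previous one.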
+while read pcivend pcidev attribs pciname
+do
+       if [ "x$pcivend" = "x" ]; then
+               if [ "$finished" = "0" ]; then
+                       finished=1
+                       echo "  {0, 0, 0}" >> $OUTFILE
+                       echo >> $OUTFILE
+               fi
+       else
+               cardtype=`echo "$pcivend" | cut -s -f2 -d'[' | cut -s -f1 -d']'`
+               if [ "x$cardtype" = "x" ];
+               then
+                       echo "  {$pcivend, $pcidev, PCI_ANY_ID, PCI_ANY_ID, 0, 0, $attribs}, \\" >> $OUTFILE
+               else
+                       echo "#define "$cardtype"_PCI_IDS \\" >> $OUTFILE
+                       finished=0
+               fi
+       fi
+done
+
+if [ "$finished" = "0" ]; then
+       echo "  {0, 0, 0}" >> $OUTFILE
+fi
diff --git a/psb-kernel-source-4.41.1/debian/changelog b/psb-kernel-source-4.41.1/debian/changelog
new file mode 100644 (file)
index 0000000..554e8f4
--- /dev/null
@@ -0,0 +1,5 @@
+psb-kernel-source (4.41.1-0ubuntu1~904um1) jaunty; urgency=low
+
+  * Initial package for Jaunty, utilising DKMS.
+
+ -- Steve Kowalik <stevenk@ubuntu.com>  Thu, 25 Jun 2009 11:44:04 +1000
diff --git a/psb-kernel-source-4.41.1/debian/compat b/psb-kernel-source-4.41.1/debian/compat
new file mode 100644 (file)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/psb-kernel-source-4.41.1/debian/control b/psb-kernel-source-4.41.1/debian/control
new file mode 100644 (file)
index 0000000..58d11cf
--- /dev/null
@@ -0,0 +1,26 @@
+Source: psb-kernel-source
+Section: misc
+Priority: optional
+Maintainer: Ubuntu Mobile Developers <ubuntu-mobile@lists.ubuntu.com>
+XSBC-Original-Maintainer: Waldo Bastian <waldo.bastian@intel.com>
+Build-Depends: debhelper (>= 7), dpatch, linux-headers-generic [i386 amd64], linux-headers-lpia [lpia]
+Standards-Version: 3.8.0
+
+Package: psb-kernel-source
+Architecture: all
+Depends: dkms, make
+Description: Kernel module for the Poulsbo (psb) 2D X11 driver
+ This package contains the source for the Poulsbo (psb) 2D X11 driver, and 
+ hooks to use DKMS to build it on install.
+
+Package: psb-kernel-headers
+Architecture: all
+Description: Kernel module headers for the Poulsbo (psb) 2D X11 driver
+ This package contains the kernel headers for the Poulsbo (psb) 2D X11 driver.
+
+Package: psb-modules
+Architecture: any
+Depends: linux-generic [i386 amd64], linux-lpia [lpia]
+Description: Kernel module built for -generic or -lpia kernel
+ This package provides the psb kernel module for the -generic or -lpia kernel.
+
diff --git a/psb-kernel-source-4.41.1/debian/copyright b/psb-kernel-source-4.41.1/debian/copyright
new file mode 100644 (file)
index 0000000..f66d5bf
--- /dev/null
@@ -0,0 +1,53 @@
+Upstream Author(s): 
+
+    Intel Corporation, http://www.intel.com
+
+Copyright: 
+
+    Copyright (c) 1997-2003 by The XFree86 Project, Inc.
+    Copyright (c) 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+    Copyright (c) 2000-2001 VA Linux Systems, Inc., Sunnyvale, California.
+    Copyright (c) 2002-2003, 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+    Copyright (c) 2003 José Fonseca.
+    Copyright (c) 2003 Leif Delgass.
+    Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
+    Copyright (c) 2003-2004 IBM Corp.
+    Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
+    Copyright (c) 2005 Alan Hourihane
+    Copyright (c) 2005 Paul Mackerras
+    Copyright (c) 2005-2007 Imagination Technologies Limited, UK
+    Copyright (c) 2006 Dennis Munsie <dmunsie@cecropia.com>
+    Copyright (c) 2006 Keith Packard
+    Copyright (c) 2006-2007 Dave Airlie <airlied@linux.ie>
+    Copyright (c) 2006-2008 Intel Corporation
+
+License:
+
+    All Rights Reserved.
+    
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+    
+    The above copyright notice and this permission notice (including the next
+    paragraph) shall be included in all copies or substantial portions of the
+    Software.
+    
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+    VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+    OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+    ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+    OTHER DEALINGS IN THE SOFTWARE.
+
+
+On Debian systems, the complete text of the GNU General
+Public License can be found in `/usr/share/common-licenses/GPL-2'.
+
+The Debian packaging is (C) Canonical Ltd, and is under the terms of the GPL,
+version 2.
+
diff --git a/psb-kernel-source-4.41.1/debian/dirs b/psb-kernel-source-4.41.1/debian/dirs
new file mode 100644 (file)
index 0000000..b601f22
--- /dev/null
@@ -0,0 +1 @@
+usr/src
diff --git a/psb-kernel-source-4.41.1/debian/dkms.conf.in b/psb-kernel-source-4.41.1/debian/dkms.conf.in
new file mode 100644 (file)
index 0000000..ef78d69
--- /dev/null
@@ -0,0 +1,9 @@
+PACKAGE_NAME="psb-kernel-source"
+PACKAGE_VERSION="#VERSION#"
+CLEAN="make clean"
+BUILT_MODULE_NAME[0]="psb"
+BUILT_MODULE_LOCATION[0]="."
+DEST_MODULE_LOCATION[0]="/kernel/../updates/char/drm"
+BUILT_MODULE_NAME[1]="drm"
+DEST_MODULE_LOCATION[1]="/kernel/../updates/char/drm"
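+# $kernelver below is expanded by DKMS to the kernel version being built for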
+MAKE[0]="make LINUXDIR=/lib/modules/$kernelver/build DRM_MODULES=psb"
diff --git a/psb-kernel-source-4.41.1/debian/patches/00list b/psb-kernel-source-4.41.1/debian/patches/00list
new file mode 100644 (file)
index 0000000..942477e
--- /dev/null
@@ -0,0 +1 @@
+use_udev
diff --git a/psb-kernel-source-4.41.1/debian/patches/use_udev.dpatch b/psb-kernel-source-4.41.1/debian/patches/use_udev.dpatch
new file mode 100644 (file)
index 0000000..f19bead
--- /dev/null
@@ -0,0 +1,36 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## use_udev_for_the_love_of_god.patch.dpatch by Steve Kowalik <stevenk@ubuntu.com>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Use udev!!
+
+@DPATCH@
+diff -urNad psb-kernel-source-4.37~/drm_sysfs.c psb-kernel-source-4.37/drm_sysfs.c
+--- psb-kernel-source-4.37~/drm_sysfs.c        2009-04-17 18:35:58.000000000 +1000
++++ psb-kernel-source-4.37/drm_sysfs.c 2009-04-17 18:52:59.000000000 +1000
+@@ -167,7 +167,7 @@
+        * will create the device node.  We don't want to do that just
+        * yet...
+        */
+-      /* dev->dev.devt = head->device; */
++      dev->dev.devt = head->device;
+       snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
+       err = device_register(&dev->dev);
+@@ -176,7 +176,7 @@
+               goto err_out;
+       }
+-      for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
++/*    for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+               err = device_create_file(&dev->dev, &device_attrs[i]);
+               if (err)
+                       goto err_out_files;
+@@ -189,6 +189,7 @@
+               for (j = 0; j < i; j++)
+                       device_remove_file(&dev->dev, &device_attrs[i]);
+       device_unregister(&dev->dev);
++*/
+ err_out:
+       return err;
diff --git a/psb-kernel-source-4.41.1/debian/postinst b/psb-kernel-source-4.41.1/debian/postinst
new file mode 100644 (file)
index 0000000..200c9c3
--- /dev/null
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+set -e
+
+NAME=psb-kernel-source
+VERSION=$(dpkg-query -W -f='${Version}' $NAME | awk -F "-" '{print $1}' | cut -d\: -f2)
+
+case "$1" in
+    configure)
+      # Determine current arch / kernel
+      c_arch=`uname -m`
+      c_kern=`uname -r`
+
+      if [ -e "/var/lib/dkms/$NAME/$VERSION" ]; then
+         echo "Removing old $NAME-$VERSION DKMS files..."
+         dkms remove -m $NAME -v $VERSION --all
+      fi
+
+      echo "Loading new $NAME-$VERSION DKMS files..."
+      if [ -f "/usr/src/$NAME-$VERSION.dkms.tar.gz" ]; then
+          dkms ldtarball --archive "/usr/src/$NAME-$VERSION.dkms.tar.gz"
+      else
+          dkms add -m $NAME -v $VERSION
+      fi
+
+      echo "Installing prebuilt kernel module binaries (if any)"
+      set +e
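+      # Iterate over the newline-separated output of "dkms status".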
+      IFS='
+'
+      for kern in `dkms status -m $NAME -v $VERSION -a $c_arch | grep ": built" | awk '{print $3}' | sed 's/,$//'`; do
+         echo "Trying kernel: $kern"
+         dkms install --force -m $NAME -v $VERSION -k $kern -a $c_arch
+      done
+      unset IFS
+      set -e
+
+      # If none installed, install.
+      ##if [ `dkms status -m $NAME -v $VERSION -k $c_kern -a $c_arch | grep -c ": installed"` -eq 0 ]; then
+      dkms_status=`dkms status -m $NAME -v $VERSION -k $c_kern -a $c_arch`
+      if [ `echo $dkms_status | grep -c ": installed"` -eq 0 ]; then
+         if [ `echo $c_kern | grep -c "BOOT"` -eq 0 ] && [ -e "/lib/modules/$c_kern/build/include" ]; then
+            # Only build if we need to.
+            if [ `echo $dkms_status | grep -c ": built"` -eq 0 ]; then
+               echo "Building module..."
+               dkms build -m $NAME -v $VERSION
+            fi
+            echo "Installing module..."
+            dkms install -m $NAME -v $VERSION
+         elif [ `echo $c_kern | grep -c "BOOT"` -gt 0 ]; then
+            echo ""
+            echo "Module build for the currently running kernel was skipped since you"
+            echo "are running a BOOT variant of the kernel."
+         else
+            echo ""
+            echo "Module build for the currently running kernel was skipped since the"
+            echo "kernel source for this kernel does not seem to be installed."
+         fi
+      fi
+    ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+    ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
+
diff --git a/psb-kernel-source-4.41.1/debian/postrm b/psb-kernel-source-4.41.1/debian/postrm
new file mode 100644 (file)
index 0000000..edd4db6
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+NAME=psb-kernel-source
+VERSION=$(dpkg-query -W -f='${Version}' $NAME | awk -F "-" '{print $1}' | cut -d\: -f2)
+
+case "$1" in
+    purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+      moddir="/lib/modules/`uname -r`/kernel/"
+      depmod
+      if [ -d "/usr/src/$NAME-$VERSION" ]; then
+         echo "Removing old module source..."
+         rm -rf "/usr/src/$NAME-$VERSION"
+      fi
+    ;;
+
+    *)
+        echo "postrm called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
+
diff --git a/psb-kernel-source-4.41.1/debian/prerm b/psb-kernel-source-4.41.1/debian/prerm
new file mode 100644 (file)
index 0000000..b9bf3cf
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+
+NAME=psb-kernel-source
+VERSION=$(dpkg-query -W -f='${Version}' $NAME | awk -F "-" '{print $1}' | cut -d\: -f2)
+
+case "$1" in
+    remove|upgrade|deconfigure)
+      if [ "`dkms status -m $NAME`" ]; then
+         dkms remove -m $NAME -v $VERSION --all
+      fi
+    ;;
+
+    failed-upgrade)
+    ;;
+
+    *)
+        echo "prerm called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/psb-kernel-source-4.41.1/debian/psb-kernel-headers.dirs b/psb-kernel-source-4.41.1/debian/psb-kernel-headers.dirs
new file mode 100644 (file)
index 0000000..65c6f0d
--- /dev/null
@@ -0,0 +1 @@
+usr/include/drm
diff --git a/psb-kernel-source-4.41.1/debian/psb-kernel-headers.install b/psb-kernel-source-4.41.1/debian/psb-kernel-headers.install
new file mode 100644 (file)
index 0000000..6da797a
--- /dev/null
@@ -0,0 +1 @@
+*.h /usr/include/drm
diff --git a/psb-kernel-source-4.41.1/debian/psb-kernel-headers.postrm b/psb-kernel-source-4.41.1/debian/psb-kernel-headers.postrm
new file mode 100644 (file)
index 0000000..02edd5a
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+    remove|purge)
+        for file in drm_sarea.h drm.h drm_hashtab.h i915_drm.h; do
+            dpkg-divert --remove --rename /usr/include/drm/$file
+        done
+       rm -f /usr/include/drm-linux-libc
+    ;;
+esac
+
+#DEBHELPER#
+
diff --git a/psb-kernel-source-4.41.1/debian/psb-kernel-headers.preinst b/psb-kernel-source-4.41.1/debian/psb-kernel-headers.preinst
new file mode 100644 (file)
index 0000000..fffbf79
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+    install|upgrade)
+       if [ ! -d /usr/include/drm-linux-libc ]; then
+           mkdir /usr/include/drm-linux-libc
+       fi
+       for file in drm_sarea.h drm.h drm_hashtab.h i915_drm.h; do
+           dpkg-divert --add --rename --package psb-kernel-headers --divert \
+               /usr/include/drm-linux-libc/$file /usr/include/drm/$file
+       done
+    ;;
+esac
+
+#DEBHELPER#
+
diff --git a/psb-kernel-source-4.41.1/debian/rules b/psb-kernel-source-4.41.1/debian/rules
new file mode 100644 (file)
index 0000000..7b8621b
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/make -f
+
+include /usr/share/dpatch/dpatch.make
+
+DEB_NAME=psb-kernel-source
+VERSION := $(shell dpkg-parsechangelog | grep '^Version:' | cut -d' ' -f2 | cut -d- -f1 | cut -d\: -f2)
+KERNEL_VER := $(shell ls -1 /lib/modules)
+
+build: build-stamp
+build-stamp: patch
+       $(MAKE) LINUXDIR=/lib/modules/$(KERNEL_VER)/build DRM_MODULES="psb"
+       touch $@
+
+clean: clean-patched unpatch
+clean-patched:
+       dh clean
+
+install: build install-stamp
+install-stamp:
+       dh_clean -k
+       dh_installdirs
+       mkdir $(CURDIR)/debian/$(DEB_NAME)/usr/src/$(DEB_NAME)-$(VERSION)
+       cp *.c *.h Config.in Doxyfile *.txt Kconfig Makefile* Module.* \
+               modules.order README.drm create_linux_pci_lists.sh \
+               $(CURDIR)/debian/$(DEB_NAME)/usr/src/$(DEB_NAME)-$(VERSION)
+       cat debian/dkms.conf.in | sed -e "s/#VERSION#/$(VERSION)/" > \
+               $(CURDIR)/debian/$(DEB_NAME)/usr/src/$(DEB_NAME)-$(VERSION)/dkms.conf
+       mkdir -p $(CURDIR)/debian/psb-modules/lib/modules/$(KERNEL_VER)/updates/char/drm
+       cp psb.ko drm.ko $(CURDIR)/debian/psb-modules/lib/modules/$(KERNEL_VER)/updates/char/drm
+       touch $@
+
+binary-arch: install-stamp
+       dh $@
+
+binary-indep: install-stamp
+       dh $@
+
+binary: binary-arch binary-indep
diff --git a/psb-kernel-source-4.41.1/drm.h b/psb-kernel-source-4.41.1/drm.h
new file mode 100644 (file)
index 0000000..bd4df10
--- /dev/null
@@ -0,0 +1,1192 @@
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \mainpage
+ *
+ * The Direct Rendering Manager (DRM) is a device-independent kernel-level
+ * device driver that provides support for the XFree86 Direct Rendering
+ * Infrastructure (DRI).
+ *
+ * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
+ * ways:
+ *     -# The DRM provides synchronized access to the graphics hardware via
+ *        the use of an optimized two-tiered lock.
+ *     -# The DRM enforces the DRI security policy for access to the graphics
+ *        hardware by only allowing authenticated X11 clients access to
+ *        restricted regions of memory.
+ *     -# The DRM provides a generic DMA engine, complete with multiple
+ *        queues and the ability to detect the need for an OpenGL context
+ *        switch.
+ *     -# The DRM is extensible via the use of small device-specific modules
+ *        that rely extensively on the API exported by the DRM module.
+ *
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#ifndef __user
+#define __user
+#endif
+#ifndef __iomem
+#define __iomem
+#endif
+
+#ifdef __GNUC__
+# define DEPRECATED  __attribute__ ((deprecated))
+#else
+# define DEPRECATED
+#endif
+
+#if defined(__linux__)
+#include <asm/ioctl.h>         /* For _IO* macros */
+#define DRM_IOCTL_NR(n)                _IOC_NR(n)
+#define DRM_IOC_VOID           _IOC_NONE
+#define DRM_IOC_READ           _IOC_READ
+#define DRM_IOC_WRITE          _IOC_WRITE
+#define DRM_IOC_READWRITE      _IOC_READ|_IOC_WRITE
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
+#include <sys/ioccom.h>
+#define DRM_IOCTL_NR(n)                ((n) & 0xff)
+#define DRM_IOC_VOID           IOC_VOID
+#define DRM_IOC_READ           IOC_OUT
+#define DRM_IOC_WRITE          IOC_IN
+#define DRM_IOC_READWRITE      IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#endif
+
+#ifdef __OpenBSD__
+#define DRM_MAJOR       81
+#endif
+#if defined(__linux__) || defined(__NetBSD__)
+#define DRM_MAJOR       226
+#endif
+#define DRM_MAX_MINOR   15
+
+#define DRM_NAME       "drm"     /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER  5         /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER  22        /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10       /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock)           ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock)           ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
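+
+/*
+ * Illustrative example (not part of the original header): decoding a
+ * lock word with the macros above.  "lock" is a hypothetical local:
+ *
+ *     unsigned int lock = _DRM_LOCK_HELD | 3;
+ *
+ *     _DRM_LOCK_IS_HELD(lock)     is non-zero: the lock is held,
+ *     _DRM_LOCK_IS_CONT(lock)     is zero: the lock is not contended,
+ *     _DRM_LOCKING_CONTEXT(lock)  yields 3, the owning context.
+ */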
+
+#if defined(__linux__)
+typedef unsigned int drm_handle_t;
+#else
+#include <sys/types.h>
+typedef unsigned long drm_handle_t;    /**< To mapped regions */
+#endif
+typedef unsigned int drm_context_t;    /**< GLXContext handle */
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;      /**< Magic for authentication */
+
+/**
+ * Cliprect.
+ *
+ * \warning If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+       unsigned short x1;
+       unsigned short y1;
+       unsigned short x2;
+       unsigned short y2;
+};
+
+/**
+ * Texture region.
+ */
+struct drm_tex_region {
+       unsigned char next;
+       unsigned char prev;
+       unsigned char in_use;
+       unsigned char padding;
+       unsigned int age;
+};
+
+/**
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer.  To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+       __volatile__ unsigned int lock;         /**< lock variable */
+       char padding[60];                       /**< Pad to cache line */
+};
+
+/* This is beyond ugly, and only works on GCC.  However, it allows me to use
+ * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
+ * fix is to use uint32_t instead of size_t, but that fix will break existing
+ * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
+ * eventually happen, though.  I chose 'unsigned long' to be the fallback type
+ * because that works on all the platforms I know about.  Hopefully, the
+ * real fix will happen before that bites us.
+ */
+
+#ifdef __SIZE_TYPE__
+# define DRM_SIZE_T __SIZE_TYPE__
+#else
+# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
+# define DRM_SIZE_T unsigned long
+#endif
+
+/**
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+       int version_major;        /**< Major version */
+       int version_minor;        /**< Minor version */
+       int version_patchlevel;   /**< Patch level */
+       DRM_SIZE_T name_len;      /**< Length of name buffer */
+       char __user *name;                /**< Name of driver */
+       DRM_SIZE_T date_len;      /**< Length of date buffer */
+       char __user *date;                /**< User-space buffer to hold date */
+       DRM_SIZE_T desc_len;      /**< Length of desc buffer */
+       char __user *desc;                /**< User-space buffer to hold desc */
+};
+
+/**
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+       DRM_SIZE_T unique_len;    /**< Length of unique */
+       char __user *unique;              /**< Unique name for driver instantiation */
+};
+
+#undef DRM_SIZE_T
+
+struct drm_list {
+       int count;                /**< Length of user-space structures */
+       struct drm_version __user *version;
+};
+
+struct drm_block {
+       int unused;
+};
+
+/**
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+       enum {
+               DRM_ADD_COMMAND,
+               DRM_RM_COMMAND,
+               DRM_INST_HANDLER,
+               DRM_UNINST_HANDLER
+       } func;
+       int irq;
+};
+
+/**
+ * Type of memory to map.
+ */
+enum drm_map_type {
+       _DRM_FRAME_BUFFER = 0,    /**< WC (no caching), no core dump */
+       _DRM_REGISTERS = 1,       /**< no caching, no core dump */
+       _DRM_SHM = 2,             /**< shared, cached */
+       _DRM_AGP = 3,             /**< AGP/GART */
+       _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
+       _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
+       _DRM_TTM = 6
+};
+
+/**
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+       _DRM_RESTRICTED = 0x01,      /**< Cannot be mapped to user-virtual */
+       _DRM_READ_ONLY = 0x02,
+       _DRM_LOCKED = 0x04,          /**< shared, cached, locked */
+       _DRM_KERNEL = 0x08,          /**< kernel requires access */
+       _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+       _DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
+       _DRM_REMOVABLE = 0x40,       /**< Removable mapping */
+        _DRM_DRIVER = 0x80           /**< Driver will take care of it */
+};
+
+struct drm_ctx_priv_map {
+       unsigned int ctx_id;     /**< Context requesting private mapping */
+       void *handle;            /**< Handle of map */
+};
+
+/**
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+       unsigned long offset;    /**< Requested physical address (0 for SAREA)*/
+       unsigned long size;      /**< Requested physical size (bytes) */
+       enum drm_map_type type;  /**< Type of memory to map */
+       enum drm_map_flags flags;        /**< Flags */
+       void *handle;            /**< User-space: "Handle" to pass to mmap() */
+                                /**< Kernel-space: kernel-virtual address */
+       int mtrr;                /**< MTRR slot used */
+       /*   Private data */
+};
+
+/**
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+       int idx;                /**< Which client desired? */
+       int auth;               /**< Is client authenticated? */
+       unsigned long pid;      /**< Process ID */
+       unsigned long uid;      /**< User ID */
+       unsigned long magic;    /**< Magic */
+       unsigned long iocs;     /**< Ioctl count */
+};
+
+enum drm_stat_type {
+       _DRM_STAT_LOCK,
+       _DRM_STAT_OPENS,
+       _DRM_STAT_CLOSES,
+       _DRM_STAT_IOCTLS,
+       _DRM_STAT_LOCKS,
+       _DRM_STAT_UNLOCKS,
+       _DRM_STAT_VALUE,        /**< Generic value */
+       _DRM_STAT_BYTE,         /**< Generic byte counter (1024bytes/K) */
+       _DRM_STAT_COUNT,        /**< Generic non-byte counter (1000/k) */
+
+       _DRM_STAT_IRQ,          /**< IRQ */
+       _DRM_STAT_PRIMARY,      /**< Primary DMA bytes */
+       _DRM_STAT_SECONDARY,    /**< Secondary DMA bytes */
+       _DRM_STAT_DMA,          /**< DMA */
+       _DRM_STAT_SPECIAL,      /**< Special DMA (e.g., priority or polled) */
+       _DRM_STAT_MISSED        /**< Missed DMA opportunity */
+           /* Add to the *END* of the list */
+};
+
+/**
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+       unsigned long count;
+       struct {
+               unsigned long value;
+               enum drm_stat_type type;
+       } data[15];
+};
+
+/**
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+       _DRM_LOCK_READY = 0x01,      /**< Wait until hardware is ready for DMA */
+       _DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
+       _DRM_LOCK_FLUSH = 0x04,      /**< Flush this context's DMA queue first */
+       _DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
+       /* These *HALT* flags aren't supported yet
+          -- they will be used to support the
+          full-screen DGA-like mode. */
+       _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+       _DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
+};
+
+/**
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+       int context;
+       enum drm_lock_flags flags;
+};
+
+/**
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+       /* Flags for DMA buffer dispatch */
+       _DRM_DMA_BLOCK = 0x01,        /**<
+                                      * Block until buffer dispatched.
+                                      *
+                                      * \note The buffer may not yet have
+                                      * been processed by the hardware --
+                                      * getting a hardware lock with the
+                                      * hardware quiescent will ensure
+                                      * that the buffer has been
+                                      * processed.
+                                      */
+       _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+       _DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */
+
+       /* Flags for DMA buffer request */
+       _DRM_DMA_WAIT = 0x10,         /**< Wait for free buffers */
+       _DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
+       _DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
+};
+
+/**
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+struct drm_buf_desc {
+       int count;               /**< Number of buffers of this size */
+       int size;                /**< Size in bytes */
+       int low_mark;            /**< Low water mark */
+       int high_mark;           /**< High water mark */
+       enum {
+               _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+               _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+               _DRM_SG_BUFFER  = 0x04, /**< Scatter/gather memory buffer */
+               _DRM_FB_BUFFER  = 0x08, /**< Buffer is in frame buffer */
+               _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+       } flags;
+       unsigned long agp_start; /**<
+                                 * Start address of where the AGP buffers are
+                                 * in the AGP aperture
+                                 */
+};
+
+/**
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+       int count;                /**< Number of buffers described in list */
+       struct drm_buf_desc __user *list; /**< List of buffer descriptions */
+};
+
+/**
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+       int count;
+       int __user *list;
+};
+
+/**
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+       int idx;                       /**< Index into the master buffer list */
+       int total;                     /**< Buffer size */
+       int used;                      /**< Amount of buffer in use (for DMA) */
+       void __user *address;          /**< Address of buffer */
+};
+
+/**
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+       int count;              /**< Length of the buffer list */
+#if defined(__cplusplus)
+       void __user *c_virtual;
+#else
+       void __user *virtual;           /**< Mmap'd area in user-virtual */
+#endif
+       struct drm_buf_pub __user *list;        /**< Buffer information */
+};
+
+/**
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+       int context;                      /**< Context handle */
+       int send_count;                   /**< Number of buffers to send */
+       int __user *send_indices;         /**< List of handles to buffers */
+       int __user *send_sizes;           /**< Lengths of data to send */
+       enum drm_dma_flags flags;         /**< Flags */
+       int request_count;                /**< Number of buffers requested */
+       int request_size;                 /**< Desired size for buffers */
+       int __user *request_indices;     /**< Buffer information */
+       int __user *request_sizes;
+       int granted_count;                /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+       _DRM_CONTEXT_PRESERVED = 0x01,
+       _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/**
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+       drm_context_t handle;
+       enum drm_ctx_flags flags;
+};
+
+/**
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+       int count;
+       struct drm_ctx __user *contexts;
+};
+
+/**
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+       drm_drawable_t handle;
+};
+
+/**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+       DRM_DRAWABLE_CLIPRECTS,
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+       drm_drawable_t handle;
+       unsigned int type;
+       unsigned int num;
+       unsigned long long data;
+};
+
+/**
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+       drm_magic_t magic;
+};
+
+/**
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+       int irq;        /**< IRQ number */
+       int busnum;     /**< bus number */
+       int devnum;     /**< device number */
+       int funcnum;    /**< function number */
+};
+
+enum drm_vblank_seq_type {
+       _DRM_VBLANK_ABSOLUTE = 0x0,     /**< Wait for specific vblank sequence number */
+       _DRM_VBLANK_RELATIVE = 0x1,     /**< Wait for given number of vblanks */
+       _DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
+       _DRM_VBLANK_NEXTONMISS = 0x10000000,    /**< If missed, wait for next vblank */
+       _DRM_VBLANK_SECONDARY = 0x20000000,     /**< Secondary display controller */
+       _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
+};
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
+                               _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+       enum drm_vblank_seq_type type;
+       unsigned int sequence;
+       unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+       enum drm_vblank_seq_type type;
+       unsigned int sequence;
+       long tval_sec;
+       long tval_usec;
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+       struct drm_wait_vblank_request request;
+       struct drm_wait_vblank_reply reply;
+};
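+
+/*
+ * Illustrative example (not part of the original header): a user-space
+ * sketch of waiting for the next vblank.  fd is assumed to be an open
+ * DRM device node; DRM_IOCTL_WAIT_VBLANK is defined later in this file.
+ *
+ *     union drm_wait_vblank vbl;
+ *     vbl.request.type = _DRM_VBLANK_RELATIVE;
+ *     vbl.request.sequence = 1;
+ *     ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
+ *
+ * On success the reply member carries the reached sequence number and a
+ * timestamp in tval_sec/tval_usec.
+ */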
+
+/**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+       unsigned long mode;     /**< AGP mode */
+};
+
+/**
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+       unsigned long size;     /**< In bytes -- will round to page boundary */
+       unsigned long handle;   /**< Used for binding / unbinding */
+       unsigned long type;     /**< Type of memory to allocate */
+       unsigned long physical; /**< Physical used by i810 */
+};
+
+/**
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+       unsigned long handle;   /**< From drm_agp_buffer */
+       unsigned long offset;   /**< In bytes -- will round to page boundary */
+};
+
+/**
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+       int agp_version_major;
+       int agp_version_minor;
+       unsigned long mode;
+       unsigned long aperture_base;   /**< physical address */
+       unsigned long aperture_size;   /**< bytes */
+       unsigned long memory_allowed;  /**< bytes */
+       unsigned long memory_used;
+
+       /** \name PCI information */
+       /*@{ */
+       unsigned short id_vendor;
+       unsigned short id_device;
+       /*@} */
+};
+
+/**
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+       unsigned long size;     /**< In bytes -- will round to page boundary */
+       unsigned long handle;   /**< Used for mapping / unmapping */
+};
+
+/**
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+       int drm_di_major;
+       int drm_di_minor;
+       int drm_dd_major;
+       int drm_dd_minor;
+};
+
+
+#define DRM_FENCE_FLAG_EMIT                0x00000001
+#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
+#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
+#define DRM_FENCE_FLAG_NO_USER             0x00000010
+
+/* Reserved for driver use */
+#define DRM_FENCE_MASK_DRIVER              0xFF000000
+
+#define DRM_FENCE_TYPE_EXE                 0x00000001
+
+struct drm_fence_arg {
+       unsigned int handle;
+       unsigned int fence_class;
+       unsigned int type;
+       unsigned int flags;
+       unsigned int signaled;
+       unsigned int error;
+       unsigned int sequence;
+       unsigned int pad64;
+       uint64_t expand_pad[2]; /*Future expansion */
+};
+
+/* Buffer permissions, referring to how the GPU uses the buffers.
+ * These translate to fence types used for the buffers.
+ * Typically a texture buffer is read, a destination buffer is write, and
+ * a command (batch) buffer is exe.  The flags can be or-ed together; see
+ * the example following the defines below.
+ */
+
+#define DRM_BO_FLAG_READ        (1ULL << 0)
+#define DRM_BO_FLAG_WRITE       (1ULL << 1)
+#define DRM_BO_FLAG_EXE         (1ULL << 2)
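+
+/*
+ * Illustrative example (not part of the original header): a buffer the
+ * GPU both samples from and renders to would be validated with
+ *
+ *     uint64_t bo_flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
+ *
+ * bo_flags here is a hypothetical local variable, not an API name.
+ */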
+
+/*
+ * Status flags. Can be read to determine the actual state of a buffer.
+ * Can also be set in the buffer mask before validation.
+ */
+
+/*
+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
+ * available to root and must be manually removed before buffer manager shutdown
+ * or lock.
+ * Flags: Acknowledge
+ */
+#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
+
+/*
+ * Mask: Require that the buffer is placed in mappable memory when validated.
+ *       If not set the buffer may or may not be in mappable memory when validated.
+ * Flags: If set, the buffer is in mappable memory.
+ */
+#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
+
+/* Mask: The buffer should be shareable with other processes.
+ * Flags: The buffer is shareable with other processes.
+ */
+#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
+
+/* Mask: If set, place the buffer in cache-coherent memory if available.
+ *       If clear, never place the buffer in cache coherent memory if validated.
+ * Flags: The buffer is currently in cache-coherent memory.
+ */
+#define DRM_BO_FLAG_CACHED      (1ULL << 7)
+
+/* Mask: Make sure that every time this buffer is validated,
+ *       it ends up at the same location, provided that the memory mask is the same.
+ *       The buffer will also not be evicted when claiming space for
+ *       other buffers. Basically a pinned buffer but it may be thrown out as
+ *       part of buffer manager shutdown or locking.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
+
+/* Mask: Make sure the buffer is in cached memory when mapped.
+ * Flags: Acknowledge.
+ * Buffers allocated with this flag should not be used for suballocators.
+ * This type may have issues on CPUs with over-aggressive caching:
+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
+ */
+#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
+
+
+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
+
+/*
+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
+#define DRM_BO_FLAG_TILE           (1ULL << 15)
+
+/*
+ * Memory type flags that can be or'ed together in the mask, but only
+ * one appears in flags.
+ */
+
+/* System memory */
+#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
+/* Translation table memory */
+#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
+/* VRAM memory */
+#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
+/* Up to the driver to define. */
+#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
+#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
+#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
+#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
+#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
+/* We can add more of these now with a 64-bit flag type */
+
+/* Memory flag mask */
+#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
+#define DRM_BO_MASK_MEMTYPE     0x00000000FF0800A0ULL
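+
+/*
+ * Illustrative example (not part of the original header): exactly one
+ * memory type appears in a buffer's flags, so its current placement can
+ * be isolated by masking.  rep is a hypothetical struct drm_bo_info_rep
+ * (defined below) returned by one of the buffer-object ioctls:
+ *
+ *     if ((rep.flags & DRM_BO_MASK_MEM) == DRM_BO_FLAG_MEM_VRAM)
+ *             buffer_is_in_vram();
+ *
+ * buffer_is_in_vram() is a placeholder, not an API in this header.
+ */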
+
+/* Driver-private flags */
+#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
+
+/* Don't block on validate and map */
+#define DRM_BO_HINT_DONT_BLOCK  0x00000002
+/* Don't place this buffer on the unfenced list.*/
+#define DRM_BO_HINT_DONT_FENCE  0x00000004
+#define DRM_BO_HINT_WAIT_LAZY   0x00000008
+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
+
+#define DRM_BO_INIT_MAGIC 0xfe769812
+#define DRM_BO_INIT_MAJOR 1
+#define DRM_BO_INIT_MINOR 0
+#define DRM_BO_INIT_PATCH 0
+
+
+struct drm_bo_info_req {
+       uint64_t mask;
+       uint64_t flags;
+       unsigned int handle;
+       unsigned int hint;
+       unsigned int fence_class;
+       unsigned int desired_tile_stride;
+       unsigned int tile_info;
+       unsigned int pad64;
+       uint64_t presumed_offset;
+};
+
+struct drm_bo_create_req {
+       uint64_t mask;
+       uint64_t size;
+       uint64_t buffer_start;
+       unsigned int hint;
+       unsigned int page_alignment;
+};
+
+
+/*
+ * Reply flags
+ */
+
+#define DRM_BO_REP_BUSY 0x00000001
+
+struct drm_bo_info_rep {
+       uint64_t flags;
+       uint64_t mask;
+       uint64_t size;
+       uint64_t offset;
+       uint64_t arg_handle;
+       uint64_t buffer_start;
+       unsigned int handle;
+       unsigned int fence_flags;
+       unsigned int rep_flags;
+       unsigned int page_alignment;
+       unsigned int desired_tile_stride;
+       unsigned int hw_tile_stride;
+       unsigned int tile_info;
+       unsigned int pad64;
+       uint64_t expand_pad[4]; /*Future expansion */
+};
+
+struct drm_bo_arg_rep {
+       struct drm_bo_info_rep bo_info;
+       int ret;
+       unsigned int pad64;
+};
+
+struct drm_bo_create_arg {
+       union {
+               struct drm_bo_create_req req;
+               struct drm_bo_info_rep rep;
+       } d;
+};
+
+struct drm_bo_handle_arg {
+       unsigned int handle;
+};
+
+struct drm_bo_reference_info_arg {
+       union {
+               struct drm_bo_handle_arg req;
+               struct drm_bo_info_rep rep;
+       } d;
+};
+
+struct drm_bo_map_wait_idle_arg {
+       union {
+               struct drm_bo_info_req req;
+               struct drm_bo_info_rep rep;
+       } d;
+};
+
+struct drm_bo_op_req {
+       enum {
+               drm_bo_validate,
+               drm_bo_fence,
+               drm_bo_ref_fence,
+       } op;
+       unsigned int arg_handle;
+       struct drm_bo_info_req bo_req;
+};
+
+
+struct drm_bo_op_arg {
+       uint64_t next;
+       union {
+               struct drm_bo_op_req req;
+               struct drm_bo_arg_rep rep;
+       } d;
+       int handled;
+       unsigned int pad64;
+};
+
+
+#define DRM_BO_MEM_LOCAL 0
+#define DRM_BO_MEM_TT 1
+#define DRM_BO_MEM_VRAM 2
+#define DRM_BO_MEM_PRIV0 3
+#define DRM_BO_MEM_PRIV1 4
+#define DRM_BO_MEM_PRIV2 5
+#define DRM_BO_MEM_PRIV3 6
+#define DRM_BO_MEM_PRIV4 7
+
+#define DRM_BO_MEM_TYPES 8 /* For now. */
+
+#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
+
+struct drm_bo_version_arg {
+       uint32_t major;
+       uint32_t minor;
+       uint32_t patchlevel;
+};
+
+struct drm_mm_type_arg {
+       unsigned int mem_type;
+       unsigned int lock_flags;
+};
+
+struct drm_mm_init_arg {
+       unsigned int magic;
+       unsigned int major;
+       unsigned int minor;
+       unsigned int mem_type;
+       uint64_t p_offset;
+       uint64_t p_size;
+};
+
+/*
+ * Drm mode setting
+ */
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_OUTPUT_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN  (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C  ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C   ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED        (1<<3)
+#define DRM_MODE_TYPE_DEFAULT  (1<<4)
+#define DRM_MODE_TYPE_USERDEF  (1<<5)
+#define DRM_MODE_TYPE_DRIVER   (1<<6)
+#define DRM_MODE_TYPE_USERPREF (1<<7)
+
+struct drm_mode_modeinfo {
+
+       unsigned int id;
+
+       unsigned int clock;
+       unsigned short hdisplay, hsync_start, hsync_end, htotal, hskew;
+       unsigned short vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+       unsigned int vrefresh; /* vertical refresh * 1000 */
+
+       unsigned int flags;
+       unsigned int type;
+       char name[DRM_DISPLAY_MODE_LEN];
+};
+
+struct drm_mode_card_res {
+
+       int count_fbs;
+       unsigned int __user *fb_id;
+
+       int count_crtcs;
+       unsigned int __user *crtc_id;
+
+       int count_outputs;
+       unsigned int __user *output_id;
+
+       int count_modes;
+       struct drm_mode_modeinfo __user *modes;
+
+};
+
+struct drm_mode_crtc {
+       unsigned int crtc_id; /**< Id */
+       unsigned int fb_id; /**< Id of framebuffer */
+
+       int x, y; /**< Position on the framebuffer */
+
+       unsigned int mode; /**< Current mode used */
+
+       int count_outputs;
+       unsigned int outputs; /**< Outputs that are connected */
+
+       int count_possibles;
+       unsigned int possibles; /**< Outputs that can be connected */
+
+       unsigned int __user *set_outputs; /**< Outputs to be connected */
+
+       int gamma_size;
+
+};
+
+struct drm_mode_get_output {
+
+       unsigned int output; /**< Id */
+       unsigned int crtc; /**< Id of crtc */
+       unsigned char name[DRM_OUTPUT_NAME_LEN];
+
+       unsigned int connection;
+       unsigned int mm_width, mm_height; /**< width x height in millimeters */
+       unsigned int subpixel;
+
+       int count_crtcs;
+       unsigned int crtcs; /**< possible crtc to connect to */
+
+       int count_clones;
+       unsigned int clones; /**< list of clones */
+
+       int count_modes;
+       unsigned int __user *modes; /**< list of modes it supports */
+
+       int count_props;
+       unsigned int __user *props;
+       unsigned int __user *prop_values;
+};
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+
+struct drm_mode_property_enum {
+       uint32_t value;
+       unsigned char name[DRM_PROP_NAME_LEN];
+};
+               
+struct drm_mode_get_property {
+
+       unsigned int prop_id;
+       unsigned int flags;
+       unsigned char name[DRM_PROP_NAME_LEN];
+
+       int count_values;
+       uint32_t __user *values;
+
+       int count_enums;
+       struct drm_mode_property_enum *enums;
+};
+
+struct drm_mode_fb_cmd {
+       unsigned int buffer_id;
+       unsigned int width, height;
+       unsigned int pitch;
+       unsigned int bpp;
+       unsigned int handle;
+       unsigned int depth;
+};
+
+struct drm_mode_mode_cmd {
+       unsigned int output_id;
+       unsigned int mode_id;
+};
+
+/**
+ * \name Ioctls Definitions
+ */
+/*@{*/
+
+#define DRM_IOCTL_BASE                 'd'
+#define DRM_IO(nr)                     _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type)               _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type)               _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type)              _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_IOCTL_VERSION              DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE           DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC            DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID            DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION          DRM_IOWR(0x07, struct drm_set_version)
+
+#define DRM_IOCTL_SET_UNIQUE           DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC           DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK                        DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK              DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL              DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP              DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS             DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS            DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS            DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS             DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS            DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP               DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX                DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX                DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_ADD_CTX              DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX               DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX              DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX              DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX           DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX              DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX              DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW             DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW              DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA                  DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK                 DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK               DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH               DRM_IOW( 0x2c, struct drm_lock)
+
+#define DRM_IOCTL_AGP_ACQUIRE          DRM_IO(  0x30)
+#define DRM_IOCTL_AGP_RELEASE          DRM_IO(  0x31)
+#define DRM_IOCTL_AGP_ENABLE           DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO             DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC            DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE             DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND             DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND           DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC             DRM_IOW( 0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE              DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK          DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
+#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
+#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
+#define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)
+
+#define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)
+
+#define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
+#define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
+
+
+#define DRM_IOCTL_MODE_GETRESOURCES     DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC          DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_GETOUTPUT        DRM_IOWR(0xA2, struct drm_mode_get_output)
+#define DRM_IOCTL_MODE_SETCRTC          DRM_IOWR(0xA3, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_ADDFB            DRM_IOWR(0xA4, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_RMFB             DRM_IOWR(0xA5, unsigned int)
+#define DRM_IOCTL_MODE_GETFB            DRM_IOWR(0xA6, struct drm_mode_fb_cmd)
+
+#define DRM_IOCTL_MODE_ADDMODE         DRM_IOWR(0xA7, struct drm_mode_modeinfo)
+#define DRM_IOCTL_MODE_RMMODE          DRM_IOWR(0xA8, unsigned int)
+#define DRM_IOCTL_MODE_ATTACHMODE      DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
+#define DRM_IOCTL_MODE_DETACHMODE      DRM_IOWR(0xAA, struct drm_mode_mode_cmd)
+
+#define DRM_IOCTL_MODE_GETPROPERTY     DRM_IOWR(0xAB, struct drm_mode_get_property)
+/*@}*/
+
+/**
+ * Device specific ioctls should only be in their respective headers.
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLs restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE                0x40
+#define DRM_COMMAND_END                 0xA0
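+
+/*
+ * Illustration (hypothetical names): a driver numbers its private ioctls
+ * relative to DRM_COMMAND_BASE in its own header, e.g.
+ *
+ *   #define DRM_MYDRV_INIT        0x00
+ *   #define DRM_IOCTL_MYDRV_INIT \
+ *           DRM_IOWR(DRM_COMMAND_BASE + DRM_MYDRV_INIT, struct mydrv_init)
+ */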
+
+/* typedef area */
+#if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+typedef struct drm_fence_arg drm_fence_arg_t;
+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
+typedef enum drm_bo_type drm_bo_type_t;
+#endif
+
+#endif
diff --git a/psb-kernel-source-4.41.1/drmP.h b/psb-kernel-source-4.41.1/drmP.h
new file mode 100644 (file)
index 0000000..2449fe0
--- /dev/null
@@ -0,0 +1,1332 @@
+/**
+ * \file drmP.h
+ * Private header for Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_P_H_
+#define _DRM_P_H_
+
+#ifdef __KERNEL__
+#ifdef __alpha__
+/* add include of current.h so that "current" is defined
+ * before static inline funcs in wait.h. Doing this so we
+ * can build the DRM (part of PI DRI). 4/21/2000 S + B */
+#include <asm/current.h>
+#endif                         /* __alpha__ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>    /* For (un)lock_kernel */
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/i2c.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+#include <linux/mutex.h>
+#endif
+#if defined(__alpha__) || defined(__powerpc__)
+#include <asm/pgtable.h>       /* For pte_wrprotect */
+#endif
+#include <asm/io.h>
+#include <asm/mman.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+#include <asm/agp.h>
+#include <linux/types.h>
+#include <linux/agp_backend.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+#include <asm/pgalloc.h>
+#include "drm.h"
+#include <linux/slab.h>
+#include <linux/idr.h>
+
+#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#include "drm_os_linux.h"
+#include "drm_hashtab.h"
+#include "drm_internal.h"
+
+struct drm_device;
+struct drm_file;
+
+/* If you want the memory alloc debug functionality, change define below */
+/* #define DEBUG_MEMORY */
+
+/***********************************************************************/
+/** \name DRM template customization defaults */
+/*@{*/
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP     0x1
+#define DRIVER_REQUIRE_AGP 0x2
+#define DRIVER_USE_MTRR    0x4
+#define DRIVER_PCI_DMA     0x8
+#define DRIVER_SG          0x10
+#define DRIVER_HAVE_DMA    0x20
+#define DRIVER_HAVE_IRQ    0x40
+#define DRIVER_IRQ_SHARED  0x80
+#define DRIVER_IRQ_VBL     0x100
+#define DRIVER_DMA_QUEUE   0x200
+#define DRIVER_FB_DMA      0x400
+#define DRIVER_IRQ_VBL2    0x800
+
+
+/*@}*/
+
+/***********************************************************************/
+/** \name Begin the DRM... */
+/*@{*/
+
+#define DRM_DEBUG_CODE 2         /**< Include debugging code if > 1, then
+                                    also include looping detection. */
+
+#define DRM_MAGIC_HASH_ORDER  4 /**< Size of key hash table. Must be power of 2. */
+#define DRM_KERNEL_CONTEXT    0         /**< Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1         /**< Change drm_resctx if changed */
+#define DRM_LOOPING_LIMIT     5000000
+#define DRM_TIME_SLICE       (HZ/20)  /**< Time slice for GLXContexts */
+#define DRM_LOCK_SLICE       1 /**< Time slice for lock, in jiffies */
+
+#define DRM_FLAG_DEBUG   0x01
+
+#define DRM_MEM_DMA       0
+#define DRM_MEM_SAREA     1
+#define DRM_MEM_DRIVER    2
+#define DRM_MEM_MAGIC     3
+#define DRM_MEM_IOCTLS    4
+#define DRM_MEM_MAPS      5
+#define DRM_MEM_VMAS      6
+#define DRM_MEM_BUFS      7
+#define DRM_MEM_SEGS      8
+#define DRM_MEM_PAGES     9
+#define DRM_MEM_FILES    10
+#define DRM_MEM_QUEUES   11
+#define DRM_MEM_CMDS     12
+#define DRM_MEM_MAPPINGS  13
+#define DRM_MEM_BUFLISTS  14
+#define DRM_MEM_AGPLISTS  15
+#define DRM_MEM_TOTALAGP  16
+#define DRM_MEM_BOUNDAGP  17
+#define DRM_MEM_CTXBITMAP 18
+#define DRM_MEM_STUB      19
+#define DRM_MEM_SGLISTS   20
+#define DRM_MEM_CTXLIST   21
+#define DRM_MEM_MM        22
+#define DRM_MEM_HASHTAB   23
+#define DRM_MEM_OBJECTS   24
+#define DRM_MEM_FENCE     25
+#define DRM_MEM_TTM       26
+#define DRM_MEM_BUFOBJ    27
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+#define DRM_MAP_HASH_OFFSET 0x10000000
+#define DRM_MAP_HASH_ORDER 12
+#define DRM_OBJECT_HASH_ORDER 12
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+/*
+ * This should be small enough to allow the use of kmalloc for hash tables
+ * instead of vmalloc.
+ */
+
+#define DRM_FILE_HASH_ORDER 8
+#define DRM_MM_INIT_MAX_PAGES 256
+
+/*@}*/
+
+#include "drm_compat.h"
+
+
+/***********************************************************************/
+/** \name Macros to make printk easier */
+/*@{*/
+
+/**
+ * Error output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_ERROR(fmt, arg...) \
+       printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg)
+
+/**
+ * Memory error output.
+ *
+ * \param area memory area where the error occurred.
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_MEM_ERROR(area, fmt, arg...) \
+       printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \
+              drm_mem_stats[area].name , ##arg)
+#define DRM_INFO(fmt, arg...)  printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
+
+/**
+ * Debug output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG(fmt, arg...)                                         \
+       do {                                                            \
+               if ( drm_debug )                                        \
+                       printk(KERN_DEBUG                               \
+                              "[" DRM_NAME ":%s] " fmt ,               \
+                              __FUNCTION__ , ##arg);                   \
+       } while (0)
+#else
+#define DRM_DEBUG(fmt, arg...)          do { } while (0)
+#endif
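+
+/*
+ * Usage sketch: these logging macros take printk()-style format strings,
+ * e.g. DRM_DEBUG("mapping %d pages at offset 0x%08lx\n", pages, offset);
+ * DRM_DEBUG output appears only while the global drm_debug flag is nonzero.
+ */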
+
+#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+
+#define DRM_PROC_PRINT(fmt, arg...)                                    \
+   len += sprintf(&buf[len], fmt , ##arg);                             \
+   if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
+
+#define DRM_PROC_PRINT_RET(ret, fmt, arg...)                           \
+   len += sprintf(&buf[len], fmt , ##arg);                             \
+   if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
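+
+/*
+ * Usage sketch: /proc read handlers emit lines with e.g.
+ * DRM_PROC_PRINT("%s\n", dev->unique); the macros assume the surrounding
+ * handler provides the conventional buf/len/offset/eof variables and bail
+ * out once the output nears the DRM_PROC_LIMIT page boundary.
+ */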
+
+/*@}*/
+
+/***********************************************************************/
+/** \name Internal types and structures */
+/*@{*/
+
+#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
+#define DRM_MIN(a,b) min(a,b)
+#define DRM_MAX(a,b) max(a,b)
+
+#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
+#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
+#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+/**
+ * Get the private SAREA mapping.
+ *
+ * \param _dev DRM device.
+ * \param _ctx context number.
+ * \param _map output mapping.
+ */
+#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {      \
+       (_map) = (_dev)->context_sareas[_ctx];          \
+} while(0)
+
+/**
+ * Test that the hardware lock is held by the caller, returning otherwise.
+ *
+ * \param dev DRM device.
+ * \param file_priv DRM file private pointer of the caller.
+ */
+#define LOCK_TEST_WITH_RETURN( dev, file_priv )                                \
+do {                                                                   \
+       if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||           \
+            dev->lock.file_priv != file_priv ) {                       \
+               DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
+                          __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
+                          dev->lock.file_priv, file_priv );            \
+               return -EINVAL;                                         \
+       }                                                               \
+} while (0)
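+
+/*
+ * Usage sketch: an ioctl handler that touches hardware state typically
+ * begins with LOCK_TEST_WITH_RETURN(dev, file_priv); so that a caller
+ * which does not hold the heavyweight lock gets -EINVAL back.
+ */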
+
+/**
+ * Copy an IOCTL return string to user space.
+ */
+#define DRM_COPY( name, value )                                                \
+       len = strlen( value );                                          \
+       if ( len > name##_len ) len = name##_len;                       \
+       name##_len = strlen( value );                                   \
+       if ( len && name ) {                                            \
+               if ( copy_to_user( name, value, len ) )                 \
+                       return -EFAULT;                                 \
+       }
+
+/**
+ * Ioctl function type.
+ *
+ * \param dev DRM device structure
+ * \param data pointer to kernel-space stored data, copied in and out according
+ *            to ioctl description.
+ * \param file_priv DRM file private pointer.
+ */
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+
+typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+                              unsigned long arg);
+
+#define DRM_AUTH        0x1
+#define DRM_MASTER      0x2
+#define DRM_ROOT_ONLY   0x4
+
+struct drm_ioctl_desc {
+       unsigned int cmd;
+       drm_ioctl_t *func;
+       int flags;
+};
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#define DRM_IOCTL_DEF(ioctl, func, flags) \
+       [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
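+
+/*
+ * Usage sketch (hypothetical driver): ioctl tables are built as
+ * designated-initializer arrays, e.g.
+ *
+ *   struct drm_ioctl_desc mydrv_ioctls[] = {
+ *           DRM_IOCTL_DEF(DRM_IOCTL_MYDRV_INIT, mydrv_init, DRM_AUTH),
+ *   };
+ */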
+
+struct drm_magic_entry {
+       struct list_head head;
+       struct drm_hash_item hash_item;
+       struct drm_file *priv;
+};
+
+struct drm_vma_entry {
+       struct list_head head;
+       struct vm_area_struct *vma;
+       pid_t pid;
+};
+
+/**
+ * DMA buffer.
+ */
+struct drm_buf {
+       int idx;                       /**< Index into master buflist */
+       int total;                     /**< Buffer size */
+       int order;                     /**< log-base-2(total) */
+       int used;                      /**< Amount of buffer in use (for DMA) */
+       unsigned long offset;          /**< Byte offset (used internally) */
+       void *address;                 /**< Address of buffer */
+       unsigned long bus_address;     /**< Bus address of buffer */
+       struct drm_buf *next;          /**< Kernel-only: used for free list */
+       __volatile__ int waiting;      /**< On kernel DMA queue */
+       __volatile__ int pending;      /**< On hardware DMA queue */
+       wait_queue_head_t dma_wait;    /**< Processes waiting */
+       struct drm_file *file_priv;    /**< Private of holding file descr */
+       int context;                   /**< Kernel queue for this buffer */
+       int while_locked;              /**< Dispatch this buffer while locked */
+       enum {
+               DRM_LIST_NONE = 0,
+               DRM_LIST_FREE = 1,
+               DRM_LIST_WAIT = 2,
+               DRM_LIST_PEND = 3,
+               DRM_LIST_PRIO = 4,
+               DRM_LIST_RECLAIM = 5
+       } list;                        /**< Which list we're on */
+
+       int dev_priv_size;              /**< Size of buffer private storage */
+       void *dev_private;              /**< Per-buffer private storage */
+};
+
+/** bufs is one longer than it has to be */
+struct drm_waitlist {
+       int count;                      /**< Number of possible buffers */
+       struct drm_buf **bufs;          /**< List of pointers to buffers */
+       struct drm_buf **rp;                    /**< Read pointer */
+       struct drm_buf **wp;                    /**< Write pointer */
+       struct drm_buf **end;           /**< End pointer */
+       spinlock_t read_lock;
+       spinlock_t write_lock;
+};
+
+struct drm_freelist {
+       int initialized;               /**< Freelist in use */
+       atomic_t count;                /**< Number of free buffers */
+       struct drm_buf *next;          /**< Free list head */
+
+       wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
+       int low_mark;                  /**< Low water mark */
+       int high_mark;                 /**< High water mark */
+       atomic_t wfh;                  /**< If waiting for high mark */
+       spinlock_t lock;
+};
+
+typedef struct drm_dma_handle {
+       dma_addr_t busaddr;
+       void *vaddr;
+       size_t size;
+} drm_dma_handle_t;
+
+/**
+ * Buffer entry.  There is one of this for each buffer size order.
+ */
+struct drm_buf_entry {
+       int buf_size;                   /**< size */
+       int buf_count;                  /**< number of buffers */
+       struct drm_buf *buflist;                /**< buffer list */
+       int seg_count;
+       int page_order;
+       struct drm_dma_handle **seglist;
+       struct drm_freelist freelist;
+};
+
+
+enum drm_ref_type {
+       _DRM_REF_USE = 0,
+       _DRM_REF_TYPE1,
+       _DRM_NO_REF_TYPES
+};
+
+
+/** File private data */
+struct drm_file {
+       int authenticated;
+       int master;
+       int minor;
+       pid_t pid;
+       uid_t uid;
+       drm_magic_t magic;
+       unsigned long ioctl_count;
+       struct list_head lhead;
+       struct drm_head *head;
+       int remove_auth_on_close;
+       unsigned long lock_count;
+
+       /*
+        * The user object hash table is global and resides in the
+        * drm_device structure. We protect the lists and hash tables with the
+        * device struct_mutex. A bit coarse-grained but probably the best
+        * option.
+        */
+
+       struct list_head refd_objects;
+
+       struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
+       struct file *filp;
+       void *driver_priv;
+
+       struct list_head fbs;
+};
+
+/** Wait queue */
+struct drm_queue {
+       atomic_t use_count;             /**< Outstanding uses (+1) */
+       atomic_t finalization;          /**< Finalization in progress */
+       atomic_t block_count;           /**< Count of processes waiting */
+       atomic_t block_read;            /**< Queue blocked for reads */
+       wait_queue_head_t read_queue;   /**< Processes waiting on block_read */
+       atomic_t block_write;           /**< Queue blocked for writes */
+       wait_queue_head_t write_queue;  /**< Processes waiting on block_write */
+#if 1
+       atomic_t total_queued;          /**< Total queued statistic */
+       atomic_t total_flushed;         /**< Total flushes statistic */
+       atomic_t total_locks;           /**< Total locks statistics */
+#endif
+       enum drm_ctx_flags flags;       /**< Context preserving and 2D-only */
+       struct drm_waitlist waitlist;   /**< Pending buffers */
+       wait_queue_head_t flush_queue;  /**< Processes waiting until flush */
+};
+
+/**
+ * Lock data.
+ */
+struct drm_lock_data {
+       struct drm_hw_lock *hw_lock;            /**< Hardware lock */
+       /** Private of lock holder's file (NULL=kernel) */
+       struct drm_file *file_priv;
+       wait_queue_head_t lock_queue;   /**< Queue of blocked processes */
+       unsigned long lock_time;        /**< Time of last lock in jiffies */
+       spinlock_t spinlock;
+       uint32_t kernel_waiters;
+       uint32_t user_waiters;
+       int idle_has_lock;
+};
+
+/**
+ * DMA data.
+ */
+struct drm_device_dma {
+
+       struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];   /**< buffers, grouped by their size order */
+       int buf_count;                  /**< total number of buffers */
+       struct drm_buf **buflist;               /**< Vector of pointers into drm_device_dma::bufs */
+       int seg_count;
+       int page_count;                 /**< number of pages */
+       unsigned long *pagelist;        /**< page list */
+       unsigned long byte_count;
+       enum {
+               _DRM_DMA_USE_AGP = 0x01,
+               _DRM_DMA_USE_SG = 0x02,
+               _DRM_DMA_USE_FB = 0x04,
+               _DRM_DMA_USE_PCI_RO = 0x08
+       } flags;
+
+};
+
+/**
+ * AGP memory entry.  Stored as a doubly linked list.
+ */
+struct drm_agp_mem {
+       unsigned long handle;           /**< handle */
+       DRM_AGP_MEM *memory;
+       unsigned long bound;            /**< address */
+       int pages;
+       struct list_head head;
+};
+
+/**
+ * AGP data.
+ *
+ * \sa drm_agp_init() and drm_device::agp.
+ */
+struct drm_agp_head {
+       DRM_AGP_KERN agp_info;          /**< AGP device information */
+       struct list_head memory;
+       unsigned long mode;             /**< AGP mode */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
+       struct agp_bridge_data *bridge;
+#endif
+       int enabled;                    /**< whether the AGP bus has been enabled */
+       int acquired;                   /**< whether the AGP device has been acquired */
+       unsigned long base;
+       int agp_mtrr;
+       int cant_use_aperture;
+       unsigned long page_mask;
+};
+
+/**
+ * Scatter-gather memory.
+ */
+struct drm_sg_mem {
+       unsigned long handle;
+       void *virtual;
+       int pages;
+       struct page **pagelist;
+       dma_addr_t *busaddr;
+};
+
+struct drm_sigdata {
+       int context;
+       struct drm_hw_lock *lock;
+};
+
+
+/*
+ * Generic memory manager structs
+ */
+
+struct drm_mm_node {
+       struct list_head fl_entry;
+       struct list_head ml_entry;
+       int free;
+       unsigned long start;
+       unsigned long size;
+       struct drm_mm *mm;
+       void *private;
+};
+
+struct drm_mm {
+       struct list_head fl_entry;
+       struct list_head ml_entry;
+};
+
+
+/**
+ * Mappings list
+ */
+struct drm_map_list {
+       struct list_head head;          /**< list head */
+       struct drm_hash_item hash;
+       struct drm_map *map;                    /**< mapping */
+       uint64_t user_token;
+       struct drm_mm_node *file_offset_node;
+};
+
+typedef struct drm_map drm_local_map_t;
+
+/**
+ * Context handle list
+ */
+struct drm_ctx_list {
+       struct list_head head;          /**< list head */
+       drm_context_t handle;           /**< context handle */
+       struct drm_file *tag;           /**< associated fd private data */
+};
+
+struct drm_vbl_sig {
+       struct list_head head;
+       unsigned int sequence;
+       struct siginfo info;
+       struct task_struct *task;
+};
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB   2
+
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
+struct drm_ati_pcigart_info {
+       int gart_table_location;
+       int gart_reg_if;
+       void *addr;
+       dma_addr_t bus_addr;
+       drm_local_map_t mapping;
+       int table_size;
+};
+
+#include "drm_objects.h"
+#include "drm_edid.h"
+#include "drm_crtc.h"
+
+/**
+ * DRM driver structure. This structure represents the common code for
+ * a family of cards. There will be one drm_device for each card present
+ * in this family.
+ */
+
+struct drm_driver {
+       int (*load) (struct drm_device *, unsigned long flags);
+       int (*firstopen) (struct drm_device *);
+       int (*open) (struct drm_device *, struct drm_file *);
+       void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+       void (*postclose) (struct drm_device *, struct drm_file *);
+       void (*lastclose) (struct drm_device *);
+       int (*unload) (struct drm_device *);
+       int (*suspend) (struct drm_device *);
+       int (*resume) (struct drm_device *);
+       int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+       void (*dma_ready) (struct drm_device *);
+       int (*dma_quiescent) (struct drm_device *);
+       int (*context_ctor) (struct drm_device *dev, int context);
+       int (*context_dtor) (struct drm_device *dev, int context);
+       int (*kernel_context_switch) (struct drm_device *dev, int old,
+                                     int new);
+       void (*kernel_context_switch_unlock) (struct drm_device *dev);
+       int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
+       int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
+       int (*dri_library_name) (struct drm_device *dev, char * buf);
+
+       /**
+        * Called by \c drm_device_is_agp.  Typically used to determine if a
+        * card is really attached to AGP or not.
+        *
+        * \param dev  DRM device handle
+        *
+        * \returns
+        * One of three values is returned depending on whether or not the
+        * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+        * (return of 1), or may or may not be AGP (return of 2).
+        */
+       int (*device_is_agp) (struct drm_device *dev);
+
+/* these have to be filled in */
+        irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+       void (*irq_preinstall) (struct drm_device *dev);
+       void (*irq_postinstall) (struct drm_device *dev);
+       void (*irq_uninstall) (struct drm_device *dev);
+       void (*reclaim_buffers) (struct drm_device *dev,
+                                struct drm_file *file_priv);
+       void (*reclaim_buffers_locked) (struct drm_device *dev,
+                                       struct drm_file *file_priv);
+       void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+                                           struct drm_file *file_priv);
+       unsigned long (*get_map_ofs) (struct drm_map *map);
+       unsigned long (*get_reg_ofs) (struct drm_device *dev);
+       void (*set_version) (struct drm_device *dev,
+                            struct drm_set_version *sv);
+
+       /* FB routines, if present */
+       int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
+       int (*fb_remove)(struct drm_device *dev, struct drm_crtc *crtc);
+
+       struct drm_fence_driver *fence_driver;
+       struct drm_bo_driver *bo_driver;
+
+       int major;
+       int minor;
+       int patchlevel;
+       char *name;
+       char *desc;
+       char *date;
+
+/* variables */
+       u32 driver_features;
+       int dev_priv_size;
+       struct drm_ioctl_desc *ioctls;
+       int num_ioctls;
+       struct file_operations fops;
+       struct pci_driver pci_driver;
+};
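+
+/*
+ * Initialization sketch (hypothetical driver): a driver fills in a static
+ * instance, leaving unused hooks NULL, and hands it to drm_init() together
+ * with its PCI ID table from its module_init hook, e.g.
+ *
+ *   static struct drm_driver mydrv_driver = {
+ *           .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ,
+ *           .load            = mydrv_load,
+ *           .irq_handler     = mydrv_irq_handler,
+ *           .ioctls          = mydrv_ioctls,
+ *   };
+ */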
+
+/**
+ * DRM head structure. This structure represents a video head on a card
+ * that may contain multiple heads. Embed one of these per head in the
+ * private drm_device structure.
+ */
+struct drm_head {
+       int minor;                      /**< Minor device number */
+       struct drm_device *dev;
+       struct proc_dir_entry *dev_root;  /**< proc directory entry */
+       dev_t device;                   /**< Device number for mknod */
+       struct class_device *dev_class;
+};
+
+
+/**
+ * DRM device structure. This structure represents a complete card that
+ * may contain multiple heads.
+ */
+struct drm_device {
+       struct device dev;              /**< Linux device */
+       char *unique;                   /**< Unique identifier: e.g., busid */
+       int unique_len;                 /**< Length of unique field */
+       char *devname;                  /**< For /proc/interrupts */
+       int if_version;                 /**< Highest interface version set */
+
+       int blocked;                    /**< Blocked due to VC switch? */
+
+       /** \name Locks */
+       /*@{ */
+       spinlock_t count_lock;          /**< For inuse, drm_device::open_count, drm_device::buf_use */
+       struct mutex struct_mutex;      /**< For others */
+       /*@} */
+
+       /** \name Usage Counters */
+       /*@{ */
+       int open_count;                 /**< Outstanding files open */
+       atomic_t ioctl_count;           /**< Outstanding IOCTLs pending */
+       atomic_t vma_count;             /**< Outstanding vma areas open */
+       int buf_use;                    /**< Buffers in use -- cannot alloc */
+       atomic_t buf_alloc;             /**< Buffer allocation in progress */
+       /*@} */
+
+       /** \name Performance counters */
+       /*@{ */
+       unsigned long counters;
+       enum drm_stat_type types[15];
+       atomic_t counts[15];
+       /*@} */
+
+       /** \name Authentication */
+       /*@{ */
+       struct list_head filelist;
+       struct drm_open_hash magiclist;
+       struct list_head magicfree;
+       /*@} */
+
+       /** \name Memory management */
+       /*@{ */
+       struct list_head maplist;       /**< Linked list of regions */
+       int map_count;                  /**< Number of mappable regions */
+       struct drm_open_hash map_hash;       /**< User token hash table for maps */
+       struct drm_mm offset_manager;        /**< User token manager */
+       struct drm_open_hash object_hash;    /**< User token hash table for objects */
+       struct address_space *dev_mapping;  /**< For unmap_mapping_range() */
+       struct page *ttm_dummy_page;
+
+       /** \name Context handle management */
+       /*@{ */
+       struct list_head ctxlist;       /**< Linked list of context handles */
+       int ctx_count;                  /**< Number of context handles */
+       struct mutex ctxlist_mutex;     /**< For ctxlist */
+
+       struct idr ctx_idr;
+
+       struct list_head vmalist;       /**< List of vmas (for debugging) */
+       struct drm_lock_data lock;              /**< Information on hardware lock */
+       /*@} */
+
+       /** \name DMA queues (contexts) */
+       /*@{ */
+       int queue_count;                /**< Number of active DMA queues */
+       int queue_reserved;             /**< Number of reserved DMA queues */
+       int queue_slots;                /**< Actual length of queuelist */
+       struct drm_queue **queuelist;   /**< Vector of pointers to DMA queues */
+       struct drm_device_dma *dma;             /**< Optional pointer for DMA support */
+       /*@} */
+
+       /** \name Context support */
+       /*@{ */
+       int irq;                        /**< Interrupt used by board */
+       int irq_enabled;                /**< True if irq handler is enabled */
+       __volatile__ long context_flag; /**< Context swapping flag */
+       __volatile__ long interrupt_flag; /**< Interruption handler flag */
+       __volatile__ long dma_flag;     /**< DMA dispatch flag */
+       struct timer_list timer;        /**< Timer for delaying ctx switch */
+       wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
+       int last_checked;               /**< Last context checked for DMA */
+       int last_context;               /**< Last current context */
+       unsigned long last_switch;      /**< jiffies at last context switch */
+       /*@} */
+
+       struct work_struct work;
+
+       /** \name VBLANK IRQ support */
+       /*@{ */
+
+       wait_queue_head_t vbl_queue;    /**< VBLANK wait queue */
+       atomic_t vbl_received;
+       atomic_t vbl_received2;         /**< number of secondary VBLANK interrupts */
+       spinlock_t vbl_lock;
+       struct list_head vbl_sigs;              /**< signal list to send on VBLANK */
+       struct list_head vbl_sigs2;     /**< signals to send on secondary VBLANK */
+       unsigned int vbl_pending;
+       spinlock_t tasklet_lock;        /**< For drm_locked_tasklet */
+       void (*locked_tasklet_func)(struct drm_device *dev);
+
+       /*@} */
+       cycles_t ctx_start;
+       cycles_t lck_start;
+
+       struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
+       wait_queue_head_t buf_readers;  /**< Processes waiting to read */
+       wait_queue_head_t buf_writers;  /**< Processes waiting to ctx switch */
+
+       struct drm_agp_head *agp;               /**< AGP data */
+
+       struct pci_dev *pdev;           /**< PCI device structure */
+       int pci_vendor;                 /**< PCI vendor id */
+       int pci_device;                 /**< PCI device id */
+#ifdef __alpha__
+       struct pci_controller *hose;
+#endif
+       struct drm_sg_mem *sg;          /**< Scatter gather memory */
+       void *dev_private;              /**< device private data */
+       struct drm_sigdata sigdata;             /**< For block_all_signals */
+       sigset_t sigmask;
+
+       struct drm_driver *driver;
+       drm_local_map_t *agp_buffer_map;
+       unsigned int agp_buffer_token;
+       struct drm_head primary;                /**< primary screen head */
+
+       struct drm_fence_manager fm;
+       struct drm_buffer_manager bm;
+
+       /** \name Drawable information */
+       /*@{ */
+       spinlock_t drw_lock;
+       struct idr drw_idr;
+       /*@} */
+
+       /* DRM mode setting */
+       struct drm_mode_config mode_config;
+};
+
+#if __OS_HAS_AGP
+struct drm_agp_ttm_backend {
+       struct drm_ttm_backend backend;
+       DRM_AGP_MEM *mem;
+       struct agp_bridge_data *bridge;
+       int populated;
+};
+#endif
+
+typedef struct ati_pcigart_ttm_backend {
+       struct drm_ttm_backend backend;
+       int populated;
+       void (*gart_flush_fn)(struct drm_device *dev);
+       struct drm_ati_pcigart_info *gart_info;
+       unsigned long offset;
+       struct page **pages;
+       int num_pages;
+       int bound;
+       struct drm_device *dev;
+} ati_pcigart_ttm_backend_t;
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+                                            int feature)
+{
+       return ((dev->driver->driver_features & feature) ? 1 : 0);
+}
+
+#ifdef __alpha__
+#define drm_get_pci_domain(dev) dev->hose->index
+#else
+#define drm_get_pci_domain(dev) 0
+#endif
+
+#if __OS_HAS_AGP
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+       return drm_core_check_feature(dev, DRIVER_USE_AGP);
+}
+#else
+#define drm_core_has_AGP(dev) (0)
+#endif
+
+#if __OS_HAS_MTRR
+static inline int drm_core_has_MTRR(struct drm_device *dev)
+{
+       return drm_core_check_feature(dev, DRIVER_USE_MTRR);
+}
+
+#define DRM_MTRR_WC            MTRR_TYPE_WRCOMB
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+                              unsigned int flags)
+{
+       return mtrr_add(offset, size, flags, 1);
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+                              unsigned long size, unsigned int flags)
+{
+       return mtrr_del(handle, offset, size);
+}
+
+#else
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+                              unsigned int flags)
+{
+       return -ENODEV;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+                              unsigned long size, unsigned int flags)
+{
+       return -ENODEV;
+}
+
+#define drm_core_has_MTRR(dev) (0)
+#define DRM_MTRR_WC            0
+#endif
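+
+/*
+ * Usage sketch: drivers typically cover the framebuffer aperture with a
+ * write-combining MTRR, e.g.
+ *   int reg = drm_mtrr_add(fb_base, fb_size, DRM_MTRR_WC);
+ * and later drm_mtrr_del(reg, fb_base, fb_size, DRM_MTRR_WC); on kernels
+ * without MTRR support both calls simply return -ENODEV.
+ */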
+
+
+/******************************************************************/
+/** \name Internal function definitions */
+/*@{*/
+
+                               /* Driver support (drm_drv.h) */
+extern int drm_fb_loaded;
+extern int drm_init(struct drm_driver *driver,
+                             struct pci_device_id *pciidlist);
+extern void drm_exit(struct drm_driver *driver);
+extern void drm_cleanup_pci(struct pci_dev *pdev);
+extern int drm_ioctl(struct inode *inode, struct file *filp,
+                    unsigned int cmd, unsigned long arg);
+extern long drm_unlocked_ioctl(struct file *filp,
+                              unsigned int cmd, unsigned long arg);
+extern long drm_compat_ioctl(struct file *filp,
+                            unsigned int cmd, unsigned long arg);
+
+extern int drm_lastclose(struct drm_device *dev);
+
+                               /* Device support (drm_fops.h) */
+extern int drm_open(struct inode *inode, struct file *filp);
+extern int drm_stub_open(struct inode *inode, struct file *filp);
+extern int drm_fasync(int fd, struct file *filp, int on);
+extern int drm_release(struct inode *inode, struct file *filp);
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+
+                               /* Mapping support (drm_vm.h) */
+extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
+extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
+extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
+
+                               /* Memory management support (drm_memory.h) */
+#include "drm_memory.h"
+extern void drm_mem_init(void);
+extern int drm_mem_info(char *buf, char **start, off_t offset,
+                       int request, int *eof, void *data);
+extern void *drm_calloc(size_t nmemb, size_t size, int area);
+extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
+extern unsigned long drm_alloc_pages(int order, int area);
+extern void drm_free_pages(unsigned long address, int order, int area);
+extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
+extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
+extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern int drm_unbind_agp(DRM_AGP_MEM * handle);
+
+extern void drm_free_memctl(size_t size);
+extern int drm_alloc_memctl(size_t size);
+extern void drm_query_memctl(uint64_t *cur_used,
+                            uint64_t *emer_used,
+                            uint64_t *low_threshold,
+                            uint64_t *high_threshold,
+                            uint64_t *emer_threshold);
+extern void drm_init_memctl(size_t low_threshold,
+                           size_t high_threshold,
+                           size_t unit_size);
+
+                               /* Misc. IOCTL support (drm_ioctl.h) */
+extern int drm_irq_by_busid(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+extern int drm_getunique(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_setunique(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_getmap(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_getclient(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_getstats(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_setversion(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+extern int drm_noop(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv);
+
+                               /* Context IOCTL support (drm_context.h) */
+extern int drm_resctx(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_addctx(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_modctx(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_getctx(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_switchctx(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_newctx(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_rmctx(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv);
+
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+
+extern int drm_setsareactx(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+extern int drm_getsareactx(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+
+                               /* Drawable IOCTL support (drm_drawable.h) */
+extern int drm_adddraw(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+extern int drm_rmdraw(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_update_drawable_info(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
+                                                      drm_drawable_t id);
+extern void drm_drawable_free_all(struct drm_device *dev);
+
+                               /* Authentication IOCTL support (drm_auth.h) */
+extern int drm_getmagic(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_authmagic(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+
+                               /* Locking IOCTL support (drm_lock.h) */
+extern int drm_lock(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv);
+extern int drm_unlock(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
+extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct drm_device *dev,
+                             struct drm_file *file_priv);
+
+                               /* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addmap(struct drm_device *dev, unsigned int offset,
+                     unsigned int size, enum drm_map_type type,
+                     enum drm_map_flags flags, drm_local_map_t ** map_ptr);
+extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+extern int drm_addbufs(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+extern int drm_infobufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_markbufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_freebufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_mapbufs(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+extern int drm_order(unsigned long size);
+extern unsigned long drm_get_resource_start(struct drm_device *dev,
+                                           unsigned int resource);
+extern unsigned long drm_get_resource_len(struct drm_device *dev,
+                                         unsigned int resource);
+extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+                                                 drm_local_map_t *map);
+
+
+                               /* DMA support (drm_dma.h) */
+extern int drm_dma_setup(struct drm_device *dev);
+extern void drm_dma_takedown(struct drm_device *dev);
+extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
+extern void drm_core_reclaim_buffers(struct drm_device *dev,
+                                    struct drm_file *filp);
+
+                               /* IRQ support (drm_irq.h) */
+extern int drm_control(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+extern int drm_irq_install(struct drm_device *dev);
+extern int drm_irq_uninstall(struct drm_device *dev);
+extern void drm_driver_irq_preinstall(struct drm_device *dev);
+extern void drm_driver_irq_postinstall(struct drm_device *dev);
+extern void drm_driver_irq_uninstall(struct drm_device *dev);
+
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+extern void drm_vbl_send_signals(struct drm_device *dev);
+extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
+
+                               /* AGP/GART support (drm_agpsupport.h) */
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
+extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv);
+extern int drm_agp_release(struct drm_device *dev);
+extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type);
+#else
+extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
+#endif
+extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
+extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
+extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
+extern void drm_agp_chipset_flush(struct drm_device *dev);
+                               /* Stub support (drm_stub.h) */
+extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+                    struct drm_driver *driver);
+extern int drm_put_dev(struct drm_device *dev);
+extern int drm_put_head(struct drm_head * head);
+extern unsigned int drm_debug; /* 1 to enable debug output */
+extern unsigned int drm_cards_limit;
+extern struct drm_head **drm_heads;
+extern struct class *drm_class;
+extern struct proc_dir_entry *drm_proc_root;
+
+extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
+
+                               /* Proc support (drm_proc.h) */
+extern int drm_proc_init(struct drm_device *dev,
+                        int minor,
+                        struct proc_dir_entry *root,
+                        struct proc_dir_entry **dev_root);
+extern int drm_proc_cleanup(int minor,
+                           struct proc_dir_entry *root,
+                           struct proc_dir_entry *dev_root);
+
+                               /* Scatter Gather Support (drm_scatter.h) */
+extern void drm_sg_cleanup(struct drm_sg_mem * entry);
+extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+extern int drm_sg_free(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
+                              /* ATI PCIGART support (ati_pcigart.h) */
+extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
+
+extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
+                          size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
+
+                              /* sysfs support (drm_sysfs.c) */
+struct drm_sysfs_class;
+extern struct class *drm_sysfs_create(struct module *owner, char *name);
+extern void drm_sysfs_destroy(void);
+extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
+extern void drm_sysfs_device_remove(struct drm_device *dev);
+
+/*
+ * Basic memory manager support (drm_mm.c)
+ */
+
+extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
+                                              unsigned alignment);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
+                                               unsigned alignment, int best_match);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
+
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+{
+       return block->mm;
+}
+
+extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
+extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
+
+static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
+                                                  unsigned int token)
+{
+       struct drm_map_list *_entry;
+       list_for_each_entry(_entry, &dev->maplist, head)
+               if (_entry->user_token == token)
+                       return _entry->map;
+       return NULL;
+}
+
+static __inline__ int drm_device_is_agp(struct drm_device *dev)
+{
+       if ( dev->driver->device_is_agp != NULL ) {
+               int err = (*dev->driver->device_is_agp)(dev);
+
+               if (err != 2) {
+                       return err;
+               }
+       }
+
+       return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
+}
+
+static __inline__ int drm_device_is_pcie(struct drm_device *dev)
+{
+       return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
+}
+
+static __inline__ void drm_core_dropmap(struct drm_map *map)
+{
+}
+
+#ifndef DEBUG_MEMORY
+/** Wrapper around kmalloc() */
+static __inline__ void *drm_alloc(size_t size, int area)
+{
+       return kmalloc(size, GFP_KERNEL);
+}
+
+/** Wrapper around kfree() */
+static __inline__ void drm_free(void *pt, size_t size, int area)
+{
+       kfree(pt);
+}
+#else
+extern void *drm_alloc(size_t size, int area);
+extern void drm_free(void *pt, size_t size, int area);
+#endif
+
+/*
+ * Accounting variants of standard calls.
+ */
+
+static inline void *drm_ctl_alloc(size_t size, int area)
+{
+       void *ret;
+       if (drm_alloc_memctl(size))
+               return NULL;
+       ret = drm_alloc(size, area);
+       if (!ret)
+               drm_free_memctl(size);
+       return ret;
+}
+
+static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
+{
+       void *ret;
+
+       if (drm_alloc_memctl(nmemb*size))
+               return NULL;
+       ret = drm_calloc(nmemb, size, area);
+       if (!ret)
+               drm_free_memctl(nmemb*size);
+       return ret;
+}
+
+static inline void drm_ctl_free(void *pt, size_t size, int area)
+{
+       drm_free(pt, size, area);
+       drm_free_memctl(size);
+}
+
+static inline size_t drm_size_align(size_t size)
+{
+       size_t tmpSize = 4;
+       if (size > PAGE_SIZE)
+               return PAGE_ALIGN(size);
+
+       while (tmpSize < size)
+               tmpSize <<= 1;
+
+       return (size_t) tmpSize;
+}
+
+
+/*@}*/
+
+#endif                         /* __KERNEL__ */
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_agpsupport.c b/psb-kernel-source-4.41.1/drm_agpsupport.c
new file mode 100644 (file)
index 0000000..5808e21
--- /dev/null
@@ -0,0 +1,652 @@
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/module.h>
+
+#if __OS_HAS_AGP
+
+/**
+ * Get AGP information.
+ *
+ * \param dev DRM device.
+ * \param info pointer to an (output) drm_agp_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
+{
+       DRM_AGP_KERN *kern;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       kern = &dev->agp->agp_info;
+       info->agp_version_major = kern->version.major;
+       info->agp_version_minor = kern->version.minor;
+       info->mode = kern->mode;
+       info->aperture_base = kern->aper_base;
+       info->aperture_size = kern->aper_size * 1024 * 1024;
+       info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+       info->memory_used = kern->current_memory << PAGE_SHIFT;
+       info->id_vendor = kern->device->vendor;
+       info->id_device = kern->device->device;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_info *info = data;
+       int err;
+
+       err = drm_agp_info(dev, info);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
+{
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       int retcode;
+#endif
+
+       if (!dev->agp)
+               return -ENODEV;
+       if (dev->agp->acquired)
+               return -EBUSY;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       if ((retcode = agp_backend_acquire()))
+               return retcode;
+#else
+       if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+               return -ENODEV;
+#endif
+
+       dev->agp->acquired = 1;
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_acquire);
+
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param dev DRM device.
+ * \param data unused.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       return drm_agp_acquire(dev);
+}
+
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device *dev)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       agp_backend_release();
+#else
+       agp_backend_release(dev->agp->bridge);
+#endif
+       dev->agp->acquired = 0;
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       return drm_agp_release(dev);
+}
+
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
+{
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+
+       dev->agp->mode = mode.mode;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       agp_enable(mode.mode);
+#else
+       agp_enable(dev->agp->bridge, mode.mode);
+#endif
+       dev->agp->enabled = 1;
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_mode *mode = data;
+
+       return drm_agp_enable(dev, *mode);
+}
+
+/**
+ * Allocate AGP memory.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_buffer structure (input/output).
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via drm_alloc_agp() and creates a drm_agp_mem entry for it.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+       DRM_AGP_MEM *memory;
+       unsigned long pages;
+       u32 type;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       type = (u32) request->type;
+       if (!(memory = drm_alloc_agp(dev, pages, type))) {
+               drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+               return -ENOMEM;
+       }
+
+       entry->handle = (unsigned long)memory->key + 1;
+       entry->memory = memory;
+       entry->bound = 0;
+       entry->pages = pages;
+       list_add(&entry->head, &dev->agp->memory);
+
+       request->handle = entry->handle;
+       request->physical = memory->physical;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_alloc(dev, request);
+}
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+                                          unsigned long handle)
+{
+       struct drm_agp_mem *entry;
+
+       list_for_each_entry(entry, &dev->agp->memory, head) {
+               if (entry->handle == handle)
+                       return entry;
+       }
+       return NULL;
+}
+
+/**
+ * Unbind AGP memory from the GATT.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks up the AGP memory
+ * entry and passes it to drm_unbind_agp().
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int ret;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (!entry->bound)
+               return -EINVAL;
+       ret = drm_unbind_agp(entry->memory);
+       if (ret == 0)
+               entry->bound = 0;
+       return ret;
+}
+EXPORT_SYMBOL(drm_agp_unbind);
+
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_unbind(dev, request);
+}
+
+/**
+ * Bind AGP memory into the GATT.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that the
+ * memory is not already bound. Looks up the AGP memory entry and passes it
+ * to drm_bind_agp().
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+       struct drm_agp_mem *entry;
+       int retcode;
+       int page;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (entry->bound)
+               return -EINVAL;
+       page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       if ((retcode = drm_bind_agp(entry->memory, page)))
+               return retcode;
+       entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+       DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+                 dev->agp->base, entry->bound);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_bind);
+
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_binding *request = data;
+
+       return drm_agp_bind(dev, request);
+}
+
+/**
+ * Free AGP memory.
+ *
+ * \param dev DRM device.
+ * \param request pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory is currently bound, unbinds it via
+ * drm_unbind_agp(). Frees the memory via drm_free_agp(), unlinks the entry
+ * from the list, and frees the entry itself.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+       struct drm_agp_mem *entry;
+
+       if (!dev->agp || !dev->agp->acquired)
+               return -EINVAL;
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+               return -EINVAL;
+       if (entry->bound)
+               drm_unbind_agp(entry->memory);
+
+       list_del(&entry->head);
+
+       drm_free_agp(entry->memory, entry->pages);
+       drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_free);
+
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_agp_buffer *request = data;
+
+       return drm_agp_free(dev, request);
+}
+
+/**
+ * Initialize the AGP resources.
+ *
+ * \param dev DRM device.
+ * \return pointer to a drm_agp_head structure, or NULL on failure.
+ *
+ * Queries the AGP information from the agpgart module via agp_copy_info()
+ * (locating the bridge with agp_find_bridge() on newer kernels), then
+ * creates and initializes a drm_agp_head structure.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+       struct drm_agp_head *head = NULL;
+
+       if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
+               return NULL;
+       memset((void *)head, 0, sizeof(*head));
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       agp_copy_info(&head->agp_info);
+#else
+       head->bridge = agp_find_bridge(dev->pdev);
+       if (!head->bridge) {
+               if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+                       drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
+                       return NULL;
+               }
+               agp_copy_info(head->bridge, &head->agp_info);
+               agp_backend_release(head->bridge);
+       } else {
+               agp_copy_info(head->bridge, &head->agp_info);
+       }
+#endif
+       if (head->agp_info.chipset == NOT_SUPPORTED) {
+               drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
+               return NULL;
+       }
+       INIT_LIST_HEAD(&head->memory);
+       head->cant_use_aperture = head->agp_info.cant_use_aperture;
+       head->page_mask = head->agp_info.page_mask;
+       head->base = head->agp_info.aper_base;
+       return head;
+}
+
+/** Calls agp_allocate_memory() */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type)
+{
+       return agp_allocate_memory(pages, type);
+}
+#else
+DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
+                                    size_t pages, u32 type)
+{
+       return agp_allocate_memory(bridge, pages, type);
+}
+#endif
+
+/** Calls agp_free_memory() */
+int drm_agp_free_memory(DRM_AGP_MEM * handle)
+{
+       if (!handle)
+               return 0;
+       agp_free_memory(handle);
+       return 1;
+}
+
+/** Calls agp_bind_memory() */
+int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
+{
+       if (!handle)
+               return -EINVAL;
+       return agp_bind_memory(handle, start);
+}
+EXPORT_SYMBOL(drm_agp_bind_memory);
+
+/** Calls agp_unbind_memory() */
+int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
+{
+       if (!handle)
+               return -EINVAL;
+       return agp_unbind_memory(handle);
+}
+
+/*
+ * AGP ttm backend interface.
+ */
+
+#ifndef AGP_USER_TYPES
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+#endif
+#define AGP_REQUIRED_MAJOR 0
+#define AGP_REQUIRED_MINOR 102
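+
+/*
+ * A rough usage sketch (not executed here) of the order in which the
+ * generic ttm code drives the backend functions below, assuming the normal
+ * life cycle of a bound buffer object: populate() wraps the pages in AGP
+ * memory, bind() inserts them into the GATT, unbind() removes them, and
+ * destroy() clears and frees the memory:
+ *
+ *     be = drm_agp_init_ttm(dev);
+ *     be->func->populate(be, num_pages, pages);
+ *     be->func->bind(be, bo_mem);
+ *     be->func->unbind(be);
+ *     be->func->destroy(be);
+ */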
+
+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
+{
+       return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+static int drm_agp_populate(struct drm_ttm_backend *backend,
+                           unsigned long num_pages, struct page **pages)
+{
+       struct drm_agp_ttm_backend *agp_be =
+               container_of(backend, struct drm_agp_ttm_backend, backend);
+       struct page **cur_page, **last_page = pages + num_pages;
+       DRM_AGP_MEM *mem;
+
+       DRM_DEBUG("drm_agp_populate_ttm\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
+#else
+       mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+#endif
+       if (!mem)
+               return -ENOMEM;
+
+       DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
+       mem->page_count = 0;
+       for (cur_page = pages; cur_page < last_page; ++cur_page)
+               mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+       agp_be->mem = mem;
+       return 0;
+}
+
+static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
+                           struct drm_bo_mem_reg *bo_mem)
+{
+       struct drm_agp_ttm_backend *agp_be =
+               container_of(backend, struct drm_agp_ttm_backend, backend);
+       DRM_AGP_MEM *mem = agp_be->mem;
+       int ret;
+       int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) &&
+               !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
+
+       DRM_DEBUG("drm_agp_bind_ttm\n");
+       mem->is_flushed = TRUE;
+       mem->type = AGP_USER_MEMORY;
+       /* CACHED MAPPED implies not snooped memory */
+       if (snooped)
+               mem->type = AGP_USER_CACHED_MEMORY;
+
+       ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
+       if (ret)
+               DRM_ERROR("AGP Bind memory failed\n");
+
+       DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
+                       DRM_BE_FLAG_BOUND_CACHED : 0,
+                       DRM_BE_FLAG_BOUND_CACHED);
+       return ret;
+}
+
+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
+{
+       struct drm_agp_ttm_backend *agp_be =
+               container_of(backend, struct drm_agp_ttm_backend, backend);
+
+       DRM_DEBUG("drm_agp_unbind_ttm\n");
+       if (agp_be->mem->is_bound)
+               return drm_agp_unbind_memory(agp_be->mem);
+       else
+               return 0;
+}
+
+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
+{
+       struct drm_agp_ttm_backend *agp_be =
+               container_of(backend, struct drm_agp_ttm_backend, backend);
+       DRM_AGP_MEM *mem = agp_be->mem;
+
+       DRM_DEBUG("drm_agp_clear_ttm\n");
+       if (mem) {
+               backend->func->unbind(backend);
+               agp_free_memory(mem);
+       }
+       agp_be->mem = NULL;
+}
+
+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
+{
+       struct drm_agp_ttm_backend *agp_be;
+
+       if (backend) {
+               DRM_DEBUG("drm_agp_destroy_ttm\n");
+               agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
+               if (agp_be && agp_be->mem)
+                       backend->func->clear(backend);
+       }
+}
+
+static struct drm_ttm_backend_func agp_ttm_backend = {
+       .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
+       .populate = drm_agp_populate,
+       .clear = drm_agp_clear_ttm,
+       .bind = drm_agp_bind_ttm,
+       .unbind = drm_agp_unbind_ttm,
+       .destroy = drm_agp_destroy_ttm,
+};
+
+struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
+{
+       struct drm_agp_ttm_backend *agp_be;
+       struct agp_kern_info *info;
+
+       if (!dev->agp) {
+               DRM_ERROR("AGP is not initialized.\n");
+               return NULL;
+       }
+       info = &dev->agp->agp_info;
+
+       if (info->version.major != AGP_REQUIRED_MAJOR ||
+           info->version.minor < AGP_REQUIRED_MINOR) {
+               DRM_ERROR("Wrong agpgart version %d.%d\n"
+                         "\tYou need at least version %d.%d.\n",
+                         info->version.major,
+                         info->version.minor,
+                         AGP_REQUIRED_MAJOR,
+                         AGP_REQUIRED_MINOR);
+               return NULL;
+       }
+
+       agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
+       if (!agp_be)
+               return NULL;
+
+       agp_be->mem = NULL;
+
+       agp_be->bridge = dev->agp->bridge;
+       agp_be->populated = FALSE;
+       agp_be->backend.func = &agp_ttm_backend;
+       agp_be->backend.dev = dev;
+
+       return &agp_be->backend;
+}
+EXPORT_SYMBOL(drm_agp_init_ttm);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
+void drm_agp_flush_chipset(struct drm_device *dev)
+{
+       agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_flush_chipset);
+#endif
+
+#endif                         /* __OS_HAS_AGP */
diff --git a/psb-kernel-source-4.41.1/drm_auth.c b/psb-kernel-source-4.41.1/drm_auth.c
new file mode 100644 (file)
index 0000000..c904a91
--- /dev/null
@@ -0,0 +1,189 @@
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches drm_device::magiclist, among all files with the same hash key,
+ * for the one with the matching magic number, while holding the
+ * drm_device::struct_mutex lock.
+ */
+static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
+{
+       struct drm_file *retval = NULL;
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+
+       mutex_lock(&dev->struct_mutex);
+       if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+               pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+               retval = pt->priv;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return retval;
+}
+
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends it to the linked list
+ * associated with the magic number hash key in drm_device::magiclist, while
+ * holding the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
+                        drm_magic_t magic)
+{
+       struct drm_magic_entry *entry;
+
+       DRM_DEBUG("%d\n", magic);
+
+       entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
+       if (!entry)
+               return -ENOMEM;
+       memset(entry, 0, sizeof(*entry));
+       entry->priv = priv;
+       entry->hash_item.key = (unsigned long)magic;
+       mutex_lock(&dev->struct_mutex);
+       drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
+       list_add_tail(&entry->head, &dev->magicfree);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
+{
+       struct drm_magic_entry *pt;
+       struct drm_hash_item *hash;
+
+       DRM_DEBUG("%d\n", magic);
+
+       mutex_lock(&dev->struct_mutex);
+       if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+       drm_ht_remove_item(&dev->magiclist, hash);
+       list_del(&pt->head);
+       mutex_unlock(&dev->struct_mutex);
+
+       drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+
+       return 0;
+}
+
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param dev DRM device.
+ * \param data pointer to a drm_auth structure (output).
+ * \param file_priv DRM file private.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * search for a unique non-zero magic number and associate it with \p
+ * file_priv.
+ */
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       static drm_magic_t sequence = 0;
+       static DEFINE_SPINLOCK(lock);
+       struct drm_auth *auth = data;
+
+       /* Find unique magic */
+       if (file_priv->magic) {
+               auth->magic = file_priv->magic;
+       } else {
+               do {
+                       spin_lock(&lock);
+                       if (!sequence)
+                               ++sequence;     /* reserve 0 */
+                       auth->magic = sequence++;
+                       spin_unlock(&lock);
+               } while (drm_find_file(dev, auth->magic));
+               file_priv->magic = auth->magic;
+               drm_add_magic(dev, file_priv, auth->magic);
+       }
+
+       DRM_DEBUG("%u\n", auth->magic);
+
+       return 0;
+}
+
+/**
+ * Authenticate with a magic.
+ *
+ * \param dev DRM device.
+ * \param data pointer to a drm_auth structure.
+ * \param file_priv DRM file private.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Looks up the file associated with the magic number passed in \p data and
+ * marks it authenticated.
+ */
+int drm_authmagic(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_auth *auth = data;
+       struct drm_file *file;
+
+       DRM_DEBUG("%u\n", auth->magic);
+       if ((file = drm_find_file(dev, auth->magic))) {
+               file->authenticated = 1;
+               drm_remove_magic(dev, auth->magic);
+               return 0;
+       }
+       return -EINVAL;
+}
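+
+/*
+ * For reference, the two ioctls above implement the classic DRM
+ * authentication handshake. A minimal user-space sketch, assuming the
+ * libdrm wrappers drmGetMagic() (DRM_IOCTL_GET_MAGIC) and drmAuthMagic()
+ * (DRM_IOCTL_AUTH_MAGIC), plus a hypothetical out-of-band channel
+ * send_to_server() between client and master:
+ *
+ *     drm_magic_t magic;
+ *
+ *     drmGetMagic(client_fd, &magic);
+ *     send_to_server(magic);
+ *     drmAuthMagic(master_fd, magic);
+ *
+ * Once drmAuthMagic() returns zero, client_fd may use authenticated ioctls.
+ */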
diff --git a/psb-kernel-source-4.41.1/drm_bo.c b/psb-kernel-source-4.41.1/drm_bo.c
new file mode 100644 (file)
index 0000000..7066c3a
--- /dev/null
@@ -0,0 +1,2668 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/*
+ * Locking may look a bit complicated but isn't really:
+ *
+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
+ * when there is a chance that it can be zero before or after the operation.
+ *
+ * dev->struct_mutex also protects all lists and list heads,
+ * hash tables and hash heads.
+ *
+ * bo->mutex protects the buffer object itself, excluding the usage field.
+ * bo->mutex also protects the buffer list heads, so to manipulate those,
+ * we need both bo->mutex and dev->struct_mutex.
+ *
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
+ * the list traversal will, in general, need to be restarted.
+ *
+ */
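+
+/*
+ * Illustrative sketch of the list-restart pattern described above (compare
+ * drm_bo_mem_force_space() further down; the loop body is schematic):
+ *
+ *     mutex_lock(&dev->struct_mutex);
+ *     while (!list_empty(head)) {
+ *             bo = list_entry(head->next, struct drm_buffer_object, lru);
+ *             atomic_inc(&bo->usage);
+ *             mutex_unlock(&dev->struct_mutex);
+ *             mutex_lock(&bo->mutex);
+ *             ... operate on bo ...
+ *             mutex_unlock(&bo->mutex);
+ *             mutex_lock(&dev->struct_mutex);
+ *             drm_bo_usage_deref_locked(&bo);
+ *     }
+ *     mutex_unlock(&dev->struct_mutex);
+ *
+ * The usage reference keeps bo alive while struct_mutex is dropped, and the
+ * traversal restarts from the head because the list may have changed.
+ */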
+
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
+
+static inline uint64_t drm_bo_type_flags(unsigned type)
+{
+       return (1ULL << (24 + type));
+}
+
+/*
+ * bo locked. dev->struct_mutex locked.
+ */
+
+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
+{
+       struct drm_mem_type_manager *man;
+
+       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+       DRM_ASSERT_LOCKED(&bo->mutex);
+
+       man = &bo->dev->bm.man[bo->pinned_mem_type];
+       list_add_tail(&bo->pinned_lru, &man->pinned);
+}
+
+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
+{
+       struct drm_mem_type_manager *man;
+
+       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+
+       if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+           || bo->mem.mem_type != bo->pinned_mem_type) {
+               man = &bo->dev->bm.man[bo->mem.mem_type];
+               list_add_tail(&bo->lru, &man->lru);
+       } else {
+               INIT_LIST_HEAD(&bo->lru);
+       }
+}
+
+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
+{
+#ifdef DRM_ODD_MM_COMPAT
+       int ret;
+
+       if (!bo->map_list.map)
+               return 0;
+
+       ret = drm_bo_lock_kmm(bo);
+       if (ret)
+               return ret;
+       drm_bo_unmap_virtual(bo);
+       if (old_is_pci)
+               drm_bo_finish_unmap(bo);
+#else
+       if (!bo->map_list.map)
+               return 0;
+
+       drm_bo_unmap_virtual(bo);
+#endif
+       return 0;
+}
+
+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
+{
+#ifdef DRM_ODD_MM_COMPAT
+       int ret;
+
+       if (!bo->map_list.map)
+               return;
+
+       ret = drm_bo_remap_bound(bo);
+       if (ret) {
+               DRM_ERROR("Failed to remap a bound buffer object.\n"
+                         "\tThis might cause a sigbus later.\n");
+       }
+       drm_bo_unlock_kmm(bo);
+#endif
+}
+
+/*
+ * Call bo->mutex locked.
+ */
+
+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
+{
+       struct drm_device *dev = bo->dev;
+       int ret = 0;
+
+       DRM_ASSERT_LOCKED(&bo->mutex);
+       bo->ttm = NULL;
+
+       switch (bo->type) {
+       case drm_bo_type_dc:
+       case drm_bo_type_kernel:
+               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               if (!bo->ttm)
+                       ret = -ENOMEM;
+               break;
+       case drm_bo_type_user:
+               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               if (!bo->ttm) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               ret = drm_ttm_set_user(bo->ttm, current,
+                                      bo->mem.mask & DRM_BO_FLAG_WRITE,
+                                      bo->buffer_start,
+                                      bo->num_pages,
+                                      dev->bm.dummy_read_page);
+               if (ret)
+                       return ret;
+
+               break;
+       default:
+               DRM_ERROR("Illegal buffer object type\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
+                                 struct drm_bo_mem_reg *mem,
+                                 int evict, int no_wait)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
+       int new_is_pci = drm_mem_reg_is_pci(dev, mem);
+       struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
+       struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
+       int ret = 0;
+
+       if (old_is_pci || new_is_pci ||
+           ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
+               ret = drm_bo_vm_pre_move(bo, old_is_pci);
+       if (ret)
+               return ret;
+
+       /*
+        * Create and bind a ttm if required.
+        */
+
+       if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
+               ret = drm_bo_add_ttm(bo);
+               if (ret)
+                       goto out_err;
+
+               if (mem->mem_type != DRM_BO_MEM_LOCAL) {
+                       ret = drm_bind_ttm(bo->ttm, mem);
+                       if (ret)
+                               goto out_err;
+               }
+
+               if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
+
+                       struct drm_bo_mem_reg *old_mem = &bo->mem;
+                       uint64_t save_flags = old_mem->flags;
+                       uint64_t save_mask = old_mem->mask;
+
+                       *old_mem = *mem;
+                       mem->mm_node = NULL;
+                       old_mem->mask = save_mask;
+                       DRM_FLAG_MASKED(save_flags, mem->flags,
+                                       DRM_BO_MASK_MEMTYPE);
+                       goto moved;
+               }
+       }
+
+       if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
+                  !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+
+               ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
+
+       } else if (dev->driver->bo_driver->move) {
+               ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
+
+       } else {
+
+               ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
+
+       }
+
+       if (ret)
+               goto out_err;
+
+moved:
+       if (old_is_pci || new_is_pci)
+               drm_bo_vm_post_move(bo);
+
+       if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
+               ret =
+                   dev->driver->bo_driver->invalidate_caches(dev,
+                                                             bo->mem.flags);
+               if (ret)
+                       DRM_ERROR("Can not flush read caches\n");
+       }
+
+       DRM_FLAG_MASKED(bo->priv_flags,
+                       (evict) ? _DRM_BO_FLAG_EVICTED : 0,
+                       _DRM_BO_FLAG_EVICTED);
+
+       if (bo->mem.mm_node)
+               bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+                       bm->man[bo->mem.mem_type].gpu_offset;
+
+       return 0;
+
+out_err:
+       if (old_is_pci || new_is_pci)
+               drm_bo_vm_post_move(bo);
+
+       new_man = &bm->man[bo->mem.mem_type];
+       if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
+               drm_ttm_unbind(bo->ttm);
+               drm_destroy_ttm(bo->ttm);
+               bo->ttm = NULL;
+       }
+
+       return ret;
+}
+
+/*
+ * Call bo->mutex locked.
+ * Wait until the buffer is idle.
+ */
+
+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
+               int no_wait)
+{
+       int ret;
+
+       DRM_ASSERT_LOCKED(&bo->mutex);
+
+       if (bo->fence) {
+               if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
+                       return 0;
+               }
+               if (no_wait)
+                       return -EBUSY;
+
+               ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
+                                         bo->fence_type);
+               if (ret)
+                       return ret;
+
+               drm_fence_usage_deref_unlocked(&bo->fence);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_wait);
+
+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+
+       if (bo->fence) {
+               if (bm->nice_mode) {
+                       unsigned long _end = jiffies + 3 * DRM_HZ;
+                       int ret;
+                       do {
+                               ret = drm_bo_wait(bo, 0, 1, 0);
+                               if (ret && allow_errors)
+                                       return ret;
+
+                       } while (ret && !time_after_eq(jiffies, _end));
+
+                       if (bo->fence) {
+                               bm->nice_mode = 0;
+                               DRM_ERROR("Detected GPU lockup or "
+                                         "fence driver was taken down. "
+                                         "Evicting buffer.\n");
+                       }
+               }
+               if (bo->fence)
+                       drm_fence_usage_deref_unlocked(&bo->fence);
+       }
+       return 0;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ * Attempts to remove all private references to a buffer by expiring its
+ * fence object and removing it from the lru lists and memory managers.
+ */
+
+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
+       atomic_inc(&bo->usage);
+       mutex_unlock(&dev->struct_mutex);
+       mutex_lock(&bo->mutex);
+
+       DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+
+       if (bo->fence && drm_fence_object_signaled(bo->fence,
+                                                  bo->fence_type))
+               drm_fence_usage_deref_unlocked(&bo->fence);
+
+       if (bo->fence && remove_all)
+               (void)drm_bo_expire_fence(bo, 0);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!atomic_dec_and_test(&bo->usage))
+               goto out;
+
+       if (!bo->fence) {
+               list_del_init(&bo->lru);
+               if (bo->mem.mm_node) {
+                       drm_mm_put_block(bo->mem.mm_node);
+                       if (bo->pinned_node == bo->mem.mm_node)
+                               bo->pinned_node = NULL;
+                       bo->mem.mm_node = NULL;
+               }
+               list_del_init(&bo->pinned_lru);
+               if (bo->pinned_node) {
+                       drm_mm_put_block(bo->pinned_node);
+                       bo->pinned_node = NULL;
+               }
+               list_del_init(&bo->ddestroy);
+               mutex_unlock(&bo->mutex);
+               drm_bo_destroy_locked(bo);
+               return;
+       }
+
+       if (list_empty(&bo->ddestroy)) {
+               drm_fence_object_flush(bo->fence, bo->fence_type);
+               list_add_tail(&bo->ddestroy, &bm->ddestroy);
+               schedule_delayed_work(&bm->wq,
+                                     ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
+       }
+
+out:
+       mutex_unlock(&bo->mutex);
+       return;
+}
+
+static void drm_bo_unreserve_size(unsigned long size)
+{
+       drm_free_memctl(size);
+}
+
+/*
+ * Verify that refcount is 0 and that there are no internal references
+ * to the buffer object. Then destroy it.
+ */
+
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       unsigned long reserved_size;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
+       if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
+           list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
+           list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
+               if (bo->fence != NULL) {
+                       DRM_ERROR("Fence was non-zero.\n");
+                       drm_bo_cleanup_refs(bo, 0);
+                       return;
+               }
+
+#ifdef DRM_ODD_MM_COMPAT
+               BUG_ON(!list_empty(&bo->vma_list));
+               BUG_ON(!list_empty(&bo->p_mm_list));
+#endif
+
+               if (bo->ttm) {
+                       drm_ttm_unbind(bo->ttm);
+                       drm_destroy_ttm(bo->ttm);
+                       bo->ttm = NULL;
+               }
+
+               atomic_dec(&bm->count);
+
+               reserved_size = bo->reserved_size;
+
+               drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+               drm_bo_unreserve_size(reserved_size);
+
+               return;
+       }
+
+       /*
+        * Some stuff is still trying to reference the buffer object.
+        * Get rid of those references.
+        */
+
+       drm_bo_cleanup_refs(bo, 0);
+
+       return;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+
+       struct drm_buffer_object *entry, *nentry;
+       struct list_head *list, *next;
+
+       list_for_each_safe(list, next, &bm->ddestroy) {
+               entry = list_entry(list, struct drm_buffer_object, ddestroy);
+
+               nentry = NULL;
+               if (next != &bm->ddestroy) {
+                       nentry = list_entry(next, struct drm_buffer_object,
+                                           ddestroy);
+                       atomic_inc(&nentry->usage);
+               }
+
+               drm_bo_cleanup_refs(entry, remove_all);
+
+               if (nentry)
+                       atomic_dec(&nentry->usage);
+       }
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static void drm_bo_delayed_workqueue(void *data)
+#else
+static void drm_bo_delayed_workqueue(struct work_struct *work)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+       struct drm_device *dev = (struct drm_device *) data;
+       struct drm_buffer_manager *bm = &dev->bm;
+#else
+       struct drm_buffer_manager *bm =
+           container_of(work, struct drm_buffer_manager, wq.work);
+       struct drm_device *dev = container_of(bm, struct drm_device, bm);
+#endif
+
+       DRM_DEBUG("Delayed delete Worker\n");
+
+       mutex_lock(&dev->struct_mutex);
+       if (!bm->initialized) {
+               mutex_unlock(&dev->struct_mutex);
+               return;
+       }
+       drm_bo_delayed_delete(dev, 0);
+       if (bm->initialized && !list_empty(&bm->ddestroy)) {
+               schedule_delayed_work(&bm->wq,
+                                     ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
+{
+       struct drm_buffer_object *tmp_bo = *bo;
+       *bo = NULL;
+
+       DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
+
+       if (atomic_dec_and_test(&tmp_bo->usage))
+               drm_bo_destroy_locked(tmp_bo);
+}
+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
+
+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
+                                    struct drm_user_object *uo)
+{
+       struct drm_buffer_object *bo =
+           drm_user_object_entry(uo, struct drm_buffer_object, base);
+
+       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+
+       drm_bo_takedown_vm_locked(bo);
+       drm_bo_usage_deref_locked(&bo);
+}
+
+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
+{
+       struct drm_buffer_object *tmp_bo = *bo;
+       struct drm_device *dev = tmp_bo->dev;
+
+       *bo = NULL;
+       if (atomic_dec_and_test(&tmp_bo->usage)) {
+               mutex_lock(&dev->struct_mutex);
+               if (atomic_read(&tmp_bo->usage) == 0)
+                       drm_bo_destroy_locked(tmp_bo);
+               mutex_unlock(&dev->struct_mutex);
+       }
+}
+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
+
+void drm_putback_buffer_objects(struct drm_device *dev)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct list_head *list = &bm->unfenced;
+       struct drm_buffer_object *entry, *next;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(entry, next, list, lru) {
+               atomic_inc(&entry->usage);
+               mutex_unlock(&dev->struct_mutex);
+
+               mutex_lock(&entry->mutex);
+               BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+               mutex_lock(&dev->struct_mutex);
+
+               list_del_init(&entry->lru);
+               DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+               wake_up_all(&entry->event_queue);
+
+               /*
+                * FIXME: Might want to put back on head of list
+                * instead of tail here.
+                */
+
+               drm_bo_add_to_lru(entry);
+               mutex_unlock(&entry->mutex);
+               drm_bo_usage_deref_locked(&entry);
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_putback_buffer_objects);
+
+/*
+ * Note. The caller has to register (if applicable)
+ * and deregister fence object usage.
+ */
+
+int drm_fence_buffer_objects(struct drm_device *dev,
+                            struct list_head *list,
+                            uint32_t fence_flags,
+                            struct drm_fence_object *fence,
+                            struct drm_fence_object **used_fence)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_buffer_object *entry;
+       uint32_t fence_type = 0;
+       uint32_t fence_class = ~0;
+       int count = 0;
+       int ret = 0;
+       struct list_head *l;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!list)
+               list = &bm->unfenced;
+
+       if (fence)
+               fence_class = fence->fence_class;
+
+       list_for_each_entry(entry, list, lru) {
+               BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+               fence_type |= entry->new_fence_type;
+               if (fence_class == ~0)
+                       fence_class = entry->new_fence_class;
+               else if (entry->new_fence_class != fence_class) {
+                       DRM_ERROR("Unmatching fence classes on unfenced list: "
+                                 "%d and %d.\n",
+                                 fence_class,
+                                 entry->new_fence_class);
+                       ret = -EINVAL;
+                       goto out;
+               }
+               count++;
+       }
+
+       if (!count) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (fence) {
+               if ((fence_type & fence->type) != fence_type ||
+                   (fence->fence_class != fence_class)) {
+                       DRM_ERROR("Given fence doesn't match buffers "
+                                 "on unfenced list.\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+       } else {
+               mutex_unlock(&dev->struct_mutex);
+               ret = drm_fence_object_create(dev, fence_class, fence_type,
+                                             fence_flags | DRM_FENCE_FLAG_EMIT,
+                                             &fence);
+               mutex_lock(&dev->struct_mutex);
+               if (ret)
+                       goto out;
+       }
+
+       count = 0;
+       l = list->next;
+       while (l != list) {
+               prefetch(l->next);
+               entry = list_entry(l, struct drm_buffer_object, lru);
+               atomic_inc(&entry->usage);
+               mutex_unlock(&dev->struct_mutex);
+               mutex_lock(&entry->mutex);
+               mutex_lock(&dev->struct_mutex);
+               list_del_init(l);
+               if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+                       count++;
+                       if (entry->fence)
+                               drm_fence_usage_deref_locked(&entry->fence);
+                       entry->fence = drm_fence_reference_locked(fence);
+                       entry->fence_class = entry->new_fence_class;
+                       entry->fence_type = entry->new_fence_type;
+                       DRM_FLAG_MASKED(entry->priv_flags, 0,
+                                       _DRM_BO_FLAG_UNFENCED);
+                       wake_up_all(&entry->event_queue);
+                       drm_bo_add_to_lru(entry);
+               }
+               mutex_unlock(&entry->mutex);
+               drm_bo_usage_deref_locked(&entry);
+               l = list->next;
+       }
+       DRM_DEBUG("Fenced %d buffers\n", count);
+out:
+       mutex_unlock(&dev->struct_mutex);
+       *used_fence = fence;
+       return ret;
+}
+EXPORT_SYMBOL(drm_fence_buffer_objects);
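+
+/*
+ * Sketch of the calling convention for drm_fence_buffer_objects() (a
+ * hypothetical driver fragment; error handling elided). Passing a NULL
+ * list fences everything on bm->unfenced, and passing a NULL fence asks
+ * the function to create and emit one:
+ *
+ *     struct drm_fence_object *fence = NULL;
+ *
+ *     ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
+ *     if (!ret && fence)
+ *             drm_fence_usage_deref_unlocked(&fence);
+ *
+ * The final deref is the "deregister fence object usage" step the note
+ * above refers to.
+ */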
+
+/*
+ * bo->mutex locked
+ */
+
+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
+                       int no_wait)
+{
+       int ret = 0;
+       struct drm_device *dev = bo->dev;
+       struct drm_bo_mem_reg evict_mem;
+
+       /*
+        * Someone might have modified the buffer before we took the
+        * buffer mutex.
+        */
+
+       if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
+               goto out;
+       if (bo->mem.mem_type != mem_type)
+               goto out;
+
+       ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+       if (ret && ret != -EAGAIN) {
+               DRM_ERROR("Failed to expire fence before "
+                         "buffer eviction.\n");
+               goto out;
+       }
+
+       evict_mem = bo->mem;
+       evict_mem.mm_node = NULL;
+       evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+       ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
+
+       if (ret) {
+               if (ret != -EAGAIN)
+                       DRM_ERROR("Failed to find memory space for "
+                                 "buffer 0x%p eviction.\n", bo);
+               goto out;
+       }
+
+       ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
+
+       if (ret) {
+               if (ret != -EAGAIN)
+                       DRM_ERROR("Buffer eviction failed\n");
+               goto out;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (evict_mem.mm_node) {
+               if (evict_mem.mm_node != bo->pinned_node)
+                       drm_mm_put_block(evict_mem.mm_node);
+               evict_mem.mm_node = NULL;
+       }
+       list_del(&bo->lru);
+       drm_bo_add_to_lru(bo);
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+                       _DRM_BO_FLAG_EVICTED);
+
+out:
+       return ret;
+}
+
+/**
+ * Repeatedly evict memory from the LRU for \p mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int drm_bo_mem_force_space(struct drm_device *dev,
+                                 struct drm_bo_mem_reg *mem,
+                                 uint32_t mem_type, int no_wait)
+{
+       struct drm_mm_node *node;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_buffer_object *entry;
+       struct drm_mem_type_manager *man = &bm->man[mem_type];
+       struct list_head *lru;
+       unsigned long num_pages = mem->num_pages;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       do {
+               node = drm_mm_search_free(&man->manager, num_pages,
+                                         mem->page_alignment, 1);
+               if (node)
+                       break;
+
+               lru = &man->lru;
+               if (list_empty(lru))
+                       break;
+
+               entry = list_entry(lru->next, struct drm_buffer_object, lru);
+               atomic_inc(&entry->usage);
+               mutex_unlock(&dev->struct_mutex);
+               mutex_lock(&entry->mutex);
+               BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
+
+               ret = drm_bo_evict(entry, mem_type, no_wait);
+               mutex_unlock(&entry->mutex);
+               drm_bo_usage_deref_unlocked(&entry);
+               if (ret)
+                       return ret;
+               mutex_lock(&dev->struct_mutex);
+       } while (1);
+
+       if (!node) {
+               mutex_unlock(&dev->struct_mutex);
+               return -ENOMEM;
+       }
+
+       node = drm_mm_get_block(node, num_pages, mem->page_alignment);
+       if (!node) {
+               mutex_unlock(&dev->struct_mutex);
+               return -ENOMEM;
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+       mem->mm_node = node;
+       mem->mem_type = mem_type;
+       return 0;
+}
+
+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
+                               int disallow_fixed,
+                               uint32_t mem_type,
+                               uint64_t mask, uint32_t *res_mask)
+{
+       uint64_t cur_flags = drm_bo_type_flags(mem_type);
+       uint64_t flag_diff;
+
+       if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
+               return 0;
+       if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
+               cur_flags |= DRM_BO_FLAG_CACHED;
+       if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
+               cur_flags |= DRM_BO_FLAG_MAPPABLE;
+       if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
+               DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
+
+       if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
+               return 0;
+
+       if (mem_type == DRM_BO_MEM_LOCAL) {
+               *res_mask = cur_flags;
+               return 1;
+       }
+
+       flag_diff = (mask ^ cur_flags);
+       if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
+               cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
+
+       if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+           (!(mask & DRM_BO_FLAG_CACHED) ||
+            (mask & DRM_BO_FLAG_FORCE_CACHING)))
+               return 0;
+
+       if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+           ((mask & DRM_BO_FLAG_MAPPABLE) ||
+            (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+               return 0;
+
+       *res_mask = cur_flags;
+       return 1;
+}
+
+/**
+ * Creates space for memory region \p mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver.  If free space isn't found, then
+ * drm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int drm_bo_mem_space(struct drm_buffer_object *bo,
+                    struct drm_bo_mem_reg *mem, int no_wait)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man;
+
+       uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
+       const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
+       uint32_t i;
+       uint32_t mem_type = DRM_BO_MEM_LOCAL;
+       uint32_t cur_flags;
+       int type_found = 0;
+       int type_ok = 0;
+       int has_eagain = 0;
+       struct drm_mm_node *node = NULL;
+       int ret;
+
+       mem->mm_node = NULL;
+       for (i = 0; i < num_prios; ++i) {
+               mem_type = prios[i];
+               man = &bm->man[mem_type];
+
+               type_ok = drm_bo_mt_compatible(man,
+                                              bo->type == drm_bo_type_user,
+                                              mem_type, mem->mask,
+                                              &cur_flags);
+
+               if (!type_ok)
+                       continue;
+
+               if (mem_type == DRM_BO_MEM_LOCAL)
+                       break;
+
+               if ((mem_type == bo->pinned_mem_type) &&
+                   (bo->pinned_node != NULL)) {
+                       node = bo->pinned_node;
+                       break;
+               }
+
+               mutex_lock(&dev->struct_mutex);
+               if (man->has_type && man->use_type) {
+                       type_found = 1;
+                       node = drm_mm_search_free(&man->manager, mem->num_pages,
+                                                 mem->page_alignment, 1);
+                       if (node)
+                               node = drm_mm_get_block(node, mem->num_pages,
+                                                       mem->page_alignment);
+               }
+               mutex_unlock(&dev->struct_mutex);
+               if (node)
+                       break;
+       }
+
+       if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
+               mem->mm_node = node;
+               mem->mem_type = mem_type;
+               mem->flags = cur_flags;
+               return 0;
+       }
+
+       if (!type_found)
+               return -EINVAL;
+
+       num_prios = dev->driver->bo_driver->num_mem_busy_prio;
+       prios = dev->driver->bo_driver->mem_busy_prio;
+
+       for (i = 0; i < num_prios; ++i) {
+               mem_type = prios[i];
+               man = &bm->man[mem_type];
+
+               if (!man->has_type)
+                       continue;
+
+               if (!drm_bo_mt_compatible(man,
+                                         bo->type == drm_bo_type_user,
+                                         mem_type,
+                                         mem->mask,
+                                         &cur_flags))
+                       continue;
+
+               ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
+
+               if (ret == 0 && mem->mm_node) {
+                       mem->flags = cur_flags;
+                       return 0;
+               }
+
+               if (ret == -EAGAIN)
+                       has_eagain = 1;
+       }
+
+       return (has_eagain) ? -EAGAIN : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_bo_mem_space);
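+
+/*
+ * The placement search above is driven entirely by driver-supplied tables.
+ * A hypothetical drm_bo_driver fragment (names invented for illustration;
+ * the psb driver installs its own tables):
+ *
+ *     static uint32_t example_prios[] = {
+ *             DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
+ *     };
+ *
+ *     .mem_type_prio = example_prios,
+ *     .num_mem_type_prio = ARRAY_SIZE(example_prios),
+ *     .mem_busy_prio = example_prios,
+ *     .num_mem_busy_prio = ARRAY_SIZE(example_prios),
+ */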
+
+static int drm_bo_new_mask(struct drm_buffer_object *bo,
+                          uint64_t new_flags, uint64_t used_mask)
+{
+       uint32_t new_props;
+
+       if (bo->type == drm_bo_type_user &&
+           ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
+            (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
+               DRM_ERROR("User buffers require cache-coherent memory.\n");
+               return -EINVAL;
+       }
+
+       if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+               DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
+               return -EPERM;
+       }
+
+       if (likely(used_mask & DRM_BO_MASK_MEM) &&
+           (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
+           !DRM_SUSER(DRM_CURPROC)) {
+               if (likely(bo->mem.flags & new_flags & used_mask &
+                          DRM_BO_MASK_MEM)) {
+                       new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
+                               (bo->mem.flags & DRM_BO_MASK_MEM);
+               } else {
+                       DRM_ERROR("Incompatible memory type specification "
+                                 "for NO_EVICT buffer.\n");
+                       return -EPERM;
+               }
+       }
+
+       if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
+               DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
+               return -EPERM;
+       }
+
+       new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+                                DRM_BO_FLAG_READ);
+
+       if (!new_props) {
+               DRM_ERROR("Invalid buffer object rwx properties\n");
+               return -EINVAL;
+       }
+
+       bo->mem.mask = new_flags;
+       return 0;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
+                                             uint32_t handle, int check_owner)
+{
+       struct drm_user_object *uo;
+       struct drm_buffer_object *bo;
+
+       uo = drm_lookup_user_object(file_priv, handle);
+
+       if (!uo || (uo->type != drm_buffer_type)) {
+               DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
+               return NULL;
+       }
+
+       if (check_owner && file_priv != uo->owner) {
+               if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
+                       return NULL;
+       }
+
+       bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
+       atomic_inc(&bo->usage);
+       return bo;
+}
+EXPORT_SYMBOL(drm_lookup_buffer_object);
+
+/*
+ * Call bo->mutex locked.
+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
+ * Unlike drm_bo_busy(), this does not flush any fences.
+ */
+
+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
+{
+       struct drm_fence_object *fence = bo->fence;
+
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       if (fence) {
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
+                       return 0;
+               }
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Call bo->mutex locked.
+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
+ */
+
+static int drm_bo_busy(struct drm_buffer_object *bo)
+{
+       struct drm_fence_object *fence = bo->fence;
+
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       if (fence) {
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
+                       return 0;
+               }
+               drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
+                       return 0;
+               }
+               return 1;
+       }
+       return 0;
+}
+
+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
+{
+       int ret = 0;
+
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       if (bo->mem.mm_node)
+               ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
+       return ret;
+}
+
+/*
+ * Wait until a buffer is unmapped.
+ */
+
+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
+{
+       int ret = 0;
+
+       if ((atomic_read(&bo->mapped) >= 0) && no_wait)
+               return -EBUSY;
+
+       DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+                   atomic_read(&bo->mapped) == -1);
+
+       if (ret == -EINTR)
+               ret = -EAGAIN;
+
+       return ret;
+}
+
+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
+{
+       int ret;
+
+       mutex_lock(&bo->mutex);
+       ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       mutex_unlock(&bo->mutex);
+       return ret;
+}
+
+/*
+ * Wait until a buffer that is scheduled to be fenced has moved off the
+ * unfenced list. Until then, we cannot really do anything with it except
+ * delete it.
+ */
+
+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
+                               int eagain_if_wait)
+{
+       int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+
+       if (ret && no_wait)
+               return -EBUSY;
+       else if (!ret)
+               return 0;
+
+       ret = 0;
+       mutex_unlock(&bo->mutex);
+       DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+                   !drm_bo_check_unfenced(bo));
+       mutex_lock(&bo->mutex);
+       if (ret == -EINTR)
+               return -EAGAIN;
+       ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       if (ret) {
+               DRM_ERROR("Timeout waiting for buffer to become fenced\n");
+               return -EBUSY;
+       }
+       if (eagain_if_wait)
+               return -EAGAIN;
+
+       return 0;
+}
+
+/*
+ * Fill in the ioctl reply argument with buffer info.
+ * Bo locked.
+ */
+
+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
+                        struct drm_bo_info_rep *rep)
+{
+       if (!rep)
+               return;
+
+       rep->handle = bo->base.hash.key;
+       rep->flags = bo->mem.flags;
+       rep->size = bo->num_pages * PAGE_SIZE;
+       rep->offset = bo->offset;
+
+       if (bo->type == drm_bo_type_dc)
+               rep->arg_handle = bo->map_list.user_token;
+       else
+               rep->arg_handle = 0;
+
+       rep->mask = bo->mem.mask;
+       rep->buffer_start = bo->buffer_start;
+       rep->fence_flags = bo->fence_type;
+       rep->rep_flags = 0;
+       rep->page_alignment = bo->mem.page_alignment;
+
+       if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
+               DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
+                               DRM_BO_REP_BUSY);
+       }
+}
+EXPORT_SYMBOL(drm_bo_fill_rep_arg);
+
+/*
+ * Wait for buffer idle and register that we've mapped the buffer.
+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
+ * so that if the client dies, the mapping is automatically
+ * unregistered.
+ */
+
+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
+                                uint32_t map_flags, unsigned hint,
+                                struct drm_bo_info_rep *rep)
+{
+       struct drm_buffer_object *bo;
+       struct drm_device *dev = file_priv->head->dev;
+       int ret = 0;
+       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+       mutex_lock(&dev->struct_mutex);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!bo)
+               return -EINVAL;
+
+       mutex_lock(&bo->mutex);
+       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+       if (ret)
+               goto out;
+
+       /*
+        * If this returns true, we are currently unmapped.
+        * We need to do this test, because unmapping can
+        * be done without the bo->mutex held.
+        */
+
+       while (1) {
+               if (atomic_inc_and_test(&bo->mapped)) {
+                       if (no_wait && drm_bo_busy(bo)) {
+                               atomic_dec(&bo->mapped);
+                               ret = -EBUSY;
+                               goto out;
+                       }
+
+                       ret = drm_bo_wait(bo, 0, 0, no_wait);
+                       if (ret) {
+                               atomic_dec(&bo->mapped);
+                               goto out;
+                       }
+
+                       if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
+                               drm_bo_evict_cached(bo);
+
+                       break;
+               } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
+
+                       /*
+                        * We are already mapped with different flags.
+                        * need to wait for unmap.
+                        */
+
+                       ret = drm_bo_wait_unmapped(bo, no_wait);
+                       if (ret)
+                               goto out;
+
+                       continue;
+               }
+               break;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret) {
+               if (atomic_add_negative(-1, &bo->mapped))
+                       wake_up_all(&bo->event_queue);
+
+       } else
+               drm_bo_fill_rep_arg(bo, rep);
+out:
+       mutex_unlock(&bo->mutex);
+       drm_bo_usage_deref_unlocked(&bo);
+       return ret;
+}
+
+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
+       struct drm_ref_object *ro;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       if (!bo) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
+       if (!ro) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       drm_remove_ref_object(file_priv, ro);
+       drm_bo_usage_deref_locked(&bo);
+out:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
+                                        struct drm_user_object *uo,
+                                        enum drm_ref_type action)
+{
+       struct drm_buffer_object *bo =
+           drm_user_object_entry(uo, struct drm_buffer_object, base);
+
+       /*
+        * We DON'T want to take the bo->lock here, because we want to
+        * hold it when we wait for unmapped buffer.
+        */
+
+       BUG_ON(action != _DRM_REF_TYPE1);
+
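+       /*
+        * Drop the map count; when it reaches -1 again the buffer is fully
+        * unmapped, and anyone blocked in drm_bo_wait_unmapped() is woken.
+        */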
+       if (atomic_add_negative(-1, &bo->mapped))
+               wake_up_all(&bo->event_queue);
+}
+
+/*
+ * bo->mutex locked.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
+ */
+
+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
+                      int no_wait, int move_unfenced)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       int ret = 0;
+       struct drm_bo_mem_reg mem;
+       /*
+        * Flush outstanding fences.
+        */
+
+       drm_bo_busy(bo);
+
+       /*
+        * Wait for outstanding fences.
+        */
+
+       ret = drm_bo_wait(bo, 0, 0, no_wait);
+       if (ret)
+               return ret;
+
+       mem.num_pages = bo->num_pages;
+       mem.size = mem.num_pages << PAGE_SHIFT;
+       mem.mask = new_mem_flags;
+       mem.page_alignment = bo->mem.page_alignment;
+
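+
+       /*
+        * bm->evict_mutex serializes space allocation with eviction; the
+        * buffer is taken off the LRU while it is being moved.
+        */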
+       mutex_lock(&bm->evict_mutex);
+       mutex_lock(&dev->struct_mutex);
+       list_del_init(&bo->lru);
+       mutex_unlock(&dev->struct_mutex);
+
+       /*
+        * Determine where to move the buffer.
+        */
+       ret = drm_bo_mem_space(bo, &mem, no_wait);
+       if (ret)
+               goto out_unlock;
+
+       ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
+
+out_unlock:
+       mutex_lock(&dev->struct_mutex);
+       if (ret || !move_unfenced) {
+               if (mem.mm_node) {
+                       if (mem.mm_node != bo->pinned_node)
+                               drm_mm_put_block(mem.mm_node);
+                       mem.mm_node = NULL;
+               }
+               drm_bo_add_to_lru(bo);
+               if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+                       wake_up_all(&bo->event_queue);
+                       DRM_FLAG_MASKED(bo->priv_flags, 0,
+                                       _DRM_BO_FLAG_UNFENCED);
+               }
+       } else {
+               list_add_tail(&bo->lru, &bm->unfenced);
+               DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+                               _DRM_BO_FLAG_UNFENCED);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&bm->evict_mutex);
+       return ret;
+}
+
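+/*
+ * Check whether the buffer's current placement (mem->flags) already
+ * satisfies the requested mask (mem->mask). Returns 1 if no move is needed.
+ */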
+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
+{
+       uint32_t flag_diff = (mem->mask ^ mem->flags);
+
+       if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+               return 0;
+       if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+           (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
+            (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
+               return 0;
+
+       if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+           ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
+            (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+               return 0;
+       return 1;
+}
+
+/*
+ * bo locked.
+ */
+
+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
+                                     uint32_t fence_class,
+                                     int move_unfenced, int no_wait)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       uint32_t ftype;
+       int ret;
+
+       DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
+                 (unsigned long long) bo->mem.mask,
+                 (unsigned long long) bo->mem.flags);
+
+       ret = driver->fence_type(bo, &fence_class, &ftype);
+
+       if (ret) {
+               DRM_ERROR("Driver does not support the given buffer permissions.\n");
+               return ret;
+       }
+
+       /*
+        * We're switching command submission mechanism,
+        * or cannot simply rely on the hardware serializing for us.
+        *
+        * Insert a driver-dependent barrier or wait for buffer idle.
+        */
+
+       if ((fence_class != bo->fence_class) ||
+           ((ftype ^ bo->fence_type) & bo->fence_type)) {
+
+               ret = -EINVAL;
+               if (driver->command_stream_barrier) {
+                       ret = driver->command_stream_barrier(bo,
+                                                            fence_class,
+                                                            ftype,
+                                                            no_wait);
+               }
+               if (ret)
+                       ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+               if (ret)
+                       return ret;
+
+       }
+
+       bo->new_fence_class = fence_class;
+       bo->new_fence_type = ftype;
+
+       ret = drm_bo_wait_unmapped(bo, no_wait);
+       if (ret) {
+               DRM_ERROR("Timed out waiting for buffer unmap.\n");
+               return ret;
+       }
+
+       /*
+        * Check whether we need to move buffer.
+        */
+
+       if (!drm_bo_mem_compat(&bo->mem)) {
+               ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+                                        move_unfenced);
+               if (ret) {
+                       if (ret != -EAGAIN)
+                               DRM_ERROR("Failed moving buffer.\n");
+                       if (ret == -ENOMEM)
+                               DRM_ERROR("Out of aperture space.\n");
+                       return ret;
+               }
+       }
+
+       /*
+        * Pinned buffers.
+        */
+
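+       /*
+        * NO_EVICT/NO_MOVE buffers keep their memory node in pinned_node,
+        * which is released only when the buffer is unpinned.
+        */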
+       if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+               bo->pinned_mem_type = bo->mem.mem_type;
+               mutex_lock(&dev->struct_mutex);
+               list_del_init(&bo->pinned_lru);
+               drm_bo_add_to_pinned_lru(bo);
+
+               if (bo->pinned_node != bo->mem.mm_node) {
+                       if (bo->pinned_node != NULL)
+                               drm_mm_put_block(bo->pinned_node);
+                       bo->pinned_node = bo->mem.mm_node;
+               }
+
+               mutex_unlock(&dev->struct_mutex);
+
+       } else if (bo->pinned_node != NULL) {
+
+               mutex_lock(&dev->struct_mutex);
+
+               if (bo->pinned_node != bo->mem.mm_node)
+                       drm_mm_put_block(bo->pinned_node);
+
+               list_del_init(&bo->pinned_lru);
+               bo->pinned_node = NULL;
+               mutex_unlock(&dev->struct_mutex);
+
+       }
+
+       /*
+        * We might need to add a TTM.
+        */
+
+       if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
+               ret = drm_bo_add_ttm(bo);
+               if (ret)
+                       return ret;
+       }
+       DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
+
+       /*
+        * Finally, adjust lru to be sure.
+        */
+
+       mutex_lock(&dev->struct_mutex);
+       list_del(&bo->lru);
+       if (move_unfenced) {
+               list_add_tail(&bo->lru, &bm->unfenced);
+               DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+                               _DRM_BO_FLAG_UNFENCED);
+       } else {
+               drm_bo_add_to_lru(bo);
+               if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+                       wake_up_all(&bo->event_queue);
+                       DRM_FLAG_MASKED(bo->priv_flags, 0,
+                                       _DRM_BO_FLAG_UNFENCED);
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+                      uint64_t flags, uint64_t mask, uint32_t hint,
+                      uint32_t fence_class,
+                      int no_wait,
+                      struct drm_bo_info_rep *rep)
+{
+       int ret;
+
+       mutex_lock(&bo->mutex);
+       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+
+       if (ret)
+               goto out;
+
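+       /* Bits outside 'mask' are preserved from the buffer's current mask. */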
+       DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
+       ret = drm_bo_new_mask(bo, flags, mask);
+       if (ret)
+               goto out;
+
+       ret = drm_buffer_object_validate(bo,
+                                        fence_class,
+                                        !(hint & DRM_BO_HINT_DONT_FENCE),
+                                        no_wait);
+out:
+       if (rep)
+               drm_bo_fill_rep_arg(bo, rep);
+
+       mutex_unlock(&bo->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_bo_do_validate);
+
+
+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
+                          uint32_t fence_class,
+                          uint64_t flags, uint64_t mask,
+                          uint32_t hint,
+                          int use_old_fence_class,
+                          struct drm_bo_info_rep *rep,
+                          struct drm_buffer_object **bo_rep)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
+       int ret;
+       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+       mutex_lock(&dev->struct_mutex);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!bo)
+               return -EINVAL;
+
+       if (use_old_fence_class)
+               fence_class = bo->fence_class;
+
+       /*
+        * Only allow creator to change shared buffer mask.
+        */
+
+       if (bo->base.owner != file_priv)
+               mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
+
+
+       ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
+                                no_wait, rep);
+
+       if (!ret && bo_rep)
+               *bo_rep = bo;
+       else
+               drm_bo_usage_deref_unlocked(&bo);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_bo_handle_validate);
+
+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
+                             struct drm_bo_info_rep *rep)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
+
+       mutex_lock(&dev->struct_mutex);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!bo)
+               return -EINVAL;
+
+       mutex_lock(&bo->mutex);
+       if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+               (void)drm_bo_busy(bo);
+       drm_bo_fill_rep_arg(bo, rep);
+       mutex_unlock(&bo->mutex);
+       drm_bo_usage_deref_unlocked(&bo);
+       return 0;
+}
+
+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
+                             uint32_t hint,
+                             struct drm_bo_info_rep *rep)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
+       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!bo)
+               return -EINVAL;
+
+       mutex_lock(&bo->mutex);
+       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+       if (ret)
+               goto out;
+       ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
+       if (ret)
+               goto out;
+
+       drm_bo_fill_rep_arg(bo, rep);
+
+out:
+       mutex_unlock(&bo->mutex);
+       drm_bo_usage_deref_unlocked(&bo);
+       return ret;
+}
+
+static int drm_bo_reserve_size(struct drm_device *dev,
+                              int user_bo,
+                              unsigned long num_pages,
+                              unsigned long *size)
+{
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+
+       *size = drm_size_align(sizeof(struct drm_buffer_object)) +
+               /* Always account for a TTM, even for fixed memory types */
+               drm_ttm_size(dev, num_pages, user_bo) +
+               /* user space mapping structure */
+               drm_size_align(sizeof(drm_local_map_t)) +
+               /* file offset space, aperture space, pinned space */
+               3*drm_size_align(sizeof(struct drm_mm_node *)) +
+               /* ttm backend */
+               driver->backend_size(dev, num_pages);
+
+       return drm_alloc_memctl(*size);
+}
+
+int drm_buffer_object_create(struct drm_device *dev,
+                            unsigned long size,
+                            enum drm_bo_type type,
+                            uint64_t mask,
+                            uint32_t hint,
+                            uint32_t page_alignment,
+                            unsigned long buffer_start,
+                            struct drm_buffer_object **buf_obj)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_buffer_object *bo;
+       int ret = 0;
+       unsigned long num_pages;
+       unsigned long reserved_size;
+
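+       /* Account for the sub-page offset of buffer_start in the page count. */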
+       size += buffer_start & ~PAGE_MASK;
+       num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (num_pages == 0) {
+               DRM_ERROR("Illegal buffer object size.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
+                                 num_pages, &reserved_size);
+
+       if (ret) {
+               DRM_DEBUG("Failed reserving space for buffer object.\n");
+               return ret;
+       }
+
+       bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
+
+       if (!bo) {
+               /* Release the memctl reservation taken above. */
+               drm_bo_unreserve_size(reserved_size);
+               return -ENOMEM;
+       }
+
+       mutex_init(&bo->mutex);
+       mutex_lock(&bo->mutex);
+
+       bo->reserved_size = reserved_size;
+       atomic_set(&bo->usage, 1);
+       atomic_set(&bo->mapped, -1);
+       DRM_INIT_WAITQUEUE(&bo->event_queue);
+       INIT_LIST_HEAD(&bo->lru);
+       INIT_LIST_HEAD(&bo->pinned_lru);
+       INIT_LIST_HEAD(&bo->ddestroy);
+#ifdef DRM_ODD_MM_COMPAT
+       INIT_LIST_HEAD(&bo->p_mm_list);
+       INIT_LIST_HEAD(&bo->vma_list);
+#endif
+       bo->dev = dev;
+       bo->type = type;
+       bo->num_pages = num_pages;
+       bo->mem.mem_type = DRM_BO_MEM_LOCAL;
+       bo->mem.num_pages = bo->num_pages;
+       bo->mem.mm_node = NULL;
+       bo->mem.page_alignment = page_alignment;
+       bo->buffer_start = buffer_start & PAGE_MASK;
+       bo->priv_flags = 0;
+       bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+               DRM_BO_FLAG_MAPPABLE;
+       bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+               DRM_BO_FLAG_MAPPABLE;
+       atomic_inc(&bm->count);
+       ret = drm_bo_new_mask(bo, mask, mask);
+       if (ret)
+               goto out_err;
+
+       if (bo->type == drm_bo_type_dc) {
+               mutex_lock(&dev->struct_mutex);
+               ret = drm_bo_setup_vm_locked(bo);
+               mutex_unlock(&dev->struct_mutex);
+               if (ret)
+                       goto out_err;
+       }
+
+       ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
+       if (ret)
+               goto out_err;
+
+       mutex_unlock(&bo->mutex);
+       *buf_obj = bo;
+       return 0;
+
+out_err:
+       mutex_unlock(&bo->mutex);
+
+       drm_bo_usage_deref_unlocked(&bo);
+       return ret;
+}
+EXPORT_SYMBOL(drm_buffer_object_create);
+
+
+static int drm_bo_add_user_object(struct drm_file *file_priv,
+                                 struct drm_buffer_object *bo, int shareable)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_add_user_object(file_priv, &bo->base, shareable);
+       if (ret)
+               goto out;
+
+       bo->base.remove = drm_bo_base_deref_locked;
+       bo->base.type = drm_buffer_type;
+       bo->base.ref_struct_locked = NULL;
+       bo->base.unref = drm_buffer_user_object_unmap;
+
+out:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_create_arg *arg = data;
+       struct drm_bo_create_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       struct drm_buffer_object *entry;
+       enum drm_bo_type bo_type;
+       int ret = 0;
+
+       DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
+           (int)(req->size / 1024), req->page_alignment * 4);
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
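+       /* A nonzero buffer_start means the storage is user memory (drm_bo_type_user). */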
+       bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
+
+       if (bo_type == drm_bo_type_user)
+               req->mask &= ~DRM_BO_FLAG_SHAREABLE;
+
+       ret = drm_buffer_object_create(file_priv->head->dev,
+                                      req->size, bo_type, req->mask,
+                                      req->hint, req->page_alignment,
+                                      req->buffer_start, &entry);
+       if (ret)
+               goto out;
+
+       ret = drm_bo_add_user_object(file_priv, entry,
+                                    req->mask & DRM_BO_FLAG_SHAREABLE);
+       if (ret) {
+               drm_bo_usage_deref_unlocked(&entry);
+               goto out;
+       }
+
+       mutex_lock(&entry->mutex);
+       drm_bo_fill_rep_arg(entry, rep);
+       mutex_unlock(&entry->mutex);
+
+out:
+       return ret;
+}
+
+int drm_bo_setstatus_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_map_wait_idle_arg *arg = data;
+       struct drm_bo_info_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+       if (ret)
+               return ret;
+
+       ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
+                                    req->flags,
+                                    req->mask,
+                                    req->hint | DRM_BO_HINT_DONT_FENCE,
+                                    1,
+                                    rep, NULL);
+
+       (void) drm_bo_read_unlock(&dev->bm.bm_lock);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_map_wait_idle_arg *arg = data;
+       struct drm_bo_info_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
+                                   req->hint, rep);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_handle_arg *arg = data;
+       int ret;
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_buffer_object_unmap(file_priv, arg->handle);
+       return ret;
+}
+
+
+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_reference_info_arg *arg = data;
+       struct drm_bo_handle_arg *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       struct drm_user_object *uo;
+       int ret;
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_user_object_ref(file_priv, req->handle,
+                                 drm_buffer_type, &uo);
+       if (ret)
+               return ret;
+
+       ret = drm_bo_handle_info(file_priv, req->handle, rep);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_handle_arg *arg = data;
+       int ret = 0;
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
+       return ret;
+}
+
+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_reference_info_arg *arg = data;
+       struct drm_bo_handle_arg *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_handle_info(file_priv, req->handle, rep);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_map_wait_idle_arg *arg = data;
+       struct drm_bo_info_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_handle_wait(file_priv, req->handle,
+                                req->hint, rep);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int drm_bo_leave_list(struct drm_buffer_object *bo,
+                            uint32_t mem_type,
+                            int free_pinned,
+                            int allow_errors)
+{
+       struct drm_device *dev = bo->dev;
+       int ret = 0;
+
+       mutex_lock(&bo->mutex);
+
+       ret = drm_bo_expire_fence(bo, allow_errors);
+       if (ret)
+               goto out;
+
+       if (free_pinned) {
+               DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
+               mutex_lock(&dev->struct_mutex);
+               list_del_init(&bo->pinned_lru);
+               if (bo->pinned_node == bo->mem.mm_node)
+                       bo->pinned_node = NULL;
+               if (bo->pinned_node != NULL) {
+                       drm_mm_put_block(bo->pinned_node);
+                       bo->pinned_node = NULL;
+               }
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
+               DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer was present at "
+                         "cleanup. Removing the flag and evicting.\n");
+               bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
+               bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+       }
+
+       if (bo->mem.mem_type == mem_type)
+               ret = drm_bo_evict(bo, mem_type, 0);
+
+       if (ret) {
+               if (allow_errors) {
+                       goto out;
+               } else {
+                       ret = 0;
+                       DRM_ERROR("Cleanup eviction failed\n");
+               }
+       }
+
+out:
+       mutex_unlock(&bo->mutex);
+       return ret;
+}
+
+
+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
+                                        int pinned_list)
+{
+       if (pinned_list)
+               return list_entry(list, struct drm_buffer_object, pinned_lru);
+       else
+               return list_entry(list, struct drm_buffer_object, lru);
+}
+
+/*
+ * dev->struct_mutex locked.
+ */
+
+static int drm_bo_force_list_clean(struct drm_device *dev,
+                                  struct list_head *head,
+                                  unsigned mem_type,
+                                  int free_pinned,
+                                  int allow_errors,
+                                  int pinned_list)
+{
+       struct list_head *list, *next, *prev;
+       struct drm_buffer_object *entry, *nentry;
+       int ret;
+       int do_restart;
+
+       /*
+        * The list traversal is a bit odd here, because an item may
+        * disappear from the list when we release the struct_mutex or
+        * when we decrease the usage count. Also we're not guaranteed
+        * to drain pinned lists, so we can't always restart.
+        */
+
+restart:
+       nentry = NULL;
+       list_for_each_safe(list, next, head) {
+               prev = list->prev;
+
+               entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
+               atomic_inc(&entry->usage);
+               if (nentry) {
+                       atomic_dec(&nentry->usage);
+                       nentry = NULL;
+               }
+
+               /*
+                * Protect the next item from destruction, so we can check
+                * its list pointers later on.
+                */
+
+               if (next != head) {
+                       nentry = drm_bo_entry(next, pinned_list);
+                       atomic_inc(&nentry->usage);
+               }
+               mutex_unlock(&dev->struct_mutex);
+
+               ret = drm_bo_leave_list(entry, mem_type, free_pinned,
+                                       allow_errors);
+               mutex_lock(&dev->struct_mutex);
+
+               drm_bo_usage_deref_locked(&entry);
+               if (ret)
+                       return ret;
+
+               /*
+                * Has the next item disappeared from the list?
+                */
+
+               do_restart = ((next->prev != list) && (next->prev != prev));
+
+               if (nentry != NULL && do_restart)
+                       drm_bo_usage_deref_locked(&nentry);
+
+               if (do_restart)
+                       goto restart;
+       }
+       return 0;
+}
+
+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem_type];
+       int ret = -EINVAL;
+
+       if (mem_type >= DRM_BO_MEM_TYPES) {
+               DRM_ERROR("Illegal memory type %d\n", mem_type);
+               return ret;
+       }
+
+       if (!man->has_type) {
+               DRM_ERROR("Trying to take down uninitialized "
+                         "memory manager type %u\n", mem_type);
+               return ret;
+       }
+       man->use_type = 0;
+       man->has_type = 0;
+
+       ret = 0;
+       if (mem_type > 0) {
+               BUG_ON(!list_empty(&bm->unfenced));
+               drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
+               drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
+
+               if (drm_mm_clean(&man->manager)) {
+                       drm_mm_takedown(&man->manager);
+               } else {
+                       ret = -EBUSY;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_bo_clean_mm);
+
+/**
+ * Evict all buffers of a particular mem_type, but leave memory manager
+ * regions for NO_MOVE buffers intact. New buffers cannot be added at this
+ * point since we have the hardware lock.
+ */
+
+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
+{
+       int ret;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem_type];
+
+       if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
+               DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
+               return -EINVAL;
+       }
+
+       if (!man->has_type) {
+               DRM_ERROR("Memory type %u has not been initialized.\n",
+                         mem_type);
+               return 0;
+       }
+
+       ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
+       if (ret)
+               return ret;
+       ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
+
+       return ret;
+}
+
+int drm_bo_init_mm(struct drm_device *dev,
+                  unsigned type,
+                  unsigned long p_offset, unsigned long p_size)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       int ret = -EINVAL;
+       struct drm_mem_type_manager *man;
+
+       if (type >= DRM_BO_MEM_TYPES) {
+               DRM_ERROR("Illegal memory type %d\n", type);
+               return ret;
+       }
+
+       man = &bm->man[type];
+       if (man->has_type) {
+               DRM_ERROR("Memory manager already initialized for type %d\n",
+                         type);
+               return ret;
+       }
+
+       ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
+       if (ret)
+               return ret;
+
+       ret = 0;
+       if (type != DRM_BO_MEM_LOCAL) {
+               if (!p_size) {
+                       DRM_ERROR("Zero size memory manager type %d\n", type);
+                       return ret;
+               }
+               ret = drm_mm_init(&man->manager, p_offset, p_size);
+               if (ret)
+                       return ret;
+       }
+       man->has_type = 1;
+       man->use_type = 1;
+
+       INIT_LIST_HEAD(&man->lru);
+       INIT_LIST_HEAD(&man->pinned);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_init_mm);
+
+/*
+ * This function is intended to be called on drm driver unload.
+ * If you decide to call it from lastclose, you must protect the call
+ * from a potentially racing drm_bo_driver_init in firstopen.
+ * (This may happen on X server restart).
+ */
+
+int drm_bo_driver_finish(struct drm_device *dev)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       int ret = 0;
+       unsigned i = DRM_BO_MEM_TYPES;
+       struct drm_mem_type_manager *man;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!bm->initialized)
+               goto out;
+       bm->initialized = 0;
+
+       while (i--) {
+               man = &bm->man[i];
+               if (man->has_type) {
+                       man->use_type = 0;
+                       if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
+                               ret = -EBUSY;
+                               DRM_ERROR("DRM memory manager type %d "
+                                         "is not clean.\n", i);
+                       }
+                       man->has_type = 0;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+
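+       /* Make sure the delayed-delete worker is idle before teardown. */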
+       if (!cancel_delayed_work(&bm->wq))
+               flush_scheduled_work();
+
+       mutex_lock(&dev->struct_mutex);
+       drm_bo_delayed_delete(dev, 1);
+       if (list_empty(&bm->ddestroy))
+               DRM_DEBUG("Delayed destroy list was clean\n");
+
+       if (list_empty(&bm->man[0].lru))
+               DRM_DEBUG("Swap list was clean\n");
+
+       if (list_empty(&bm->man[0].pinned))
+               DRM_DEBUG("NO_MOVE list was clean\n");
+
+       if (list_empty(&bm->unfenced))
+               DRM_DEBUG("Unfenced list was clean\n");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+       ClearPageReserved(bm->dummy_read_page);
+#endif
+       __free_page(bm->dummy_read_page);
+
+out:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_bo_driver_finish);
+
+/*
+ * This function is intended to be called on drm driver load.
+ * If you decide to call it from firstopen, you must protect the call
+ * from a potentially racing drm_bo_driver_finish in lastclose.
+ * (This may happen on X server restart).
+ */
+
+int drm_bo_driver_init(struct drm_device *dev)
+{
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       struct drm_buffer_manager *bm = &dev->bm;
+       int ret = -EINVAL;
+
+       bm->dummy_read_page = NULL;
+       drm_bo_init_lock(&bm->bm_lock);
+       mutex_lock(&dev->struct_mutex);
+       if (!driver)
+               goto out_unlock;
+
+       bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+       if (!bm->dummy_read_page) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+       SetPageReserved(bm->dummy_read_page);
+#endif
+
+       /*
+        * Initialize the system memory buffer type.
+        * Other types need to be driver / IOCTL initialized.
+        */
+       ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
+       if (ret)
+               goto out_unlock;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+       INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
+#else
+       INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
+#endif
+       bm->initialized = 1;
+       bm->nice_mode = 1;
+       atomic_set(&bm->count, 0);
+       bm->cur_pages = 0;
+       INIT_LIST_HEAD(&bm->unfenced);
+       INIT_LIST_HEAD(&bm->ddestroy);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_bo_driver_init);
+
+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_mm_init_arg *arg = data;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
+
+       if (!driver) {
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
+       if (ret)
+               return ret;
+
+       ret = -EINVAL;
+       if (arg->magic != DRM_BO_INIT_MAGIC) {
+               DRM_ERROR("You are using an old libdrm that is not compatible with\n"
+                         "\tthe kernel DRM module. Please upgrade your libdrm.\n");
+               (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
+               return -EINVAL;
+       }
+       if (arg->major != DRM_BO_INIT_MAJOR) {
+               DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
+                         "\tversion don't match. Got %d, expected %d.\n",
+                         arg->major, DRM_BO_INIT_MAJOR);
+               (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (!bm->initialized) {
+               DRM_ERROR("DRM memory manager was not initialized.\n");
+               goto out;
+       }
+       if (arg->mem_type == 0) {
+               DRM_ERROR("System memory buffers already initialized.\n");
+               goto out;
+       }
+       ret = drm_bo_init_mm(dev, arg->mem_type,
+                            arg->p_offset, arg->p_size);
+
+out:
+       mutex_unlock(&dev->struct_mutex);
+       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
+
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_mm_type_arg *arg = data;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
+
+       if (!driver) {
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
+       if (ret)
+               return ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = -EINVAL;
+       if (!bm->initialized) {
+               DRM_ERROR("DRM memory manager was not initialized\n");
+               goto out;
+       }
+       if (arg->mem_type == 0) {
+               DRM_ERROR("No takedown for system memory buffers.\n");
+               goto out;
+       }
+       ret = 0;
+       if (drm_bo_clean_mm(dev, arg->mem_type)) {
+               DRM_ERROR("Memory manager type %d not clean. "
+                         "Delaying takedown\n", arg->mem_type);
+       }
+out:
+       mutex_unlock(&dev->struct_mutex);
+       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
+
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_mm_type_arg *arg = data;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
+
+       if (!driver) {
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
+               return -EINVAL;
+       }
+
+       if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
+               DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
+               return -EINVAL;
+       }
+
+       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
+               ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
+               if (ret)
+                       return ret;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_bo_lock_mm(dev, arg->mem_type);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret) {
+               (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
+               return ret;
+       }
+
+       return 0;
+}
+
+int drm_mm_unlock_ioctl(struct drm_device *dev,
+                       void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mm_type_arg *arg = data;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
+
+       if (!driver) {
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
+               return -EINVAL;
+       }
+
+       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
+               ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * buffer object vm functions.
+ */
+
+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
+
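+       /*
+        * Fixed memory types are always PCI-addressable. Otherwise local,
+        * CMA and cached memory is backed by system pages and is not.
+        */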
+       if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+               if (mem->mem_type == DRM_BO_MEM_LOCAL)
+                       return 0;
+
+               if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
+                       return 0;
+
+               if (mem->flags & DRM_BO_FLAG_CACHED)
+                       return 0;
+       }
+       return 1;
+}
+EXPORT_SYMBOL(drm_mem_reg_is_pci);
+
+/**
+ * Get the PCI offset for the buffer object memory.
+ *
+ * \param dev The drm device.
+ * \param mem The buffer object memory region.
+ * \param bus_base On return the base of the PCI region
+ * \param bus_offset On return the byte offset into the PCI region
+ * \param bus_size On return the byte size of the buffer object or zero if
+ *     the buffer object memory is not accessible through a PCI region.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the buffer object is currently not mappable.
+ * Otherwise returns zero.
+ */
+
+int drm_bo_pci_offset(struct drm_device *dev,
+                     struct drm_bo_mem_reg *mem,
+                     unsigned long *bus_base,
+                     unsigned long *bus_offset, unsigned long *bus_size)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
+
+       *bus_size = 0;
+       if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
+               return -EINVAL;
+
+       if (drm_mem_reg_is_pci(dev, mem)) {
+               *bus_offset = mem->mm_node->start << PAGE_SHIFT;
+               *bus_size = mem->num_pages << PAGE_SHIFT;
+               *bus_base = man->io_offset;
+       }
+
+       return 0;
+}
+
+/**
+ * Kill all user-space virtual mappings of this buffer object.
+ *
+ * \param bo The buffer object.
+ *
+ * Call bo->mutex locked.
+ */
+
+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
+{
+       struct drm_device *dev = bo->dev;
+       loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
+       loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+       if (!dev->dev_mapping)
+               return;
+
+       unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
+}
+
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
+{
+       struct drm_map_list *list;
+       drm_local_map_t *map;
+       struct drm_device *dev = bo->dev;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       if (bo->type != drm_bo_type_dc)
+               return;
+
+       list = &bo->map_list;
+       if (list->user_token) {
+               drm_ht_remove_item(&dev->map_hash, &list->hash);
+               list->user_token = 0;
+       }
+       if (list->file_offset_node) {
+               drm_mm_put_block(list->file_offset_node);
+               list->file_offset_node = NULL;
+       }
+
+       map = list->map;
+       if (!map)
+               return;
+
+       drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
+       list->map = NULL;
+       list->user_token = 0ULL;
+       drm_bo_usage_deref_locked(&bo);
+}
+
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
+{
+       struct drm_map_list *list = &bo->map_list;
+       drm_local_map_t *map;
+       struct drm_device *dev = bo->dev;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
+       if (!list->map)
+               return -ENOMEM;
+
+       map = list->map;
+       map->offset = 0;
+       map->type = _DRM_TTM;
+       map->flags = _DRM_REMOVABLE;
+       map->size = bo->mem.num_pages * PAGE_SIZE;
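+       /* The map holds a usage reference, dropped in drm_bo_takedown_vm_locked(). */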
+       atomic_inc(&bo->usage);
+       map->handle = (void *)bo;
+
+       list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+                                                   bo->mem.num_pages, 0, 0);
+
+       if (!list->file_offset_node) {
+               drm_bo_takedown_vm_locked(bo);
+               return -ENOMEM;
+       }
+
+       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+                                                 bo->mem.num_pages, 0);
+       if (!list->file_offset_node) {
+               drm_bo_takedown_vm_locked(bo);
+               return -ENOMEM;
+       }
+
+       list->hash.key = list->file_offset_node->start;
+       if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
+               drm_bo_takedown_vm_locked(bo);
+               return -ENOMEM;
+       }
+
+       list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+       return 0;
+}
+
+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
+
+       arg->major = DRM_BO_INIT_MAJOR;
+       arg->minor = DRM_BO_INIT_MINOR;
+       arg->patchlevel = DRM_BO_INIT_PATCH;
+
+       return 0;
+}
diff --git a/psb-kernel-source-4.41.1/drm_bo_lock.c b/psb-kernel-source-4.41.1/drm_bo_lock.c
new file mode 100644 (file)
index 0000000..4162855
--- /dev/null
@@ -0,0 +1,189 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * This file implements a simple replacement for the buffer manager use
+ * of the heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode is fast, and
+ * intended for in-kernel use only.
+ * Taking it in write mode is slow.
+ *
+ * The write mode is used only when there is a need to block all
+ * user-space processes from allocating a
+ * new memory area.
+ * Typical use in write mode is X server VT switching, and it's allowed
+ * to leave kernel space with the write lock held. If a user-space process
+ * dies while having the write-lock, it will be released during the file
+ * descriptor release.
+ *
+ * The read lock is typically placed at the start of an IOCTL- or
+ * user-space callable function that may end up allocating a memory area.
+ * This includes setstatus, super-ioctls and no_pfn; the latter may move
+ * unmappable regions to mappable. It's a bug to leave kernel space with the
+ * read lock held.
+ *
+ * Taking the lock in either read or write mode may be interruptible, for low
+ * signal-delivery latency. The locking functions return -EAGAIN if they are
+ * interrupted by a signal.
+ *
+ * Locking order: The lock should be taken BEFORE any kernel mutexes
+ * or spinlocks.
+ */
+
+#include "drmP.h"
+
+void drm_bo_init_lock(struct drm_bo_lock *lock)
+{
+       DRM_INIT_WAITQUEUE(&lock->queue);
+       atomic_set(&lock->write_lock_pending, 0);
+       atomic_set(&lock->readers, 0);
+}
+
+void drm_bo_read_unlock(struct drm_bo_lock *lock)
+{
+       if (atomic_dec_and_test(&lock->readers))
+               wake_up_all(&lock->queue);
+}
+EXPORT_SYMBOL(drm_bo_read_unlock);
+
+int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
+{
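+       /*
+        * Give pending writers priority first, then take a reader slot;
+        * readers == -1 means the lock is held for writing.
+        */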
+       while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
+               int ret;
+
+               if (!interruptible) {
+                       wait_event(lock->queue,
+                                  atomic_read(&lock->write_lock_pending) == 0);
+                       continue;
+               }
+               ret = wait_event_interruptible
+                   (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
+               if (ret)
+                       return -EAGAIN;
+       }
+
+       while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
+               int ret;
+               if (!interruptible) {
+                       wait_event(lock->queue,
+                                  atomic_read(&lock->readers) != -1);
+                       continue;
+               }
+               ret = wait_event_interruptible
+                       (lock->queue, atomic_read(&lock->readers) != -1);
+               if (ret)
+                       return -EAGAIN;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_read_lock);
+
+static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
+{
+       if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
+               return -EINVAL;
+       wake_up_all(&lock->queue);
+       return 0;
+}
+
+static void drm_bo_write_lock_remove(struct drm_file *file_priv,
+                                    struct drm_user_object *item)
+{
+       struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
+       int ret;
+
+       ret = __drm_bo_write_unlock(lock);
+       BUG_ON(ret);
+}
+
+int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
+                     struct drm_file *file_priv)
+{
+       int ret = 0;
+       struct drm_device *dev;
+
+       atomic_inc(&lock->write_lock_pending);
+
+       while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
+               if (!interruptible) {
+                       wait_event(lock->queue,
+                                  atomic_read(&lock->readers) == 0);
+                       continue;
+               }
+               ret = wait_event_interruptible
+                   (lock->queue, atomic_read(&lock->readers) == 0);
+
+               if (ret) {
+                       atomic_dec(&lock->write_lock_pending);
+                       wake_up_all(&lock->queue);
+                       return -EAGAIN;
+               }
+       }
+
+       /*
+        * Add a dummy user-object, the destructor of which will
+        * make sure the lock is released if the client dies
+        * while holding it.
+        */
+
+       if (atomic_dec_and_test(&lock->write_lock_pending))
+               wake_up_all(&lock->queue);
+       dev = file_priv->head->dev;
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_add_user_object(file_priv, &lock->base, 0);
+       lock->base.remove = &drm_bo_write_lock_remove;
+       lock->base.type = drm_lock_type;
+       if (ret)
+               (void)__drm_bo_write_unlock(lock);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_ref_object *ro;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (lock->base.owner != file_priv) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
+       BUG_ON(!ro);
+       drm_remove_ref_object(file_priv, ro);
+       lock->base.owner = NULL;
+
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
diff --git a/psb-kernel-source-4.41.1/drm_bo_move.c b/psb-kernel-source-4.41.1/drm_bo_move.c
new file mode 100644 (file)
index 0000000..b6b75f2
--- /dev/null
@@ -0,0 +1,597 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/**
+ * Free the old memory node unless it's a pinned region and we
+ * have not been requested to free also pinned regions.
+ */
+
+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
+{
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+
+       if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
+               mutex_lock(&bo->dev->struct_mutex);
+               drm_mm_put_block(old_mem->mm_node);
+               mutex_unlock(&bo->dev->struct_mutex);
+       }
+       old_mem->mm_node = NULL;
+}
+
+int drm_bo_move_ttm(struct drm_buffer_object *bo,
+                   int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+{
+       struct drm_ttm *ttm = bo->ttm;
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+       uint64_t save_flags = old_mem->flags;
+       uint64_t save_mask = old_mem->mask;
+       int ret;
+
+       if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
+               if (evict)
+                       drm_ttm_evict(ttm);
+               else
+                       drm_ttm_unbind(ttm);
+
+               drm_bo_free_old_node(bo);
+               DRM_FLAG_MASKED(old_mem->flags,
+                               DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
+                               DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
+               old_mem->mem_type = DRM_BO_MEM_LOCAL;
+               save_flags = old_mem->flags;
+       }
+       if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
+               ret = drm_bind_ttm(ttm, new_mem);
+               if (ret)
+                       return ret;
+       }
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+       old_mem->mask = save_mask;
+       DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_move_ttm);
+
+/**
+ * Return a kernel virtual address for the buffer object PCI memory.
+ *
+ * \param dev The DRM device.
+ * \param mem The memory region to map.
+ * \param virtual Receives the kernel virtual address.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the buffer object is currently not mappable.
+ * Returns -ENOMEM if the ioremap operation failed.
+ * Otherwise returns zero.
+ *
+ * After a successful call, *virtual contains the virtual address, or NULL
+ * if the buffer object content is not accessible through PCI space.
+ * Call with bo->mutex locked.
+ */
+
+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
+                       void **virtual)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       unsigned long bus_base;
+       int ret;
+       void *addr;
+
+       *virtual = NULL;
+       ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
+       if (ret || bus_size == 0)
+               return ret;
+
+       if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
+               addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+       else {
+               addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+               if (!addr)
+                       return -ENOMEM;
+       }
+       *virtual = addr;
+       return 0;
+}
+EXPORT_SYMBOL(drm_mem_reg_ioremap);
+
+/**
+ * Unmap a mapping obtained with drm_mem_reg_ioremap.
+ *
+ * \param dev The DRM device.
+ * \param mem The memory region that was mapped.
+ * \param virtual The virtual address returned by drm_mem_reg_ioremap.
+ *
+ * Call with bo->mutex locked.
+ */
+
+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
+                        void *virtual)
+{
+       struct drm_buffer_manager *bm;
+       struct drm_mem_type_manager *man;
+
+       bm = &dev->bm;
+       man = &bm->man[mem->mem_type];
+
+       if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
+               iounmap(virtual);
+}
+EXPORT_SYMBOL(drm_mem_reg_iounmap);
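+
+/*
+ * Usage sketch (illustrative only, not compiled here): map a memory
+ * region, access it through the returned address, then unmap:
+ *
+ *     void *virtual;
+ *     ret = drm_mem_reg_ioremap(dev, &bo->mem, &virtual);
+ *     if (ret)
+ *             return ret;
+ *     if (virtual != NULL) {
+ *             ... read or write the buffer contents via virtual ...
+ *             drm_mem_reg_iounmap(dev, &bo->mem, virtual);
+ *     }
+ */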
+
+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+       uint32_t *dstP =
+           (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+       uint32_t *srcP =
+           (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+       int i;
+       for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+               iowrite32(ioread32(srcP++), dstP++);
+       return 0;
+}
+
+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
+                               unsigned long page)
+{
+       struct page *d = drm_ttm_get_page(ttm, page);
+       void *dst;
+
+       if (!d)
+               return -ENOMEM;
+
+       src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+       dst = kmap(d);
+       if (!dst)
+               return -ENOMEM;
+
+       memcpy_fromio(dst, src, PAGE_SIZE);
+       kunmap(d);
+       return 0;
+}
+
+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
+{
+       struct page *s = drm_ttm_get_page(ttm, page);
+       void *src;
+
+       if (!s)
+               return -ENOMEM;
+
+       dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+       src = kmap(s);
+       if (!src)
+               return -ENOMEM;
+
+       memcpy_toio(dst, src, PAGE_SIZE);
+       kunmap(s);
+       return 0;
+}
+
+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
+                      int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
+       struct drm_ttm *ttm = bo->ttm;
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+       struct drm_bo_mem_reg old_copy = *old_mem;
+       void *old_iomap;
+       void *new_iomap;
+       int ret;
+       uint64_t save_flags = old_mem->flags;
+       uint64_t save_mask = old_mem->mask;
+       unsigned long i;
+       unsigned long page;
+       unsigned long add = 0;
+       int dir;
+
+       ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
+       if (ret)
+               return ret;
+       ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
+       if (ret)
+               goto out;
+
+       if (old_iomap == NULL && new_iomap == NULL)
+               goto out2;
+       if (old_iomap == NULL && ttm == NULL)
+               goto out2;
+
+       add = 0;
+       dir = 1;
+
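+       /*
+        * When both regions are in the same memory type and their ranges
+        * may overlap, copy backwards (memmove-style) so that pages are
+        * not overwritten before they have been read.
+        */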
+       if ((old_mem->mem_type == new_mem->mem_type) &&
+           (new_mem->mm_node->start <
+            old_mem->mm_node->start + old_mem->mm_node->size)) {
+               dir = -1;
+               add = new_mem->num_pages - 1;
+       }
+
+       for (i = 0; i < new_mem->num_pages; ++i) {
+               page = i * dir + add;
+               if (old_iomap == NULL)
+                       ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
+               else if (new_iomap == NULL)
+                       ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
+               else
+                       ret = drm_copy_io_page(new_iomap, old_iomap, page);
+               if (ret)
+                       goto out1;
+       }
+       mb();
+out2:
+       drm_bo_free_old_node(bo);
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+       old_mem->mask = save_mask;
+       DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+
+       if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
+               drm_ttm_unbind(ttm);
+               drm_destroy_ttm(ttm);
+               bo->ttm = NULL;
+       }
+
+out1:
+       drm_mem_reg_iounmap(dev, new_mem, new_iomap);
+out:
+       drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
+       return ret;
+}
+EXPORT_SYMBOL(drm_bo_move_memcpy);
+
+/*
+ * Transfer a buffer object's memory and LRU status to a newly
+ * created object. User-space references remain with the old
+ * object. Call with bo->mutex locked.
+ */
+
+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
+                              struct drm_buffer_object **new_obj)
+{
+       struct drm_buffer_object *fbo;
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+
+       fbo = drm_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
+       if (!fbo)
+               return -ENOMEM;
+
+       *fbo = *bo;
+       mutex_init(&fbo->mutex);
+       mutex_lock(&fbo->mutex);
+       mutex_lock(&dev->struct_mutex);
+
+       DRM_INIT_WAITQUEUE(&bo->event_queue);
+       INIT_LIST_HEAD(&fbo->ddestroy);
+       INIT_LIST_HEAD(&fbo->lru);
+       INIT_LIST_HEAD(&fbo->pinned_lru);
+#ifdef DRM_ODD_MM_COMPAT
+       INIT_LIST_HEAD(&fbo->vma_list);
+       INIT_LIST_HEAD(&fbo->p_mm_list);
+#endif
+
+       fbo->fence = drm_fence_reference_locked(bo->fence);
+       fbo->pinned_node = NULL;
+       fbo->mem.mm_node->private = (void *)fbo;
+       atomic_set(&fbo->usage, 1);
+       atomic_inc(&bm->count);
+       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&fbo->mutex);
+       bo->reserved_size = 0;
+       *new_obj = fbo;
+       return 0;
+}
+
+/*
+ * Since the move is already underway, we need to block signals in this
+ * function; we cannot restart it until the move has finished.
+ */
+
+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
+                             int evict, int no_wait, uint32_t fence_class,
+                             uint32_t fence_type, uint32_t fence_flags,
+                             struct drm_bo_mem_reg *new_mem)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+       int ret;
+       uint64_t save_flags = old_mem->flags;
+       uint64_t save_mask = old_mem->mask;
+       struct drm_buffer_object *old_obj;
+
+       if (bo->fence)
+               drm_fence_usage_deref_unlocked(&bo->fence);
+       ret = drm_fence_object_create(dev, fence_class, fence_type,
+                                     fence_flags | DRM_FENCE_FLAG_EMIT,
+                                     &bo->fence);
+       bo->fence_type = fence_type;
+       if (ret)
+               return ret;
+
+#ifdef DRM_ODD_MM_COMPAT
+       /*
+        * In this mode, we don't allow pipelining a copy blit,
+        * since the buffer will be accessible from user space
+        * the moment we return and rebuild the page tables.
+        *
+        * With normal vm operation, page tables are rebuilt
+        * on demand using fault(), which waits for buffer idle.
+        */
+       if (1)
+#else
+       if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
+                     bo->mem.mm_node != NULL))
+#endif
+       {
+               ret = drm_bo_wait(bo, 0, 1, 0);
+               if (ret)
+                       return ret;
+
+               drm_bo_free_old_node(bo);
+
+               if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
+                       drm_ttm_unbind(bo->ttm);
+                       drm_destroy_ttm(bo->ttm);
+                       bo->ttm = NULL;
+               }
+       } else {
+
+               /* This should help pipeline ordinary buffer moves.
+                *
+                * Hang old buffer memory on a new buffer object,
+                * and leave it to be released when the GPU
+                * operation has completed.
+                */
+
+               ret = drm_buffer_object_transfer(bo, &old_obj);
+
+               if (ret)
+                       return ret;
+
+               if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
+                       old_obj->ttm = NULL;
+               else
+                       bo->ttm = NULL;
+
+               mutex_lock(&dev->struct_mutex);
+               list_del_init(&old_obj->lru);
+               DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+               drm_bo_add_to_lru(old_obj);
+
+               drm_bo_usage_deref_locked(&old_obj);
+               mutex_unlock(&dev->struct_mutex);
+
+       }
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+       old_mem->mask = save_mask;
+       DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
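+
+/*
+ * Usage sketch (illustrative only, not compiled here; the fence class,
+ * type and flag values are driver-specific placeholders): a driver's
+ * move hook emits the hardware blit for the move and then calls
+ *
+ *     ret = drm_bo_move_accel_cleanup(bo, evict, no_wait, fence_class,
+ *                                     fence_type, fence_flags, new_mem);
+ *
+ * which fences the buffer and either waits for idle or pipelines the
+ * release of the old memory area.
+ */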
+
+int drm_bo_same_page(unsigned long offset,
+                    unsigned long offset2)
+{
+       return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
+}
+EXPORT_SYMBOL(drm_bo_same_page);
+
+unsigned long drm_bo_offset_end(unsigned long offset,
+                               unsigned long end)
+{
+       offset = (offset + PAGE_SIZE) & PAGE_MASK;
+       return (end < offset) ? end : offset;
+}
+EXPORT_SYMBOL(drm_bo_offset_end);
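+
+/*
+ * Example (illustrative, assuming 4 KiB pages):
+ * drm_bo_offset_end(0x1234, 0x3000) returns 0x2000, the next page
+ * boundary, while drm_bo_offset_end(0x1234, 0x1800) returns 0x1800,
+ * clamped to end.
+ */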
+
+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
+{
+       pgprot_t tmp = PAGE_KERNEL;
+
+#if defined(__i386__) || defined(__x86_64__)
+#ifdef USE_PAT_WC
+#warning using pat
+       if (drm_use_pat() && map_type == _DRM_TTM) {
+               pgprot_val(tmp) |= _PAGE_PAT;
+               return tmp;
+       }
+#endif
+       if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+               pgprot_val(tmp) |= _PAGE_PCD;
+               pgprot_val(tmp) &= ~_PAGE_PWT;
+       }
+#elif defined(__powerpc__)
+       pgprot_val(tmp) |= _PAGE_NO_CACHE;
+       if (map_type == _DRM_REGISTERS)
+               pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+       if (map_type == _DRM_TTM)
+               tmp = pgprot_writecombine(tmp);
+       else
+               tmp = pgprot_noncached(tmp);
+#endif
+       return tmp;
+}
+
+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
+                         unsigned long bus_offset, unsigned long bus_size,
+                         struct drm_bo_kmap_obj *map)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_bo_mem_reg *mem = &bo->mem;
+       struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+
+       if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+               map->bo_kmap_type = bo_map_premapped;
+               map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+       } else {
+               map->bo_kmap_type = bo_map_iomap;
+               map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+       }
+       return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
+                          unsigned long start_page, unsigned long num_pages,
+                          struct drm_bo_kmap_obj *map)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_bo_mem_reg *mem = &bo->mem;
+       struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+       pgprot_t prot;
+       struct drm_ttm *ttm = bo->ttm;
+       struct page *d;
+       int i;
+
+       BUG_ON(!ttm);
+
+       if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
+
+               /*
+                * We're mapping a single page, and the desired
+                * page protection is consistent with the bo.
+                */
+
+               map->bo_kmap_type = bo_map_kmap;
+               map->page = drm_ttm_get_page(ttm, start_page);
+               map->virtual = kmap(map->page);
+       } else {
+               /*
+                * Populate the part we're mapping.
+                */
+
+               for (i = start_page; i < start_page + num_pages; ++i) {
+                       d = drm_ttm_get_page(ttm, i);
+                       if (!d)
+                               return -ENOMEM;
+               }
+
+               /*
+                * We need to use vmap to get the desired page protection
+                * or to make the buffer object look contiguous.
+                */
+
+               prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
+                       PAGE_KERNEL :
+                       drm_kernel_io_prot(man->drm_bus_maptype);
+               map->bo_kmap_type = bo_map_vmap;
+               map->virtual = vmap(ttm->pages + start_page,
+                                   num_pages, 0, prot);
+       }
+       return (!map->virtual) ? -ENOMEM : 0;
+}
+
+/*
+ * This function is to be used for kernel mapping of buffer objects.
+ * It chooses the appropriate mapping method depending on the memory type
+ * and caching policy the buffer currently has.
+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
+ * consumes vmalloc space. Be restrictive with such mappings.
+ * Mapping single pages usually returns the logical kernel address
+ * (which is fast), but may use slower temporary mappings for high memory
+ * pages or uncached / write-combined pages.
+ *
+ * The function fills in a drm_bo_kmap_obj which can be used to return the
+ * kernel virtual address of the buffer.
+ *
+ * Code servicing a non-privileged user request is only allowed to map one
+ * page at a time. We might need to implement a better scheme to stop such
+ * processes from consuming all vmalloc space.
+ */
+
+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+               unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+       int ret;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+
+       map->virtual = NULL;
+
+       if (num_pages > bo->num_pages)
+               return -EINVAL;
+       if (start_page > bo->num_pages)
+               return -EINVAL;
+#if 0
+       if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+               return -EPERM;
+#endif
+       ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+                               &bus_offset, &bus_size);
+
+       if (ret)
+               return ret;
+
+       if (bus_size == 0) {
+               return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
+       } else {
+               bus_offset += start_page << PAGE_SHIFT;
+               bus_size = num_pages << PAGE_SHIFT;
+               return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+       }
+}
+EXPORT_SYMBOL(drm_bo_kmap);
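+
+/*
+ * Usage sketch (illustrative only, not compiled here): map the first
+ * page of a buffer object, clear it, then unmap it with drm_bo_kunmap
+ * below:
+ *
+ *     struct drm_bo_kmap_obj kmap;
+ *     ret = drm_bo_kmap(bo, 0, 1, &kmap);
+ *     if (ret)
+ *             return ret;
+ *     memset(kmap.virtual, 0, PAGE_SIZE);
+ *     drm_bo_kunmap(&kmap);
+ */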
+
+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
+{
+       if (!map->virtual)
+               return;
+
+       switch (map->bo_kmap_type) {
+       case bo_map_iomap:
+               iounmap(map->virtual);
+               break;
+       case bo_map_vmap:
+               vunmap(map->virtual);
+               break;
+       case bo_map_kmap:
+               kunmap(map->page);
+               break;
+       case bo_map_premapped:
+               break;
+       default:
+               BUG();
+       }
+       map->virtual = NULL;
+       map->page = NULL;
+}
+EXPORT_SYMBOL(drm_bo_kunmap);
diff --git a/psb-kernel-source-4.41.1/drm_bufs.c b/psb-kernel-source-4.41.1/drm_bufs.c
new file mode 100644 (file)
index 0000000..bfd3dd3
--- /dev/null
@@ -0,0 +1,1609 @@
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
+{
+       return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
+
+unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
+{
+       return pci_resource_len(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_len);
+
+struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map)
+{
+       struct drm_map_list *entry;
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && map->type == entry->map->type &&
+                   ((entry->map->offset == map->offset) || 
+                    ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
+                       return entry;
+               }
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(drm_find_matching_map);
+
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
+                         unsigned long user_token, int hashed_handle)
+{
+       int use_hashed_handle;
+
+#if (BITS_PER_LONG == 64)
+       use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+       use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
+
+       if (!use_hashed_handle) {
+               int ret;
+               hash->key = user_token >> PAGE_SHIFT;
+               ret = drm_ht_insert_item(&dev->map_hash, hash);
+               if (ret != -EINVAL)
+                       return ret;
+       }
+       return drm_ht_just_insert_please(&dev->map_hash, hash,
+                                        user_token, 32 - PAGE_SHIFT - 3,
+                                        0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
+}
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
+                          unsigned int size, enum drm_map_type type,
+                          enum drm_map_flags flags,
+                          struct drm_map_list **maplist)
+{
+       struct drm_map *map;
+       struct drm_map_list *list;
+       drm_dma_handle_t *dmah;
+       unsigned long user_token;
+       int ret;
+
+       map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
+       if (!map)
+               return -ENOMEM;
+
+       map->offset = offset;
+       map->size = size;
+       map->flags = flags;
+       map->type = type;
+
+       /* Only allow shared memory to be removable since we only keep enough
+        * bookkeeping information about shared memory to allow for removal
+        * when processes fork.
+        */
+       if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+               drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+               return -EINVAL;
+       }
+       DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
+                 map->offset, map->size, map->type);
+       if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+               drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+               return -EINVAL;
+       }
+       map->mtrr = -1;
+       map->handle = NULL;
+
+       switch (map->type) {
+       case _DRM_REGISTERS:
+       case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+               if (map->offset + (map->size - 1) < map->offset ||
+                   map->offset < virt_to_phys(high_memory)) {
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       return -EINVAL;
+               }
+#endif
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* Some drivers preinitialize some maps, without the X Server
+                * needing to be aware of it.  Therefore, we just return success
+                * when the server tries to create a duplicate map.
+                */
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if (list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                         "mismatched sizes, (%ld vs %ld)\n",
+                                         map->type, map->size,
+                                         list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       *maplist = list;
+                       return 0;
+               }
+
+               if (drm_core_has_MTRR(dev)) {
+                       if (map->type == _DRM_FRAME_BUFFER ||
+                           (map->flags & _DRM_WRITE_COMBINING)) {
+                               map->mtrr = mtrr_add(map->offset, map->size,
+                                                    MTRR_TYPE_WRCOMB, 1);
+                       }
+               }
+               if (map->type == _DRM_REGISTERS) {
+                       map->handle = ioremap(map->offset, map->size);
+                       if (!map->handle) {
+                               drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                               return -ENOMEM;
+                       }
+               }
+               break;
+       case _DRM_SHM:
+               list = drm_find_matching_map(dev, map);
+               if (list != NULL) {
+                       if (list->map->size != map->size) {
+                               DRM_DEBUG("Matching maps of type %d with "
+                                  "mismatched sizes, (%ld vs %ld)\n",
+                                   map->type, map->size, list->map->size);
+                               list->map->size = map->size;
+                       }
+
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       *maplist = list;
+                       return 0;
+               }
+               map->handle = vmalloc_user(map->size);
+               DRM_DEBUG("%lu %d %p\n",
+                         map->size, drm_order(map->size), map->handle);
+               if (!map->handle) {
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       return -ENOMEM;
+               }
+               map->offset = (unsigned long)map->handle;
+               if (map->flags & _DRM_CONTAINS_LOCK) {
+                       /* Prevent a 2nd X Server from creating a 2nd lock */
+                       if (dev->lock.hw_lock != NULL) {
+                               vfree(map->handle);
+                               drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                               return -EBUSY;
+                       }
+                       dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
+               }
+               break;
+       case _DRM_AGP: {
+               struct drm_agp_mem *entry;
+               int valid = 0;
+
+               if (!drm_core_has_AGP(dev)) {
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       return -EINVAL;
+               }
+#ifdef __alpha__
+               map->offset += dev->hose->mem_space->start;
+#endif
+               /* In some cases (i810 driver), user space may have already
+                * added the AGP base itself, because dev->agp->base previously
+                * only got set during AGP enable.  So, only add the base
+                * address if the map's offset isn't already within the
+                * aperture.
+                */
+               if (map->offset < dev->agp->base ||
+                   map->offset > dev->agp->base +
+                   dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+                       map->offset += dev->agp->base;
+               }
+               map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+
+               /* This assumes the DRM is in total control of AGP space.
+                * That's not always the case, as AGP can be under the control
+                * of user space (e.g. the i810 driver), in which case this
+                * loop is skipped; we then only return -EPERM when
+                * dev->agp->memory is actually populated and the offset is
+                * really invalid.
+                */
+               list_for_each_entry(entry, &dev->agp->memory, head) {
+                       if ((map->offset >= entry->bound) &&
+                           (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+                               valid = 1;
+                               break;
+                       }
+               }
+               if (!list_empty(&dev->agp->memory) && !valid) {
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       return -EPERM;
+               }
+               DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
+               break;
+       }
+       case _DRM_SCATTER_GATHER:
+               if (!dev->sg) {
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       return -EINVAL;
+               }
+               map->offset += (unsigned long)dev->sg->virtual;
+               break;
+       case _DRM_CONSISTENT:
+               /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
+                * As we're limiting the address to 2^32-1 (or less),
+                * casting it down to 32 bits is no problem, but we
+                * need to point to a 64-bit variable first. */
+               dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+               if (!dmah) {
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                       return -ENOMEM;
+               }
+               map->handle = dmah->vaddr;
+               map->offset = (unsigned long)dmah->busaddr;
+               kfree(dmah);
+               break;
+       default:
+               drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+               return -EINVAL;
+       }
+
+       list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
+       if (!list) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+               return -EINVAL;
+       }
+       memset(list, 0, sizeof(*list));
+       list->map = map;
+
+       mutex_lock(&dev->struct_mutex);
+       list_add(&list->head, &dev->maplist);
+
+       /* Assign a 32-bit handle */
+
+       user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
+               map->offset;
+       ret = drm_map_handle(dev, &list->hash, user_token, 0);
+
+       if (ret) {
+               if (map->type == _DRM_REGISTERS)
+                       iounmap(map->handle);
+               drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+               drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       list->user_token = list->hash.key << PAGE_SHIFT;
+       mutex_unlock(&dev->struct_mutex);
+
+       *maplist = list;
+       return 0;
+}
+
+int drm_addmap(struct drm_device *dev, unsigned int offset,
+              unsigned int size, enum drm_map_type type,
+              enum drm_map_flags flags, drm_local_map_t ** map_ptr)
+{
+       struct drm_map_list *list;
+       int rc;
+
+       rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+       if (!rc)
+               *map_ptr = list->map;
+       return rc;
+}
+
+EXPORT_SYMBOL(drm_addmap);
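+
+/*
+ * Usage sketch (illustrative only, not compiled here; which PCI resource
+ * holds the register BAR is device-specific, 0 is just an example):
+ *
+ *     drm_local_map_t *regs;
+ *     ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
+ *                      drm_get_resource_len(dev, 0),
+ *                      _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
+ */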
+
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *maplist;
+       int err;
+
+       if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
+               return -EPERM;
+
+       err = drm_addmap_core(dev, map->offset, map->size, map->type,
+                             map->flags, &maplist);
+
+       if (err)
+               return err;
+
+       /* Avoid a warning on 64-bit: this cast isn't very nice, but the
+        * API was fixed long ago, so it's too late to change it now. */
+       map->handle = (void *)(unsigned long)maplist->user_token;
+       return 0;
+}
+
+/**
+ * Remove a map private from the list and deallocate resources if the
+ * mapping isn't in use.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ * Searches for the map on drm_device::maplist, removes it from the list,
+ * checks whether it's being used, and frees any associated resources
+ * (such as MTRRs) if it's not in use.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
+{
+       struct drm_map_list *r_list = NULL, *list_t;
+       drm_dma_handle_t dmah;
+       int found = 0;
+
+       /* Find the list entry for the map and remove it */
+       list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+               if (r_list->map == map) {
+                       list_del(&r_list->head);
+                       drm_ht_remove_key(&dev->map_hash,
+                                         r_list->user_token >> PAGE_SHIFT);
+                       drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               return -EINVAL;
+
+       /* List has wrapped around to the head pointer, or it's empty and we
+        * didn't find anything.
+        */
+
+       switch (map->type) {
+       case _DRM_REGISTERS:
+               iounmap(map->handle);
+               /* FALLTHROUGH */
+       case _DRM_FRAME_BUFFER:
+               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                       int retcode;
+                       retcode = mtrr_del(map->mtrr, map->offset, map->size);
+                       DRM_DEBUG("mtrr_del=%d\n", retcode);
+               }
+               break;
+       case _DRM_SHM:
+               vfree(map->handle);
+               dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
+               break;
+       case _DRM_AGP:
+       case _DRM_SCATTER_GATHER:
+               break;
+       case _DRM_CONSISTENT:
+               dmah.vaddr = map->handle;
+               dmah.busaddr = map->offset;
+               dmah.size = map->size;
+               __drm_pci_free(dev, &dmah);
+               break;
+       case _DRM_TTM:
+               BUG_ON(1);
+       }
+       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
+{
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_rmmap_locked(dev, map);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ */
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_map *request = data;
+       drm_local_map_t *map = NULL;
+       struct drm_map_list *r_list;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               if (r_list->map &&
+                   r_list->user_token == (unsigned long)request->handle &&
+                   r_list->map->flags & _DRM_REMOVABLE) {
+                       map = r_list->map;
+                       break;
+               }
+       }
+
+       /* List has wrapped around to the head pointer, or it's empty and we
+        * didn't find anything.
+        */
+       if (list_empty(&dev->maplist) || !map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       /* Register and framebuffer maps are permanent */
+       if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       ret = drm_rmmap_locked(dev, map);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device *dev,
+                                 struct drm_buf_entry *entry)
+{
+       int i;
+
+       if (entry->seg_count) {
+               for (i = 0; i < entry->seg_count; i++) {
+                       if (entry->seglist[i]) {
+                               drm_pci_free(dev, entry->seglist[i]);
+                       }
+               }
+               drm_free(entry->seglist,
+                        entry->seg_count *
+                        sizeof(*entry->seglist), DRM_MEM_SEGS);
+
+               entry->seg_count = 0;
+       }
+
+       if (entry->buf_count) {
+               for (i = 0; i < entry->buf_count; i++) {
+                       if (entry->buflist[i].dev_private) {
+                               drm_free(entry->buflist[i].dev_private,
+                                        entry->buflist[i].dev_priv_size,
+                                        DRM_MEM_BUFS);
+                       }
+               }
+               drm_free(entry->buflist,
+                        entry->buf_count *
+                        sizeof(*entry->buflist), DRM_MEM_BUFS);
+
+               entry->buf_count = 0;
+       }
+}
+
+#if __OS_HAS_AGP
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks, creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_agp_mem *agp_entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i, valid;
+       struct drm_buf **temp_buflist;
+
+       if (!dma)
+               return -EINVAL;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
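+       /*
+        * Example (illustrative): request->size == 65536 gives order == 16
+        * and size == 64 KiB; with 4 KiB pages (PAGE_SHIFT == 12),
+        * page_order == 4 and total == 64 KiB per segment.
+        */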
+
+       byte_count = 0;
+       agp_offset = dev->agp->base + request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       /* Make sure buffers are located in AGP memory that we own */
+       valid = 0;
+       list_for_each_entry(agp_entry, &dev->agp->memory, head) {
+               if ((agp_offset >= agp_entry->bound) &&
+                   (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+                       valid = 1;
+                       break;
+               }
+       }
+       if (!list_empty(&dev->agp->memory) && !valid) {
+               DRM_DEBUG("zone invalid\n");
+               return -EINVAL;
+       }
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                  DRM_MEM_BUFS);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = drm_realloc(dma->buflist,
+                                  dma->buf_count * sizeof(*dma->buflist),
+                                  (dma->buf_count + entry->buf_count)
+                                  * sizeof(*dma->buflist), DRM_MEM_BUFS);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_AGP;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+EXPORT_SYMBOL(drm_addbufs_agp);
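+
+/*
+ * Usage sketch (illustrative only, not compiled here): a request for
+ * 32 page-aligned 64 KiB buffers at the start of the AGP aperture:
+ *
+ *     struct drm_buf_desc request = {
+ *             .count = 32,
+ *             .size = 65536,
+ *             .flags = _DRM_PAGE_ALIGN,
+ *             .agp_start = 0,
+ *     };
+ *     ret = drm_addbufs_agp(dev, &request);
+ */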
+#endif                         /* __OS_HAS_AGP */
+
+int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int count;
+       int order;
+       int size;
+       int total;
+       int page_order;
+       struct drm_buf_entry *entry;
+       drm_dma_handle_t *dmah;
+       struct drm_buf *buf;
+       int alignment;
+       unsigned long offset;
+       int i;
+       int byte_count;
+       int page_count;
+       unsigned long *temp_pagelist;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+                 request->count, request->size, size, order, dev->queue_count);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                  DRM_MEM_BUFS);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+                                  DRM_MEM_SEGS);
+       if (!entry->seglist) {
+               drm_free(entry->buflist,
+                        count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+       /* Keep the original pagelist until we know all the allocations
+        * have succeeded
+        */
+       temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
+                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+       if (!temp_pagelist) {
+               drm_free(entry->buflist,
+                        count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+               drm_free(entry->seglist,
+                        count * sizeof(*entry->seglist), DRM_MEM_SEGS);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memcpy(temp_pagelist,
+              dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+       DRM_DEBUG("pagelist: %d entries\n",
+                 dma->page_count + (count << page_order));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+       byte_count = 0;
+       page_count = 0;
+
+       while (entry->buf_count < count) {
+
+               dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+
+               if (!dmah) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       entry->seg_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       drm_free(temp_pagelist,
+                                (dma->page_count + (count << page_order))
+                                * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               entry->seglist[entry->seg_count++] = dmah;
+               for (i = 0; i < (1 << page_order); i++) {
+                       DRM_DEBUG("page %d @ 0x%08lx\n",
+                                 dma->page_count + page_count,
+                                 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
+                       temp_pagelist[dma->page_count + page_count++]
+                               = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
+               }
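+               /* Carve this segment into as many aligned buffers as fit. */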
+               for (offset = 0;
+                    offset + size <= total && entry->buf_count < count;
+                    offset += alignment, ++entry->buf_count) {
+                       buf = &entry->buflist[entry->buf_count];
+                       buf->idx = dma->buf_count + entry->buf_count;
+                       buf->total = alignment;
+                       buf->order = order;
+                       buf->used = 0;
+                       buf->offset = (dma->byte_count + byte_count + offset);
+                       buf->address = (void *)(dmah->vaddr + offset);
+                       buf->bus_address = dmah->busaddr + offset;
+                       buf->next = NULL;
+                       buf->waiting = 0;
+                       buf->pending = 0;
+                       init_waitqueue_head(&buf->dma_wait);
+                       buf->file_priv = NULL;
+
+                       buf->dev_priv_size = dev->driver->dev_priv_size;
+                       buf->dev_private = drm_alloc(buf->dev_priv_size,
+                                                    DRM_MEM_BUFS);
+                       if (!buf->dev_private) {
+                               /* Set count correctly so we free the proper amount. */
+                               entry->buf_count = count;
+                               entry->seg_count = count;
+                               drm_cleanup_buf_error(dev, entry);
+                               drm_free(temp_pagelist,
+                                        (dma->page_count +
+                                         (count << page_order))
+                                        * sizeof(*dma->pagelist),
+                                        DRM_MEM_PAGES);
+                               mutex_unlock(&dev->struct_mutex);
+                               atomic_dec(&dev->buf_alloc);
+                               return -ENOMEM;
+                       }
+                       memset(buf->dev_private, 0, buf->dev_priv_size);
+
+                       DRM_DEBUG("buffer %d @ %p\n",
+                                 entry->buf_count, buf->address);
+               }
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       temp_buflist = drm_realloc(dma->buflist,
+                                  dma->buf_count * sizeof(*dma->buflist),
+                                  (dma->buf_count + entry->buf_count)
+                                  * sizeof(*dma->buflist), DRM_MEM_BUFS);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               drm_free(temp_pagelist,
+                        (dma->page_count + (count << page_order))
+                        * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       /* No allocations failed, so now we can replace the original pagelist
+        * with the new one.
+        */
+       if (dma->page_count) {
+               drm_free(dma->pagelist,
+                        dma->page_count * sizeof(*dma->pagelist),
+                        DRM_MEM_PAGES);
+       }
+       dma->pagelist = temp_pagelist;
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += entry->seg_count << page_order;
+       dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       if (request->flags & _DRM_PCI_BUFFER_RO)
+               dma->flags = _DRM_DMA_USE_PCI_RO;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+
+}
+EXPORT_SYMBOL(drm_addbufs_pci);
+
+static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                  DRM_MEM_BUFS);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset
+                                       + (unsigned long)dev->sg->virtual);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = drm_realloc(dma->buflist,
+                                  dma->buf_count * sizeof(*dma->buflist),
+                                  (dma->buf_count + entry->buf_count)
+                                  * sizeof(*dma->buflist), DRM_MEM_BUFS);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_SG;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+
+int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       count = request->count;
+       order = drm_order(request->size);
+       size = 1 << order;
+
+       alignment = (request->flags & _DRM_PAGE_ALIGN)
+           ? PAGE_ALIGN(size) : size;
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
+       agp_offset = request->agp_start;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                  DRM_MEM_BUFS);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = drm_realloc(dma->buflist,
+                                  dma->buf_count * sizeof(*dma->buflist),
+                                  (dma->buf_count + entry->buf_count)
+                                  * sizeof(*dma->buflist), DRM_MEM_BUFS);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->count = entry->buf_count;
+       request->size = size;
+
+       dma->flags = _DRM_DMA_USE_FB;
+
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+EXPORT_SYMBOL(drm_addbufs_fb);
+
+
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According to the memory type specified in drm_buf_desc::flags and the
+ * build options, it dispatches the call to drm_addbufs_agp(),
+ * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
+ * scatter-gather, framebuffer or consistent PCI memory respectively.
+ */
+int drm_addbufs(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_buf_desc *request = data;
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+#if __OS_HAS_AGP
+       if (request->flags & _DRM_AGP_BUFFER)
+               ret = drm_addbufs_agp(dev, request);
+       else
+#endif
+       if (request->flags & _DRM_SG_BUFFER)
+               ret = drm_addbufs_sg(dev, request);
+       else if (request->flags & _DRM_FB_BUFFER)
+               ret = drm_addbufs_fb(dev, request);
+       else
+               ret = drm_addbufs_pci(dev, request);
+
+       return ret;
+}
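+
+/*
+ * Editor's sketch (illustrative, not part of the original import): userspace
+ * reaches drm_addbufs() through the legacy DRM_IOCTL_ADD_BUFS ioctl. With
+ * none of the AGP/SG/FB flags set, the request falls through to
+ * drm_addbufs_pci(); all field values below are made up for illustration:
+ *
+ *	struct drm_buf_desc desc = {0};
+ *	desc.count = 32;
+ *	desc.size  = 4096;
+ *	desc.flags = _DRM_PAGE_ALIGN;
+ *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
+ *		printf("granted %d buffers of %d bytes\n", desc.count, desc.size);
+ */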
+
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally meant for debugging purposes, or for use by a
+ * sophisticated client library to determine how best to use the available
+ * buffers (e.g., large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing further buffer allocation after this call. Information
+ * about each requested buffer is then copied into user space.
+ */
+int drm_infobufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_info *request = data;
+       int i;
+       int count;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       spin_lock(&dev->count_lock);
+       if (atomic_read(&dev->buf_alloc)) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       ++dev->buf_use;         /* Can't allocate more after this call */
+       spin_unlock(&dev->count_lock);
+
+       for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+               if (dma->bufs[i].buf_count)
+                       ++count;
+       }
+
+       DRM_DEBUG("count = %d\n", count);
+
+       if (request->count >= count) {
+               for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+                       if (dma->bufs[i].buf_count) {
+                               struct drm_buf_desc __user *to =
+                                   &request->list[count];
+                               struct drm_buf_entry *from = &dma->bufs[i];
+                               struct drm_freelist *list = &dma->bufs[i].freelist;
+                               if (copy_to_user(&to->count,
+                                                &from->buf_count,
+                                                sizeof(from->buf_count)) ||
+                                   copy_to_user(&to->size,
+                                                &from->buf_size,
+                                                sizeof(from->buf_size)) ||
+                                   copy_to_user(&to->low_mark,
+                                                &list->low_mark,
+                                                sizeof(list->low_mark)) ||
+                                   copy_to_user(&to->high_mark,
+                                                &list->high_mark,
+                                                sizeof(list->high_mark)))
+                                       return -EFAULT;
+
+                               DRM_DEBUG("%d %d %d %d %d\n",
+                                         i,
+                                         dma->bufs[i].buf_count,
+                                         dma->bufs[i].buf_size,
+                                         dma->bufs[i].freelist.low_mark,
+                                         dma->bufs[i].freelist.high_mark);
+                               ++count;
+                       }
+               }
+       }
+       request->count = count;
+
+       return 0;
+}
+
+/**
+ * Specifies a low and high water mark for buffer allocation.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order is within the admissible bounds and updates
+ * the low and high water marks of the respective drm_device_dma::bufs entry.
+ *
+ * \note This ioctl is deprecated and rarely, if ever, used.
+ */
+int drm_markbufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_desc *request = data;
+       int order;
+       struct drm_buf_entry *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       DRM_DEBUG("%d, %d, %d\n",
+                 request->size, request->low_mark, request->high_mark);
+       order = drm_order(request->size);
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       entry = &dma->bufs[order];
+
+       if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+               return -EINVAL;
+       if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+               return -EINVAL;
+
+       entry->freelist.low_mark = request->low_mark;
+       entry->freelist.high_mark = request->high_mark;
+
+       return 0;
+}
+
+/**
+ * Unreserve the buffers in list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+int drm_freebufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_free *request = data;
+       int i;
+       int idx;
+       struct drm_buf *buf;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       DRM_DEBUG("%d\n", request->count);
+       for (i = 0; i < request->count; i++) {
+               if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
+                       return -EFAULT;
+               if (idx < 0 || idx >= dma->buf_count) {
+                       DRM_ERROR("Index %d (of %d max)\n",
+                                 idx, dma->buf_count - 1);
+                       return -EINVAL;
+               }
+               buf = dma->buflist[idx];
+               if (buf->file_priv != file_priv) {
+                       DRM_ERROR("Process %d freeing buffer not owned\n",
+                                 current->pid);
+                       return -EINVAL;
+               }
+               drm_free_buffer(dev, buf);
+       }
+
+       return 0;
+}
+
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
+ */
+int drm_mapbufs(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int retcode = 0;
+       const int zero = 0;
+       unsigned long virtual;
+       unsigned long address;
+       struct drm_buf_map *request = data;
+       int i;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               return -EINVAL;
+
+       if (!dma)
+               return -EINVAL;
+
+       spin_lock(&dev->count_lock);
+       if (atomic_read(&dev->buf_alloc)) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       dev->buf_use++;         /* Can't allocate more after this call */
+       spin_unlock(&dev->count_lock);
+
+       if (request->count >= dma->buf_count) {
+               if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+                   || (drm_core_check_feature(dev, DRIVER_SG)
+                       && (dma->flags & _DRM_DMA_USE_SG))
+                   || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+                       && (dma->flags & _DRM_DMA_USE_FB))) {
+                       struct drm_map *map = dev->agp_buffer_map;
+                       unsigned long token = dev->agp_buffer_token;
+
+                       if (!map) {
+                               retcode = -EINVAL;
+                               goto done;
+                       }
+                       down_write(&current->mm->mmap_sem);
+                       virtual = do_mmap(file_priv->filp, 0, map->size,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_SHARED,
+                                         token);
+                       up_write(&current->mm->mmap_sem);
+               } else {
+                       down_write(&current->mm->mmap_sem);
+                       virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_SHARED, 0);
+                       up_write(&current->mm->mmap_sem);
+               }
+               if (virtual > -1024UL) {
+                       /* Real error */
+                       retcode = (signed long)virtual;
+                       goto done;
+               }
+               request->virtual = (void __user *)virtual;
+
+               for (i = 0; i < dma->buf_count; i++) {
+                       if (copy_to_user(&request->list[i].idx,
+                                        &dma->buflist[i]->idx,
+                                        sizeof(request->list[0].idx))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       if (copy_to_user(&request->list[i].total,
+                                        &dma->buflist[i]->total,
+                                        sizeof(request->list[0].total))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       if (copy_to_user(&request->list[i].used,
+                                        &zero, sizeof(zero))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+                       address = virtual + dma->buflist[i]->offset;    /* *** */
+                       if (copy_to_user(&request->list[i].address,
+                                        &address, sizeof(address))) {
+                               retcode = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+      done:
+       request->count = dma->buf_count;
+       DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+
+       return retcode;
+}
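+
+/*
+ * Editor's sketch (illustrative, not part of the original import): the
+ * expected userspace pattern for this ioctl, with made-up variable names:
+ *
+ *	struct drm_buf_map bmap = {0};
+ *	bmap.count = buf_count;		// must be >= the kernel's dma->buf_count
+ *	bmap.list  = calloc(buf_count, sizeof(struct drm_buf_pub));
+ *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bmap) == 0)
+ *		use(bmap.list[0].address);	// per-buffer client-virtual addresses
+ */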
+
+/**
+ * Compute size order.  Returns the exponent of the smallest power of two
+ * which is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order(unsigned long size)
+{
+       int order;
+       unsigned long tmp;
+
+       for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+
+       if (size & (size - 1))
+               ++order;
+
+       return order;
+}
+EXPORT_SYMBOL(drm_order);
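+
+/*
+ * Editor's note (illustrative, addressing the \todo above): for size >= 1,
+ * and assuming size fits in an int, the loop is equivalent to a single
+ * fls() call:
+ *
+ *	order = fls(size - 1);
+ *
+ * e.g. drm_order(1) == 0, drm_order(4096) == 12, drm_order(4097) == 13.
+ */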
diff --git a/psb-kernel-source-4.41.1/drm_compat.c b/psb-kernel-source-4.41.1/drm_compat.c
new file mode 100644 (file)
index 0000000..9022923
--- /dev/null
@@ -0,0 +1,778 @@
+/**************************************************************************
+ *
+ * This kernel module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************************/
+/*
+ * This code provides access to unexported mm kernel features. It is necessary
+ * to use the new DRM memory manager code with kernels that don't support it
+ * directly.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *          Linux kernel mm subsystem authors.
+ *          (Most code taken from there).
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These have bad performance in the AGP module for the indicated kernel versions.
+ */
+
+int drm_map_page_into_agp(struct page *page)
+{
+        int i;
+        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+        /* Caller's responsibility to call global_flush_tlb() for
+         * performance reasons */
+        return i;
+}
+
+int drm_unmap_page_from_agp(struct page *page)
+{
+        int i;
+        i = change_page_attr(page, 1, PAGE_KERNEL);
+        /* Caller's responsibility to call global_flush_tlb() for
+         * performance reasons */
+        return i;
+}
+#endif
+
+
+#if  (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+/*
+ * The protection map was exported in 2.6.19
+ */
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef MODULE
+       static pgprot_t drm_protection_map[16] = {
+               __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+               __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+       };
+
+       return drm_protection_map[vm_flags & 0x0F];
+#else
+       extern pgprot_t protection_map[];
+       return protection_map[vm_flags & 0x0F];
+#endif
+};
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * vm code for kernels below 2.6.15, in which a major vm rewrite occurred.
+ * This implements a simple, straightforward version similar to the one
+ * that is going to be in kernel 2.6.19+.
+ * Kernels below 2.6.15 use nopage, whereas 2.6.19 and upwards use nopfn.
+ */
+
+static struct {
+       spinlock_t lock;
+       struct page *dummy_page;
+       atomic_t present;
+} drm_np_retry =
+{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
+
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+                                   struct fault_data *data);
+
+
+struct page * get_nopage_retry(void)
+{
+       if (atomic_read(&drm_np_retry.present) == 0) {
+               struct page *page = alloc_page(GFP_KERNEL);
+               if (!page)
+                       return NOPAGE_OOM;
+               spin_lock(&drm_np_retry.lock);
+               drm_np_retry.dummy_page = page;
+               atomic_set(&drm_np_retry.present,1);
+               spin_unlock(&drm_np_retry.lock);
+       }
+       get_page(drm_np_retry.dummy_page);
+       return drm_np_retry.dummy_page;
+}
+
+void free_nopage_retry(void)
+{
+       if (atomic_read(&drm_np_retry.present) == 1) {
+               spin_lock(&drm_np_retry.lock);
+               __free_page(drm_np_retry.dummy_page);
+               drm_np_retry.dummy_page = NULL;
+               atomic_set(&drm_np_retry.present, 0);
+               spin_unlock(&drm_np_retry.lock);
+       }
+}
+
+struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
+                              unsigned long address,
+                              int *type)
+{
+       struct fault_data data;
+
+       if (type)
+               *type = VM_FAULT_MINOR;
+
+       data.address = address;
+       data.vma = vma;
+       drm_bo_vm_fault(vma, &data);
+       switch (data.type) {
+       case VM_FAULT_OOM:
+               return NOPAGE_OOM;
+       case VM_FAULT_SIGBUS:
+               return NOPAGE_SIGBUS;
+       default:
+               break;
+       }
+
+       return NOPAGE_REFAULT;
+}
+
+#endif
+
+#if !defined(DRM_FULL_MM_COMPAT) && \
+  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
+   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+                           unsigned long addr)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       int ret = 1;
+       pte_t *pte;
+       pmd_t *pmd;
+       pud_t *pud;
+       pgd_t *pgd;
+
+       spin_lock(&mm->page_table_lock);
+       pgd = pgd_offset(mm, addr);
+       if (pgd_none(*pgd))
+               goto unlock;
+       pud = pud_offset(pgd, addr);
+       if (pud_none(*pud))
+               goto unlock;
+       pmd = pmd_offset(pud, addr);
+       if (pmd_none(*pmd))
+               goto unlock;
+       pte = pte_offset_map(pmd, addr);
+       if (!pte)
+               goto unlock;
+       ret = pte_none(*pte);
+       pte_unmap(pte);
+ unlock:
+       spin_unlock(&mm->page_table_lock);
+       return ret;
+}
+
+static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                 unsigned long pfn)
+{
+       int ret;
+       if (!drm_pte_is_clear(vma, addr))
+               return -EBUSY;
+
+       ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
+       return ret;
+}
+
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+                                   struct fault_data *data)
+{
+       unsigned long address = data->address;
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       unsigned long page_offset;
+       struct page *page = NULL;
+       struct drm_ttm *ttm;
+       struct drm_device *dev;
+       unsigned long pfn;
+       int err;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+
+       dev = bo->dev;
+       drm_bo_read_lock(&dev->bm.bm_lock, 0);
+
+       mutex_lock(&bo->mutex);
+
+       err = drm_bo_wait(bo, 0, 1, 0);
+       if (err) {
+               data->type = (err == -EAGAIN) ?
+                       VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+
+       /*
+        * If buffer happens to be in a non-mappable location,
+        * move it to a mappable.
+        */
+
+       if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+               unsigned long _end = jiffies + 3*DRM_HZ;
+               uint32_t new_mask = bo->mem.mask |
+                       DRM_BO_FLAG_MAPPABLE |
+                       DRM_BO_FLAG_FORCE_MAPPABLE;
+
+               do {
+                       err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+               } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+
+               if (err) {
+                       DRM_ERROR("Timeout moving buffer to mappable location.\n");
+                       data->type = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+       }
+
+       if (address > vma->vm_end) {
+               data->type = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       dev = bo->dev;
+       err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+                               &bus_size);
+
+       if (err) {
+               data->type = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+       if (bus_size) {
+               struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
+
+               pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+               vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+       } else {
+               ttm = bo->ttm;
+
+               drm_ttm_fixup_caching(ttm);
+               page = drm_ttm_get_page(ttm, page_offset);
+               if (!page) {
+                       data->type = VM_FAULT_OOM;
+                       goto out_unlock;
+               }
+               pfn = page_to_pfn(page);
+               vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
+                       vm_get_page_prot(vma->vm_flags) :
+                       drm_io_prot(_DRM_TTM, vma);
+       }
+
+       err = vm_insert_pfn(vma, address, pfn);
+
+       if (!err || err == -EBUSY)
+               data->type = VM_FAULT_MINOR;
+       else
+               data->type = VM_FAULT_OOM;
+out_unlock:
+       mutex_unlock(&bo->mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return NULL;
+}
+
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+  !defined(DRM_FULL_MM_COMPAT)
+
+/**
+ * nopfn handler for buffer objects on kernels that provide nopfn but lack
+ * full mm compat. Translates the fault type reported by drm_bo_vm_fault()
+ * into the corresponding NOPFN_* return code.
+ */
+
+unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
+                          unsigned long address)
+{
+       struct fault_data data;
+       data.address = address;
+
+       (void) drm_bo_vm_fault(vma, &data);
+       if (data.type == VM_FAULT_OOM)
+               return NOPFN_OOM;
+       else if (data.type == VM_FAULT_SIGBUS)
+               return NOPFN_SIGBUS;
+
+       /*
+        * pfn already set.
+        */
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_vm_nopfn);
+#endif
+
+
+#ifdef DRM_ODD_MM_COMPAT
+
+/*
+ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
+ * workaround for a single BUG statement in do_no_page in these versions. The
+ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
+ * is to first take the dev->struct_mutex and then trylock all mmap_sems. If
+ * this fails for a single mmap_sem, we have to release all sems and the
+ * dev->struct_mutex, release the cpu and retry. We also need to keep track of
+ * all vmas mapping the ttm. Phew.
+ */
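+
+/*
+ * Editor's sketch (illustrative, not part of the original import): per the
+ * comment above and the one in drm_compat.h, a caller of drm_bo_lock_kmm()
+ * is expected to retry on -EAGAIN roughly like
+ *
+ *	while ((ret = drm_bo_lock_kmm(bo)) == -EAGAIN) {
+ *		mutex_unlock(&dev->struct_mutex);
+ *		schedule();
+ *		mutex_lock(&dev->struct_mutex);
+ *	}
+ */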
+
+typedef struct p_mm_entry {
+       struct list_head head;
+       struct mm_struct *mm;
+       atomic_t refcount;
+        int locked;
+} p_mm_entry_t;
+
+typedef struct vma_entry {
+       struct list_head head;
+       struct vm_area_struct *vma;
+} vma_entry_t;
+
+
+struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
+                              unsigned long address,
+                              int *type)
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       unsigned long page_offset;
+       struct page *page;
+       struct drm_ttm *ttm;
+       struct drm_device *dev;
+
+       mutex_lock(&bo->mutex);
+
+       if (type)
+               *type = VM_FAULT_MINOR;
+
+       if (address > vma->vm_end) {
+               page = NOPAGE_SIGBUS;
+               goto out_unlock;
+       }
+
+       dev = bo->dev;
+
+       if (drm_mem_reg_is_pci(dev, &bo->mem)) {
+               DRM_ERROR("Invalid compat nopage.\n");
+               page = NOPAGE_SIGBUS;
+               goto out_unlock;
+       }
+
+       ttm = bo->ttm;
+       drm_ttm_fixup_caching(ttm);
+       page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+       page = drm_ttm_get_page(ttm, page_offset);
+       if (!page) {
+               page = NOPAGE_OOM;
+               goto out_unlock;
+       }
+
+       get_page(page);
+out_unlock:
+       mutex_unlock(&bo->mutex);
+       return page;
+}
+
+
+
+
+int drm_bo_map_bound(struct vm_area_struct *vma)
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
+       int ret = 0;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+
+       ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+                               &bus_offset, &bus_size);
+       BUG_ON(ret);
+
+       if (bus_size) {
+               struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
+               unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
+               pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
+               ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
+                                        vma->vm_end - vma->vm_start,
+                                        pgprot);
+       }
+
+       return ret;
+}
+
+
+int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
+{
+       p_mm_entry_t *entry, *n_entry;
+       vma_entry_t *v_entry;
+       struct mm_struct *mm = vma->vm_mm;
+
+       v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
+       if (!v_entry) {
+               DRM_ERROR("Allocation of vma pointer entry failed\n");
+               return -ENOMEM;
+       }
+       v_entry->vma = vma;
+
+       list_add_tail(&v_entry->head, &bo->vma_list);
+
+       list_for_each_entry(entry, &bo->p_mm_list, head) {
+               if (mm == entry->mm) {
+                       atomic_inc(&entry->refcount);
+                       return 0;
+               } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
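+               /* Editor's note: the empty branch above looks like a leftover
+                * from keeping this list sorted by mm pointer; as written,
+                * new entries are simply appended at the tail below. */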
+       }
+
+       n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
+       if (!n_entry) {
+               DRM_ERROR("Allocation of process mm pointer entry failed\n");
+               return -ENOMEM;
+       }
+       INIT_LIST_HEAD(&n_entry->head);
+       n_entry->mm = mm;
+       n_entry->locked = 0;
+       atomic_set(&n_entry->refcount, 0);
+       list_add_tail(&n_entry->head, &entry->head);
+
+       return 0;
+}
+
+void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
+{
+       p_mm_entry_t *entry, *n;
+       vma_entry_t *v_entry, *v_n;
+       int found = 0;
+       struct mm_struct *mm = vma->vm_mm;
+
+       list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
+               if (v_entry->vma == vma) {
+                       found = 1;
+                       list_del(&v_entry->head);
+                       drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
+                       break;
+               }
+       }
+       BUG_ON(!found);
+
+       list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
+               if (mm == entry->mm) {
+                       if (atomic_add_negative(-1, &entry->refcount)) {
+                               list_del(&entry->head);
+                               BUG_ON(entry->locked);
+                               drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
+                       }
+                       return;
+               }
+       }
+       BUG_ON(1);
+}
+
+
+
+int drm_bo_lock_kmm(struct drm_buffer_object * bo)
+{
+       p_mm_entry_t *entry;
+       int lock_ok = 1;
+
+       list_for_each_entry(entry, &bo->p_mm_list, head) {
+               BUG_ON(entry->locked);
+               if (!down_write_trylock(&entry->mm->mmap_sem)) {
+                       lock_ok = 0;
+                       break;
+               }
+               entry->locked = 1;
+       }
+
+       if (lock_ok)
+               return 0;
+
+       list_for_each_entry(entry, &bo->p_mm_list, head) {
+               if (!entry->locked)
+                       break;
+               up_write(&entry->mm->mmap_sem);
+               entry->locked = 0;
+       }
+
+       /*
+        * Possible deadlock. Try again. Our callers should handle this
+        * and restart.
+        */
+
+       return -EAGAIN;
+}
+
+void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
+{
+       p_mm_entry_t *entry;
+
+       list_for_each_entry(entry, &bo->p_mm_list, head) {
+               BUG_ON(!entry->locked);
+               up_write(&entry->mm->mmap_sem);
+               entry->locked = 0;
+       }
+}
+
+int drm_bo_remap_bound(struct drm_buffer_object *bo)
+{
+       vma_entry_t *v_entry;
+       int ret = 0;
+
+       if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
+               list_for_each_entry(v_entry, &bo->vma_list, head) {
+                       ret = drm_bo_map_bound(v_entry->vma);
+                       if (ret)
+                               break;
+               }
+       }
+
+       return ret;
+}
+
+void drm_bo_finish_unmap(struct drm_buffer_object *bo)
+{
+       vma_entry_t *v_entry;
+
+       list_for_each_entry(v_entry, &bo->vma_list, head) {
+               v_entry->vma->vm_flags &= ~VM_PFNMAP;
+       }
+}
+
+#endif
+
+#ifdef USE_PAT_WC
+#include <asm/tlbflush.h>
+
+static int drm_has_pat = 0;
+
+int drm_use_pat(void)
+{
+       return drm_has_pat;
+}
+
+static void drm_pat_ipi_handler(void *notused)
+{
+       u32 v1, v2;
+
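+       /*
+        * PAT entries 0-3 live in the low dword (v1) of MSR_IA32_CR_PAT and
+        * entries 4-7 in the high dword (v2); clearing the low three bits of
+        * v2 and setting bit 0 programs PAT4 to type 01h, write-combining.
+        */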
+       rdmsr(MSR_IA32_CR_PAT, v1, v2);
+       v2 &= 0xFFFFFFF8;
+       v2 |= 0x00000001;
+       wbinvd();
+       wrmsr(MSR_IA32_CR_PAT, v1, v2);
+       __flush_tlb_all();
+}
+
+/*
+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
+ */
+
+void drm_init_pat(void)
+{
+       if (!boot_cpu_has(X86_FEATURE_PAT)) {
+               return;
+       }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+       if (on_each_cpu(drm_pat_ipi_handler, NULL, 1) != 0) {
+#else
+       if (on_each_cpu(drm_pat_ipi_handler, NULL, 1, 1) != 0) {
+#endif
+               DRM_ERROR("Timed out setting up CPU PAT.\n");
+               return;
+       }
+       drm_has_pat = 1;
+}
+EXPORT_SYMBOL(drm_init_pat);
+#endif
+
+#ifdef DRM_IDR_COMPAT_FN
+/* only called when idp->lock is held */
+static void __free_layer(struct idr *idp, struct idr_layer *p)
+{
+       p->ary[0] = idp->id_free;
+       idp->id_free = p;
+       idp->id_free_cnt++;
+}
+
+static void free_layer(struct idr *idp, struct idr_layer *p)
+{
+       unsigned long flags;
+
+       /*
+        * Depends on the return element being zeroed.
+        */
+       spin_lock_irqsave(&idp->lock, flags);
+       __free_layer(idp, p);
+       spin_unlock_irqrestore(&idp->lock, flags);
+}
+
+/**
+ * idr_for_each - iterate through all stored pointers
+ * @idp: idr handle
+ * @fn: function to be called for each pointer
+ * @data: data passed back to callback function
+ *
+ * Iterate over the pointers registered with the given idr.  The
+ * callback function will be called for each pointer currently
+ * registered, passing the id, the pointer and the data pointer passed
+ * to this function.  It is not safe to modify the idr tree while in
+ * the callback, so functions such as idr_get_new and idr_remove are
+ * not allowed.
+ *
+ * We check the return of @fn each time. If it returns anything other
+ * than 0, we break out and return that value.
+ *
+ * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ */
+int idr_for_each(struct idr *idp,
+                int (*fn)(int id, void *p, void *data), void *data)
+{
+       int n, id, max, error = 0;
+       struct idr_layer *p;
+       struct idr_layer *pa[MAX_LEVEL];
+       struct idr_layer **paa = &pa[0];
+
+       n = idp->layers * IDR_BITS;
+       p = idp->top;
+       max = 1 << n;
+
+       id = 0;
+       while (id < max) {
+               while (n > 0 && p) {
+                       n -= IDR_BITS;
+                       *paa++ = p;
+                       p = p->ary[(id >> n) & IDR_MASK];
+               }
+
+               if (p) {
+                       error = fn(id, (void *)p, data);
+                       if (error)
+                               break;
+               }
+
+               id += 1 << n;
+               while (n < fls(id)) {
+                       n += IDR_BITS;
+                       p = *--paa;
+               }
+       }
+
+       return error;
+}
+EXPORT_SYMBOL(idr_for_each);
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * idr_destroy() only frees up unused, cached idp_layers, but this
+ * function will remove all id mappings and leave all idp_layers
+ * unused.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree, will
+ * use idr_for_each() to free all objects, if necessary, then
+ * idr_remove_all() to remove all ids, and idr_destroy() to free
+ * up the cached idr_layers.
+ */
+void idr_remove_all(struct idr *idp)
+{
+       int n, id, max, error = 0;
+       struct idr_layer *p;
+       struct idr_layer *pa[MAX_LEVEL];
+       struct idr_layer **paa = &pa[0];
+
+       n = idp->layers * IDR_BITS;
+       p = idp->top;
+       max = 1 << n;
+
+       id = 0;
+       while (id < max && !error) {
+               while (n > IDR_BITS && p) {
+                       n -= IDR_BITS;
+                       *paa++ = p;
+                       p = p->ary[(id >> n) & IDR_MASK];
+               }
+
+               id += 1 << n;
+               while (n < fls(id)) {
+                       if (p) {
+                               memset(p, 0, sizeof *p);
+                               free_layer(idp, p);
+                       }
+                       n += IDR_BITS;
+                       p = *--paa;
+               }
+       }
+       idp->top = NULL;
+       idp->layers = 0;
+}
+EXPORT_SYMBOL(idr_remove_all);
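+
+/*
+ * Editor's sketch (illustrative, not part of the original import): the
+ * clean-up sequence described in the idr_remove_all() comment above, with a
+ * made-up callback:
+ *
+ *	static int free_one(int id, void *p, void *data)
+ *	{
+ *		kfree(p);
+ *		return 0;
+ *	}
+ *	...
+ *	idr_for_each(&some_idr, free_one, NULL);
+ *	idr_remove_all(&some_idr);
+ *	idr_destroy(&some_idr);
+ */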
+
+#endif /* DRM_IDR_COMPAT_FN */
+
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+/**
+ * idr_replace - replace pointer for given id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: lookup key
+ *
+ * Replace the pointer registered with an id and return the old value.
+ * A -ENOENT return indicates that @id was not found.
+ * A -EINVAL return indicates that @id was not within valid constraints.
+ *
+ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ */
+void *idr_replace(struct idr *idp, void *ptr, int id)
+{
+       int n;
+       struct idr_layer *p, *old_p;
+
+       n = idp->layers * IDR_BITS;
+       p = idp->top;
+
+       id &= MAX_ID_MASK;
+
+       if (id >= (1 << n))
+               return ERR_PTR(-EINVAL);
+
+       n -= IDR_BITS;
+       while ((n > 0) && p) {
+               p = p->ary[(id >> n) & IDR_MASK];
+               n -= IDR_BITS;
+       }
+
+       n = id & IDR_MASK;
+       if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+               return ERR_PTR(-ENOENT);
+
+       old_p = p->ary[n];
+       p->ary[n] = ptr;
+
+       return (void *)old_p;
+}
+EXPORT_SYMBOL(idr_replace);
+#endif
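+
+/*
+ * Editor's sketch (illustrative, not part of the original import): callers
+ * use the error-pointer convention of idr_replace() roughly like
+ *
+ *	old = idr_replace(&some_idr, new_ptr, id);
+ *	if (IS_ERR(old))
+ *		handle_missing_id(PTR_ERR(old));	// hypothetical handler
+ */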
diff --git a/psb-kernel-source-4.41.1/drm_compat.h b/psb-kernel-source-4.41.1/drm_compat.h
new file mode 100644 (file)
index 0000000..4895635
--- /dev/null
@@ -0,0 +1,383 @@
+/**
+ * \file drm_compat.h
+ * Backward compatibility definitions for Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_COMPAT_H_
+#define _DRM_COMPAT_H_
+
+#ifndef minor
+#define minor(x) MINOR((x))
+#endif
+
+#ifndef MODULE_LICENSE
+#define MODULE_LICENSE(x)
+#endif
+
+#ifndef preempt_disable
+#define preempt_disable()
+#define preempt_enable()
+#endif
+
+#ifndef pte_offset_map
+#define pte_offset_map pte_offset
+#define pte_unmap(pte)
+#endif
+
+#ifndef module_param
+#define module_param(name, type, perm)
+#endif
+
+/* older kernels had different irq args */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+#undef DRM_IRQ_ARGS
+#define DRM_IRQ_ARGS           int irq, void *arg, struct pt_regs *regs
+
+typedef _Bool bool;
+enum {
+        false   = 0,
+        true    = 1
+};
+
+#endif
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head)                               \
+       for (pos = (head)->next, n = pos->next; pos != (head);          \
+               pos = n, n = pos->next)
+#endif
+
+#ifndef list_for_each_entry
+#define list_for_each_entry(pos, head, member)                         \
+       for (pos = list_entry((head)->next, typeof(*pos), member),      \
+                    prefetch(pos->member.next);                                \
+            &pos->member != (head);                                    \
+            pos = list_entry(pos->member.next, typeof(*pos), member),  \
+                    prefetch(pos->member.next))
+#endif
+
+#ifndef list_for_each_entry_safe
+#define list_for_each_entry_safe(pos, n, head, member)                  \
+        for (pos = list_entry((head)->next, typeof(*pos), member),      \
+                n = list_entry(pos->member.next, typeof(*pos), member); \
+             &pos->member != (head);                                    \
+             pos = n, n = list_entry(n->member.next, typeof(*n), member))
+#endif
+
+#ifndef __user
+#define __user
+#endif
+
+#if !defined(__put_page)
+#define __put_page(p)           atomic_dec(&(p)->count)
+#endif
+
+#if !defined(__GFP_COMP)
+#define __GFP_COMP 0
+#endif
+
+#if !defined(IRQF_SHARED)
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
+{
+  return remap_page_range(vma, from,
+                         pfn << PAGE_SHIFT,
+                         size,
+                         pgprot);
+}
+
+static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
+{
+       void *addr;
+
+       addr = kmalloc(size * nmemb, flags);
+       if (addr != NULL)
+               memset((void *)addr, 0, size * nmemb);
+
+       return addr;
+}
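+
+/* Editor's note: unlike the mainline kcalloc(), this fallback does not check
+ * nmemb * size for multiplication overflow. */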
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+#define mutex_lock down
+#define mutex_unlock up
+
+#define mutex semaphore
+
+#define mutex_init(a) sema_init((a), 1)
+
+#endif
+
+#ifndef DEFINE_SPINLOCK
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+#endif
+
+/* old architectures */
+#ifdef __AMD64__
+#define __x86_64__
+#endif
+
+/* sysfs __ATTR macro */
+#ifndef __ATTR
+#define __ATTR(_name,_mode,_show,_store) { \
+        .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE },     \
+        .show   = _show,                                        \
+        .store  = _store,                                       \
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#define vmalloc_user(_size) ({void * tmp = vmalloc(_size);   \
+      if (tmp) memset(tmp, 0, _size);                       \
+      (tmp);})
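+/* Editor's note: unlike the 2.6.18+ vmalloc_user(), this fallback only
+ * zeroes the allocation; it does not mark the area with VM_USERMAP. */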
+#endif
+
+#ifndef list_for_each_entry_safe_reverse
+#define list_for_each_entry_safe_reverse(pos, n, head, member)          \
+        for (pos = list_entry((head)->prev, typeof(*pos), member),      \
+                n = list_entry(pos->member.prev, typeof(*pos), member); \
+             &pos->member != (head);                                    \
+             pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+#endif
+
+#include <linux/mm.h>
+#include <asm/page.h>
+
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
+     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
+#define DRM_ODD_MM_COMPAT
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
+#define DRM_FULL_MM_COMPAT
+#endif
+
+
+/*
+ * Flush relevant caches and clear a VMA structure so that page references
+ * will cause a page fault. Don't flush tlbs.
+ */
+
+extern void drm_clear_vma(struct vm_area_struct *vma,
+                         unsigned long addr, unsigned long end);
+
+/*
+ * Return the PTE protection map entries for the VMA flags given by
+ * flags. This is a functional interface to the kernel's protection map.
+ */
+
+extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
+
+#ifndef GFP_DMA32
+#define GFP_DMA32 GFP_KERNEL
+#endif
+#ifndef __GFP_DMA32
+#define __GFP_DMA32 GFP_KERNEL
+#endif
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These are too slow in earlier kernels.
+ */
+
+extern int drm_unmap_page_from_agp(struct page *page);
+extern int drm_map_page_into_agp(struct page *page);
+
+#define map_page_into_agp drm_map_page_into_agp
+#define unmap_page_from_agp drm_unmap_page_from_agp
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+extern struct page *get_nopage_retry(void);
+extern void free_nopage_retry(void);
+
+#define NOPAGE_REFAULT get_nopage_retry()
+#endif
+
+
+#ifndef DRM_FULL_MM_COMPAT
+
+/*
+ * For now, just return a dummy page that we've allocated out of
+ * static space. The page will be put by do_nopage() since we've already
+ * filled out the pte.
+ */
+
+struct fault_data {
+       struct vm_area_struct *vma;
+       unsigned long address;
+       pgoff_t pgoff;
+       unsigned int flags;
+
+       int type;
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
+                                    unsigned long address,
+                                    int *type);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+  !defined(DRM_FULL_MM_COMPAT)
+extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                                    unsigned long address);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
+#endif /* ndef DRM_FULL_MM_COMPAT */
+
+#ifdef DRM_ODD_MM_COMPAT
+
+struct drm_buffer_object;
+
+
+/*
+ * Add a vma to the ttm vma list, and the
+ * process mm pointer to the ttm mm list. Needs the ttm mutex.
+ */
+
+extern int drm_bo_add_vma(struct drm_buffer_object * bo,
+                          struct vm_area_struct *vma);
+/*
+ * Delete a vma and the corresponding mm pointer from the
+ * ttm lists. Needs the ttm mutex.
+ */
+extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
+                             struct vm_area_struct *vma);
+
+/*
+ * Attempts to lock all relevant mmap_sems for a ttm, while
+ * not releasing the ttm mutex. May return -EAGAIN to avoid
+ * deadlocks. In that case the caller shall release the ttm mutex,
+ * schedule() and try again.
+ */
+
+extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
+
+/*
+ * Unlock all relevant mmap_sems for a ttm.
+ */
+extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
+
+/*
+ * If the ttm was bound to the aperture, this function shall be called
+ * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
+ * vmas mapping this ttm. This is needed just after unmapping the ptes of
+ * the vma, otherwise the do_nopage() function will bug :(. The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
+
+/*
+ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
+ * fault these pfns in, because the first one will set the vma VM_PFNMAP
+ * flag, which will make the next fault bug in do_nopage(). The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
+
+
+/*
+ * Remap a vma for a bound ttm. Call with the ttm mutex held and
+ * the relevant mmap_sem locked.
+ */
+extern int drm_bo_map_bound(struct vm_area_struct *vma);
+
+#endif
+
+#if (defined(CONFIG_X86) && defined(X86_FEATURE_PAT) && defined(X86_FEATURE_MSR))
+
+/*
+ * Use the i386 Page Attribute Table for write-combining.
+ * We can't include this code in the kernel submission, since some of it
+ * belongs at kernel startup.
+ */
+
+#define USE_PAT_WC
+#else
+#undef USE_PAT_WC
+#endif
+
+#ifdef USE_PAT_WC
+#ifndef MSR_IA32_CR_PAT
+#define MSR_IA32_CR_PAT 0x0277
+#endif
+
+#ifndef _PAGE_PAT
+#define _PAGE_PAT 0x080                /* Note that this is the same value as _PAGE_PROTNONE */
+
+#endif
+
+extern void drm_init_pat(void);
+extern int drm_use_pat(void);
+
+#endif
+
+/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
+//#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && \
+                               (LINUX_VERSION_CODE != KERNEL_VERSION(2,6,21)))
+#define DRM_IDR_COMPAT_FN
+#endif
+#ifdef DRM_IDR_COMPAT_FN
+int idr_for_each(struct idr *idp,
+                int (*fn)(int id, void *p, void *data), void *data);
+void idr_remove_all(struct idr *idp);
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+void *idr_replace(struct idr *idp, void *ptr, int id);
+#endif
+
+#ifndef VM_CAN_NONLINEAR
+#define DRM_VM_NOPAGE 1
+#endif
+
+#ifdef DRM_VM_NOPAGE
+
+extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
+                                 unsigned long address, int *type);
+
+extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
+                                     unsigned long address, int *type);
+
+extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
+                                     unsigned long address, int *type);
+
+extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
+                                    unsigned long address, int *type);
+#endif
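+
+/*
+ * Illustrative sketch -- hypothetical driver-side code, not part of this
+ * header. On kernels without VM_CAN_NONLINEAR, the handlers declared above
+ * are meant to be wired into a vm_operations_struct, roughly:
+ *
+ *   static struct vm_operations_struct drm_vm_ops_example = {
+ *           .nopage = drm_vm_nopage,   // declared under DRM_VM_NOPAGE above
+ *   };
+ */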
+
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_context.c b/psb-kernel-source-4.41.1/drm_context.c
new file mode 100644 (file)
index 0000000..83ad291
--- /dev/null
@@ -0,0 +1,472 @@
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * ChangeLog:
+ *  2001-11-16 Torsten Duwe <duwe@caldera.de>
+ *             added context constructor/destructor hooks,
+ *             needed by SiS driver's memory management.
+ */
+
+#include "drmP.h"
+
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Removes the entry specified by \p ctx_handle from drm_device::ctx_idr,
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_remove(&dev->ctx_idr, ctx_handle);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new context handle from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+static int drm_ctxbitmap_next(struct drm_device *dev)
+{
+       int new_id;
+       int ret;
+
+again:
+       if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding context idr\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dev->struct_mutex);
+       ret = idr_get_new_above(&dev->ctx_idr, NULL,
+                               DRM_RESERVED_CONTEXTS, &new_id);
+       if (ret == -EAGAIN) {
+               mutex_unlock(&dev->struct_mutex);
+               goto again;
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+       return new_id;
+}
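+
+/*
+ * Illustrative sketch -- not part of the original file. The function above
+ * uses the pre-2.6.23 idr idiom: preallocate with idr_pre_get(), then retry
+ * idr_get_new_above() on -EAGAIN. Stripped of locking, the pattern is:
+ *
+ *   int example_alloc_id(struct idr *idr, void *ptr)
+ *   {
+ *           int id, ret;
+ *   retry:
+ *           if (idr_pre_get(idr, GFP_KERNEL) == 0)
+ *                   return -ENOMEM;        // preallocation failed
+ *           ret = idr_get_new_above(idr, ptr, 1, &id);
+ *           if (ret == -EAGAIN)
+ *                   goto retry;            // prealloc consumed; try again
+ *           return ret ? ret : id;
+ *   }
+ */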
+
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialise the drm_device::ctx_idr
+ */
+int drm_ctxbitmap_init(struct drm_device *dev)
+{
+       idr_init(&dev->ctx_idr);
+       return 0;
+}
+
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Free all idr entries while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device *dev)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_remove_all(&dev->ctx_idr);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx_priv_map structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * Gets the map from drm_device::ctx_idr with the specified context handle
+ * and returns the map's user token as the handle.
+ */
+int drm_getsareactx(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_ctx_priv_map *request = data;
+       struct drm_map *map;
+       struct drm_map_list *_entry;
+
+       mutex_lock(&dev->struct_mutex);
+
+       map = idr_find(&dev->ctx_idr, request->ctx_id);
+       if (!map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+
+       request->handle = NULL;
+       list_for_each_entry(_entry, &dev->maplist, head) {
+               if (_entry->map == map) {
+                       request->handle =
+                           (void *)(unsigned long)_entry->user_token;
+                       break;
+               }
+       }
+       if (request->handle == NULL)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * Set per-context SAREA.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx_priv_map structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping specified in \p data and updates the entry in
+ * drm_device::ctx_idr with it.
+ */
+int drm_setsareactx(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_ctx_priv_map *request = data;
+       struct drm_map *map = NULL;
+       struct drm_map_list *r_list = NULL;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               if (r_list->map
+                   && r_list->user_token == (unsigned long) request->handle)
+                       goto found;
+       }
+      bad:
+       mutex_unlock(&dev->struct_mutex);
+       return -EINVAL;
+
+      found:
+       map = r_list->map;
+       if (!map)
+               goto bad;
+
+       if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
+               goto bad;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
+ */
+static int drm_context_switch(struct drm_device *dev, int old, int new)
+{
+       if (test_and_set_bit(0, &dev->context_flag)) {
+               DRM_ERROR("Reentering -- FIXME\n");
+               return -EBUSY;
+       }
+
+       DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+       if (new == dev->last_context) {
+               clear_bit(0, &dev->context_flag);
+               return 0;
+       }
+
+       return 0;
+}
+
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device *dev, int new)
+{
+       dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
+       dev->last_switch = jiffies;
+
+       if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+               DRM_ERROR("Lock isn't held after context switch\n");
+       }
+
+       /* If a context switch is ever initiated
+          when the kernel holds the lock, release
+          that lock here. */
+       clear_bit(0, &dev->context_flag);
+       wake_up(&dev->context_wait);
+
+       return 0;
+}
+
+/**
+ * Reserve contexts.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx_res structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_resctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_res *res = data;
+       struct drm_ctx ctx;
+       int i;
+
+       if (res->count >= DRM_RESERVED_CONTEXTS) {
+               memset(&ctx, 0, sizeof(ctx));
+               for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+                       ctx.handle = i;
+                       if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
+                               return -EFAULT;
+               }
+       }
+       res->count = DRM_RESERVED_CONTEXTS;
+
+       return 0;
+}
+
+/**
+ * Add context.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+int drm_addctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx_list *ctx_entry;
+       struct drm_ctx *ctx = data;
+
+       ctx->handle = drm_ctxbitmap_next(dev);
+       if (ctx->handle == DRM_KERNEL_CONTEXT) {
+               /* Skip kernel's context and get a new one. */
+               ctx->handle = drm_ctxbitmap_next(dev);
+       }
+       DRM_DEBUG("%d\n", ctx->handle);
+       if ((int)ctx->handle < 0) {     /* drm_ctxbitmap_next() returns -errno */
+               DRM_DEBUG("Not enough free contexts.\n");
+               /* Should this return -EBUSY instead? */
+               return -ENOMEM;
+       }
+
+       if (ctx->handle != DRM_KERNEL_CONTEXT) {
+               if (dev->driver->context_ctor)
+                       if (!dev->driver->context_ctor(dev, ctx->handle)) {
+                               DRM_DEBUG("Running out of ctxs or memory.\n");
+                               return -ENOMEM;
+                       }
+       }
+
+       ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
+       if (!ctx_entry) {
+               DRM_DEBUG("out of memory\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&ctx_entry->head);
+       ctx_entry->handle = ctx->handle;
+       ctx_entry->tag = file_priv;
+
+       mutex_lock(&dev->ctxlist_mutex);
+       list_add(&ctx_entry->head, &dev->ctxlist);
+       ++dev->ctx_count;
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       return 0;
+}
+
+int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       /* This does nothing */
+       return 0;
+}
+
+/**
+ * Get context.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       /* This is 0, because we don't handle any context flags */
+       ctx->flags = 0;
+
+       return 0;
+}
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls drm_context_switch().
+ */
+int drm_switchctx(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       return drm_context_switch(dev, dev->last_context, ctx->handle);
+}
+
+/**
+ * New context.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls drm_context_switch_complete().
+ */
+int drm_newctx(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       drm_context_switch_complete(dev, ctx->handle);
+
+       return 0;
+}
+
+/**
+ * Remove context.
+ *
+ * \param dev DRM device.
+ * \param data user argument pointing to a drm_ctx structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls drm_ctxbitmap_free() to free the
+ * specified context.
+ */
+int drm_rmctx(struct drm_device *dev, void *data,
+             struct drm_file *file_priv)
+{
+       struct drm_ctx *ctx = data;
+
+       DRM_DEBUG("%d\n", ctx->handle);
+       if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
+               file_priv->remove_auth_on_close = 1;
+       }
+       if (ctx->handle != DRM_KERNEL_CONTEXT) {
+               if (dev->driver->context_dtor)
+                       dev->driver->context_dtor(dev, ctx->handle);
+               drm_ctxbitmap_free(dev, ctx->handle);
+       }
+
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->handle == ctx->handle) {
+                               list_del(&pos->head);
+                               drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       return 0;
+}
+
+/*@}*/
diff --git a/psb-kernel-source-4.41.1/drm_core.h b/psb-kernel-source-4.41.1/drm_core.h
new file mode 100644 (file)
index 0000000..705bbff
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define CORE_AUTHOR            "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
+
+#define CORE_NAME              "drm"
+#define CORE_DESC              "DRM shared core routines"
+#define CORE_DATE              "20060810"
+
+#define DRM_IF_MAJOR   1
+#define DRM_IF_MINOR   3
+
+#define CORE_MAJOR     1
+#define CORE_MINOR     1
+#define CORE_PATCHLEVEL 0
diff --git a/psb-kernel-source-4.41.1/drm_crtc.c b/psb-kernel-source-4.41.1/drm_crtc.c
new file mode 100644 (file)
index 0000000..f276439
--- /dev/null
@@ -0,0 +1,2169 @@
+/*
+ * Copyright (c) 2006-2007 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/list.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+
+/**
+ * drm_idr_get - allocate a new identifier
+ * @dev: DRM device
+ * @ptr: object pointer, used to generate unique ID
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a unique identifier based on @ptr in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and outputs.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+int drm_idr_get(struct drm_device *dev, void *ptr)
+{
+       int new_id = 0;
+       int ret;
+again:
+       if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Ran out of memory getting a mode number\n");
+               return 0;
+       }
+
+       ret = idr_get_new_above(&dev->mode_config.crtc_idr, ptr, 1, &new_id);
+       if (ret == -EAGAIN)
+               goto again;     
+
+       return new_id;
+}
+
+/**
+ * drm_idr_put - free an identifier
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+void drm_idr_put(struct drm_device *dev, int id)
+{
+       idr_remove(&dev->mode_config.crtc_idr, id);
+}
+
+/**
+ * drm_crtc_from_fb - find the CRTC structure associated with an fb
+ * @dev: DRM device
+ * @fb: framebuffer in question
+ *
+ * LOCKING:
+ * Caller must hold mode_config lock.
+ *
+ * Find CRTC in the mode_config structure that matches @fb.
+ *
+ * RETURNS:
+ * Pointer to the CRTC or NULL if it wasn't found.
+ */
+struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
+                                 struct drm_framebuffer *fb)
+{
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc->fb == fb)
+                       return crtc;
+       }
+       return NULL;
+}
+
+/**
+ * drm_framebuffer_create - create a new framebuffer object
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Creates a new framebuffer object and adds it to @dev's DRM mode_config.
+ *
+ * RETURNS:
+ * Pointer to new framebuffer or NULL on error.
+ */
+struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev)
+{
+       struct drm_framebuffer *fb;
+
+       /* Limit to single framebuffer for now */
+       if (dev->mode_config.num_fb > 1) {
+               /* the caller holds the mode config lock; leave it held */
+               DRM_ERROR("Attempt to add multiple framebuffers failed\n");
+               return NULL;
+       }
+
+       fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
+       if (!fb)
+               return NULL;
+       
+       fb->id = drm_idr_get(dev, fb);
+       fb->dev = dev;
+       dev->mode_config.num_fb++;
+       list_add(&fb->head, &dev->mode_config.fb_list);
+
+       return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_create);
+
+/**
+ * drm_framebuffer_destroy - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs in @fb's DRM device and clears any that are using
+ * @fb, then removes @fb from the mode_config and frees it.
+ */
+void drm_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+       struct drm_crtc *crtc;
+
+       /* remove from any CRTC */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc->fb == fb)
+                       crtc->fb = NULL;
+       }
+
+       drm_idr_put(dev, fb->id);
+       list_del(&fb->head);
+       dev->mode_config.num_fb--;
+
+       kfree(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_destroy);
+
+/**
+ * drm_crtc_create - create a new CRTC object
+ * @dev: DRM device
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Creates a new CRTC object and adds it to @dev's mode_config structure.
+ *
+ * RETURNS:
+ * Pointer to new CRTC object or NULL on error.
+ */
+struct drm_crtc *drm_crtc_create(struct drm_device *dev,
+                                const struct drm_crtc_funcs *funcs)
+{
+       struct drm_crtc *crtc;
+
+       crtc = kzalloc(sizeof(struct drm_crtc), GFP_KERNEL);
+       if (!crtc)
+               return NULL;
+
+       crtc->dev = dev;
+       crtc->funcs = funcs;
+
+       crtc->id = drm_idr_get(dev, crtc);
+
+       list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+       dev->mode_config.num_crtc++;
+
+       return crtc;
+}
+EXPORT_SYMBOL(drm_crtc_create);
+
+/**
+ * drm_crtc_destroy - remove a CRTC object
+ * @crtc: CRTC to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleanup @crtc.  Calls @crtc's cleanup function, then removes @crtc from
+ * its associated DRM device's mode_config.  Frees it afterwards.
+ */
+void drm_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+
+       if (crtc->funcs->cleanup)
+               (*crtc->funcs->cleanup)(crtc);
+
+       drm_idr_put(dev, crtc->id);
+       list_del(&crtc->head);
+       dev->mode_config.num_crtc--;
+       kfree(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_destroy);
+
+/**
+ * drm_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool drm_crtc_in_use(struct drm_crtc *crtc)
+{
+       struct drm_output *output;
+       struct drm_device *dev = crtc->dev;
+       /* FIXME: Locking around list access? */
+       list_for_each_entry(output, &dev->mode_config.output_list, head)
+               if (output->crtc == crtc)
+                       return true;
+       return false;
+}
+EXPORT_SYMBOL(drm_crtc_in_use);
+
+/*
+ * Detailed mode info for a standard 640x480@60Hz monitor
+ */
+static struct drm_display_mode std_mode[] = {
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 25200, 640, 656,
+                  752, 800, 0, 480, 490, 492, 525, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
+};
+
+/**
+ * drm_crtc_probe_output_modes - get complete set of display modes
+ * @dev: DRM device
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Based on @dev's mode_config layout, scan all the outputs and try to detect
+ * modes on them.  Modes will first be added to the output's probed_modes
+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
+ * put into the normal modes list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
+ */
+void drm_crtc_probe_output_modes(struct drm_device *dev, int maxX, int maxY)
+{
+       struct drm_output *output;
+       struct drm_display_mode *mode, *t;
+       int ret;
+       //if (maxX == 0 || maxY == 0) 
+       // TODO
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+
+               /* set all modes to the unverified state */
+               list_for_each_entry_safe(mode, t, &output->modes, head)
+                       mode->status = MODE_UNVERIFIED;
+               
+               output->status = (*output->funcs->detect)(output);
+
+               if (output->status == output_status_disconnected) {
+                       DRM_DEBUG("%s is disconnected\n", output->name);
+                       /* TODO set EDID to NULL */
+                       continue;
+               }
+
+               ret = (*output->funcs->get_modes)(output);
+
+               if (ret) {
+                       drm_mode_output_list_update(output);
+               }
+
+               if (maxX && maxY)
+                       drm_mode_validate_size(dev, &output->modes, maxX,
+                                              maxY, 0);
+               list_for_each_entry_safe(mode, t, &output->modes, head) {
+                       if (mode->status == MODE_OK)
+                               mode->status = (*output->funcs->mode_valid)(output,mode);
+               }
+
+               drm_mode_prune_invalid(dev, &output->modes, TRUE);
+
+               if (list_empty(&output->modes)) {
+                       struct drm_display_mode *stdmode;
+
+                       DRM_DEBUG("No valid modes on %s\n", output->name);
+
+                       /* Should we do this here ???
+                        * When no valid EDID modes are available we end up
+                        * here; we used to bail, but now we add a standard
+                        * 640x480@60Hz mode and carry on.
+                        */
+                       stdmode = drm_mode_duplicate(dev, &std_mode[0]);
+                       drm_mode_probed_add(output, stdmode);
+                       drm_mode_list_concat(&output->probed_modes,
+                                            &output->modes);
+
+                       DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
+                                                               output->name);
+               }
+
+               drm_mode_sort(&output->modes);
+
+               DRM_DEBUG("Probed modes for %s\n", output->name);
+               list_for_each_entry_safe(mode, t, &output->modes, head) {
+                       mode->vrefresh = drm_mode_vrefresh(mode);
+
+                       drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+                       drm_mode_debug_printmodeline(dev, mode);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_crtc_probe_output_modes);
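+
+/*
+ * Illustrative sketch -- hypothetical driver code, not part of this file.
+ * The probe loop above relies on only three output callbacks; a driver
+ * supplies them through its drm_output_funcs:
+ *
+ *   static const struct drm_output_funcs example_output_funcs = {
+ *           .detect     = example_detect,     // output_status_* result
+ *           .get_modes  = example_get_modes,  // fills output->probed_modes
+ *           .mode_valid = example_mode_valid, // MODE_OK or a reject reason
+ *           // ... remaining hooks (dpms, prepare, mode_set, commit, ...)
+ *   };
+ */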
+
+/**
+ * drm_crtc_set_mode - set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: width of mode
+ * @y: height of mode
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc.  Give @crtc and its associated outputs a chance
+ * to fixup or reject the mode prior to trying to set it.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                      int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_display_mode *adjusted_mode, saved_mode;
+       int saved_x, saved_y;
+       bool didLock = false;
+       bool ret = false;
+       struct drm_output *output;
+
+       adjusted_mode = drm_mode_duplicate(dev, mode);
+
+       crtc->enabled = drm_crtc_in_use(crtc);
+
+       if (!crtc->enabled) {
+               return true;
+       }
+
+       didLock = crtc->funcs->lock(crtc);
+
+       saved_mode = crtc->mode;
+       saved_x = crtc->x;
+       saved_y = crtc->y;
+       
+       /* Update crtc values up front so the driver can rely on them for mode
+        * setting.
+        */
+       crtc->mode = *mode;
+       crtc->x = x;
+       crtc->y = y;
+
+       /* XXX short-circuit changes to base location only */
+       
+       /* Pass our mode to the outputs and the CRTC to give them a chance to
+        * adjust it according to limitations or output properties, and also
+        * a chance to reject the mode entirely.
+        */
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               
+               if (output->crtc != crtc)
+                       continue;
+               
+               if (!output->funcs->mode_fixup(output, mode, adjusted_mode)) {
+                       goto done;
+               }
+       }
+       
+       if (!crtc->funcs->mode_fixup(crtc, mode, adjusted_mode)) {
+               goto done;
+       }
+
+       /* Prepare the outputs and CRTCs before setting the mode. */
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+
+               if (output->crtc != crtc)
+                       continue;
+               
+               /* Disable the output as the first thing we do. */
+               output->funcs->prepare(output);
+       }
+       
+       crtc->funcs->prepare(crtc);
+       
+       /* Set up the DPLL and any output state that needs to adjust or depend
+        * on the DPLL.
+        */
+       crtc->funcs->mode_set(crtc, mode, adjusted_mode, x, y);
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+
+               if (output->crtc != crtc)
+                       continue;
+               
+               DRM_INFO("%s: set mode %s %x\n", output->name, mode->name, mode->mode_id);
+
+               output->funcs->mode_set(output, mode, adjusted_mode);
+       }
+       
+       /* Now, enable the clocks, plane, pipe, and outputs that we set up. */
+       crtc->funcs->commit(crtc);
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+
+               if (output->crtc != crtc)
+                       continue;
+               
+               output->funcs->commit(output);
+
+#if 0 // TODO def RANDR_12_INTERFACE
+               if (output->randr_output)
+                       RRPostPendingProperties (output->randr_output);
+#endif
+       }
+       
+       /* XXX free adjustedmode */
+       drm_mode_destroy(dev, adjusted_mode);
+       ret = TRUE;
+       /* TODO */
+//     if (scrn->pScreen)
+//             drm_crtc_set_screen_sub_pixel_order(dev);
+
+done:
+       if (!ret) {
+               crtc->x = saved_x;
+               crtc->y = saved_y;
+               crtc->mode = saved_mode;
+       }
+       
+       if (didLock)
+               crtc->funcs->unlock (crtc);
+       
+       return ret;
+}
+EXPORT_SYMBOL(drm_crtc_set_mode);
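+
+/*
+ * Illustrative summary -- not part of the original file. For a CRTC and the
+ * outputs attached to it, drm_crtc_set_mode() above drives the driver
+ * callbacks in a fixed order:
+ *
+ *   1. output->mode_fixup(), crtc->mode_fixup()  -- may reject the mode
+ *   2. output->prepare(), crtc->prepare()        -- typically disables hw
+ *   3. crtc->mode_set(), output->mode_set()      -- programs timings/DPLL
+ *   4. crtc->commit(), output->commit()          -- re-enables hw
+ *
+ * On any rejection the saved mode and x/y offsets are restored.
+ */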
+
+/**
+ * drm_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If an output or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_disable_unused_functions(struct drm_device *dev)
+{
+       struct drm_output *output;
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               if (!output->crtc)
+                       (*output->funcs->dpms)(output, DPMSModeOff);
+       }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (!crtc->enabled)
+                       crtc->funcs->dpms(crtc, DPMSModeOff);
+       }
+}
+EXPORT_SYMBOL(drm_disable_unused_functions);
+
+/**
+ * drm_mode_probed_add - add a mode to the specified output's probed mode list
+ * @output: output to add the new mode to
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ * 
+ * Add @mode to @output's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_output *output,
+                        struct drm_display_mode *mode)
+{
+       list_add(&mode->head, &output->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @output: output list to modify
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ * 
+ * Remove @mode from @output's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode)
+{
+       list_del(&mode->head);
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_output_create - create a new output
+ * @dev: DRM device
+ * @funcs: callbacks for this output
+ * @name: user visible name of the output
+ *
+ * LOCKING:
+ * Caller must hold @dev's mode_config lock.
+ *
+ * Creates a new drm_output structure and adds it to @dev's mode_config
+ * structure.
+ *
+ * RETURNS:
+ * Pointer to the new output or NULL on error.
+ */
+struct drm_output *drm_output_create(struct drm_device *dev,
+                                    const struct drm_output_funcs *funcs,
+                                    const char *name)
+{
+       struct drm_output *output = NULL;
+
+       output = kzalloc(sizeof(struct drm_output), GFP_KERNEL);
+       if (!output)
+               return NULL;
+               
+       output->dev = dev;
+       output->funcs = funcs;
+       output->id = drm_idr_get(dev, output);
+       if (name)
+               strncpy(output->name, name, DRM_OUTPUT_LEN);
+       output->name[DRM_OUTPUT_LEN - 1] = 0;
+       output->subpixel_order = SubPixelUnknown;
+       INIT_LIST_HEAD(&output->probed_modes);
+       INIT_LIST_HEAD(&output->modes);
+       /* randr_output? */
+       /* output_set_monitor(output)? */
+       /* check for output_ignored(output)? */
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_add_tail(&output->head, &dev->mode_config.output_list);
+       dev->mode_config.num_output++;
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return output;
+
+}
+EXPORT_SYMBOL(drm_output_create);
+
+/**
+ * drm_output_destroy - remove an output
+ * @output: output to remove
+ *
+ * LOCKING:
+ * Caller must hold @dev's mode_config lock.
+ *
+ * Call @output's cleanup function, then remove the output from the DRM
+ * mode_config after freeing @output's modes.
+ */
+void drm_output_destroy(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       struct drm_display_mode *mode, *t;
+
+       if (output->funcs->cleanup)
+               (*output->funcs->cleanup)(output);
+
+       list_for_each_entry_safe(mode, t, &output->probed_modes, head)
+               drm_mode_remove(output, mode);
+
+       list_for_each_entry_safe(mode, t, &output->modes, head)
+               drm_mode_remove(output, mode);
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_idr_put(dev, output->id);
+       list_del(&output->head);
+       mutex_unlock(&dev->mode_config.mutex);
+       kfree(output);
+}
+EXPORT_SYMBOL(drm_output_destroy);
+
+/**
+ * drm_output_rename - rename an output
+ * @output: output to rename
+ * @name: new user visible name
+ *
+ * LOCKING:
+ * None.
+ *
+ * Simply stuff a new name into @output's name field, based on @name.
+ *
+ * RETURNS:
+ * True if the name was changed, false otherwise.
+ */
+bool drm_output_rename(struct drm_output *output, const char *name)
+{
+       if (!name)
+               return false;
+
+       strncpy(output->name, name, DRM_OUTPUT_LEN);
+       output->name[DRM_OUTPUT_LEN - 1] = 0;
+
+       DRM_DEBUG("Changed name to %s\n", output->name);
+//     drm_output_set_monitor(output);
+//     if (drm_output_ignored(output))
+//             return FALSE;
+
+       return TRUE;
+}
+EXPORT_SYMBOL(drm_output_rename);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+       struct drm_display_mode *nmode;
+
+       nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+       if (!nmode)
+               return NULL;
+
+       nmode->mode_id = drm_idr_get(dev, nmode);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       drm_idr_put(dev, mode->mode_id);
+
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+       mutex_init(&dev->mode_config.mutex);
+       INIT_LIST_HEAD(&dev->mode_config.fb_list);
+       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+       INIT_LIST_HEAD(&dev->mode_config.output_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_list);
+       INIT_LIST_HEAD(&dev->mode_config.usermode_list);
+       idr_init(&dev->mode_config.crtc_idr);
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_get_buffer_object - find the buffer object for a given handle
+ * @dev: DRM device
+ * @bo: pointer to caller's buffer_object pointer
+ * @handle: handle to lookup
+ *
+ * LOCKING:
+ * Must take @dev's struct_mutex to protect buffer object lookup.
+ *
+ * Given @handle, lookup the buffer object in @dev and put it in the caller's
+ * @bo pointer.
+ *
+ * RETURNS:
+ * Zero on success, -EINVAL if the handle couldn't be found.
+ */
+static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
+{
+       struct drm_user_object *uo;
+       struct drm_hash_item *hash;
+       int ret;
+
+       *bo = NULL;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
+       if (ret) {
+               DRM_ERROR("Couldn't find handle.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       uo = drm_hash_entry(hash, struct drm_user_object, hash);
+       if (uo->type != drm_buffer_type) {
+               ret = -EINVAL;
+               goto out_err;
+       }
+       
+       *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
+       ret = 0;
+out_err:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+char drm_init_mode[32];
+int drm_init_xres;
+int drm_init_yres;
+EXPORT_SYMBOL(drm_init_mode);
+EXPORT_SYMBOL(drm_init_xres);
+EXPORT_SYMBOL(drm_init_yres);
+
+/**
+ * drm_pick_crtcs - pick crtcs for output devices
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ */
+static void drm_pick_crtcs (struct drm_device *dev)
+{
+       int c, o, assigned;
+       struct drm_output *output, *output_equal;
+       struct drm_crtc   *crtc;
+       struct drm_display_mode *des_mode = NULL, *modes, *modes_equal;
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               output->crtc = NULL;
+
+               /* Don't hook up outputs that are disconnected ??
+                *
+                * This is debatable. Do we want fixed /dev/fbX or
+                * dynamic on hotplug (need mode code for that though) ?
+                *
+                * If we don't hook up outputs now, then we only create
+                * /dev/fbX for the output that's enabled, that's good as
+                * the users console will be on that output.
+                *
+                * If we do hook up outputs that are disconnected now, then
+                * the user may end up having to muck about with the fbcon
+                * map flags to assign his console to the enabled output. Ugh.
+                */
+               if (output->status != output_status_connected)
+                       continue;
+
+               des_mode = NULL;
+               list_for_each_entry(des_mode, &output->modes, head) {
+                       if (/* !strcmp(des_mode->name, drm_init_mode) ||  */
+                           des_mode->hdisplay==drm_init_xres
+                           && des_mode->vdisplay==drm_init_yres) {
+                               des_mode->type |= DRM_MODE_TYPE_USERPREF;
+                               break;
+                       }
+
+               }
+               /* No userdef mode (initial mode set from module parameter) */
+               if (!des_mode || !(des_mode->type & DRM_MODE_TYPE_USERPREF)) {
+                       list_for_each_entry(des_mode, &output->modes, head) {
+                               if (des_mode->type & DRM_MODE_TYPE_PREFERRED)
+                                       break;
+                       }
+               }
+
+               /* No preferred mode, and no default mode, let's just
+                  select the first available */
+               if (!des_mode || (!(des_mode->type & DRM_MODE_TYPE_PREFERRED)
+                                 && !(des_mode->type & DRM_MODE_TYPE_USERPREF))) {
+                       list_for_each_entry(des_mode, &output->modes, head) {
+                               if (des_mode)
+                                       break;
+                       }
+               }
+
+               c = -1;
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       assigned = 0;
+
+                       c++;
+                       if ((output->possible_crtcs & (1 << c)) == 0)
+                               continue;
+       
+                       list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
+                               if (output->id == output_equal->id)
+                                       continue;
+
+                               /* Find out if crtc has been assigned before */
+                               if (output_equal->crtc == crtc)
+                                       assigned = 1;
+                       }
+
+#if 1 /* continue for now */
+                       if (assigned)
+                               continue;
+#endif
+
+                       o = -1;
+                       list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
+                               o++;
+                               if (output->id == output_equal->id)
+                                       continue;
+
+                               list_for_each_entry(modes, &output->modes, head) {
+                                       list_for_each_entry(modes_equal, &output_equal->modes, head) {
+                                               if (drm_mode_equal (modes, modes_equal)) {
+                                                       if ((output->possible_clones & output_equal->possible_clones) && (output_equal->crtc == crtc)) {
+                                                               printk("Cloning %s (0x%lx) to %s (0x%lx)\n",output->name,output->possible_clones,output_equal->name,output_equal->possible_clones);
+                                                               assigned = 0;
+                                                               goto clone;
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+
+clone:
+                       /* crtc has been assigned skip it */
+                       if (assigned)
+                               continue;
+
+                       /* Found a CRTC to attach to, do it ! */
+                       output->crtc = crtc;
+                       output->crtc->desired_mode = des_mode;
+                       output->initial_x = 0;
+                       output->initial_y = 0;
+                       DRM_DEBUG("Desired mode for CRTC %d is 0x%x:%s\n",c,des_mode->mode_id, des_mode->name);
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_pick_crtcs);
+
+/**
+ * drm_initial_config - setup a sane initial output configuration
+ * @dev: DRM device
+ * @can_grow: this configuration is growable
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and outputs and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_initial_config(struct drm_device *dev, bool can_grow)
+{
+       struct drm_output *output;
+       struct drm_crtc *crtc;
+       bool ret = false;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       drm_crtc_probe_output_modes(dev, 2048, 2048);
+
+       drm_pick_crtcs(dev);
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+               /* can't setup the crtc if there's no assigned mode */
+               if (!crtc->desired_mode)
+                       continue;
+
+               /* Now setup the fbdev for attached crtcs */
+               dev->driver->fb_probe(dev, crtc);
+       }
+
+       /* This is a little screwy, as we've already walked the outputs
+        * above, but it's a little bit of magic too. There's the potential
+        * for things not to get set up above if an existing device gets
+        * re-assigned, confusing the hardware. Walking the outputs again
+        * fixes up their CRTCs.
+        */
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+
+               /* can't setup the output if there's no assigned mode */
+               if (!output->crtc || !output->crtc->desired_mode)
+                       continue;
+
+               /* and needs an attached fb */
+               if (output->crtc->fb)
+                       drm_crtc_set_mode(output->crtc, output->crtc->desired_mode, 0, 0);
+       }
+
+       drm_disable_unused_functions(dev);
+
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_initial_config);
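+
+/*
+ * Illustrative sketch -- hypothetical driver load path, not part of this
+ * file. The intended pairing of the mode config helpers is roughly:
+ *
+ *   static int example_driver_load(struct drm_device *dev)
+ *   {
+ *           drm_mode_config_init(dev);
+ *           // create CRTCs/outputs via drm_crtc_create()/drm_output_create()
+ *           drm_initial_config(dev, false);
+ *           return 0;
+ *   }
+ *
+ * with drm_mode_config_cleanup(dev) undoing the whole thing at unload.
+ */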
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the outputs and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+       struct drm_output *output, *ot;
+       struct drm_crtc *crtc, *ct;
+       struct drm_framebuffer *fb, *fbt;
+       struct drm_display_mode *mode, *mt;
+       struct drm_property *property, *pt;
+
+       list_for_each_entry_safe(output, ot, &dev->mode_config.output_list, head) {
+               drm_output_destroy(output);
+       }
+
+       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) {
+               drm_property_destroy(dev, property);
+       }
+
+       list_for_each_entry_safe(mode, mt, &dev->mode_config.usermode_list, head) {
+               drm_mode_destroy(dev, mode);
+       }
+
+       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+               if (fb->bo->type != drm_bo_type_kernel)
+                       drm_framebuffer_destroy(fb);
+               else
+                       dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
+       }
+
+       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+               drm_crtc_destroy(crtc);
+       }
+
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_crtc_set_config - set a new config from userspace
+ * @crtc: CRTC to setup
+ * @crtc_info: user provided configuration
+ * @new_mode: new mode to set
+ * @output_set: set of outputs for the new config
+ * @fb: new framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Setup a new configuration, provided by the user in @crtc_info, and enable
+ * it.
+ *
+ * RETURNS:
+ * Zero. (FIXME)
+ */
+int drm_crtc_set_config(struct drm_crtc *crtc, struct drm_mode_crtc *crtc_info,
+                       struct drm_display_mode *new_mode,
+                       struct drm_output **output_set,
+                       struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_crtc **save_crtcs, *new_crtc;
+       bool save_enabled = crtc->enabled;
+       bool changed = false;
+       struct drm_output *output;
+       int count = 0, ro;
+
+       save_crtcs = kzalloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc *), GFP_KERNEL);
+       if (!save_crtcs)
+               return -ENOMEM;
+
+       if (crtc->fb != fb)
+               changed = true;
+
+       if (crtc_info->x != crtc->x || crtc_info->y != crtc->y)
+               changed = true;
+
+       if (new_mode && (crtc->mode.mode_id != new_mode->mode_id))
+               changed = true;
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               save_crtcs[count++] = output->crtc;
+
+               if (output->crtc == crtc)
+                       new_crtc = NULL;
+               else
+                       new_crtc = output->crtc;
+
+               for (ro = 0; ro < crtc_info->count_outputs; ro++) {
+                       if (output_set[ro] == output)
+                               new_crtc = crtc;
+               }
+               if (new_crtc != output->crtc) {
+                       changed = true;
+                       output->crtc = new_crtc;
+               }
+       }
+
+       if (changed) {
+               crtc->fb = fb;
+               crtc->enabled = (new_mode != NULL);
+               if (new_mode != NULL) {
+                       DRM_DEBUG("attempting to set mode from userspace\n");
+                       drm_mode_debug_printmodeline(dev, new_mode);
+                       if (!drm_crtc_set_mode(crtc, new_mode, crtc_info->x,
+                                              crtc_info->y)) {
+                               crtc->enabled = save_enabled;
+                               count = 0;
+                               list_for_each_entry(output, &dev->mode_config.output_list, head)
+                                       output->crtc = save_crtcs[count++];
+                               kfree(save_crtcs);
+                               return -EINVAL;
+                       }
+                       crtc->desired_x = crtc_info->x;
+                       crtc->desired_y = crtc_info->y;
+                       crtc->desired_mode = new_mode;
+               }
+               drm_disable_unused_functions(dev);
+       }
+       kfree(save_crtcs);
+       return 0;
+}
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, struct drm_display_mode *in)
+{
+
+       out->id = in->mode_id;
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the user
+ * @in: drm_mode_modeinfo to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ */
+void drm_crtc_convert_umode(struct drm_display_mode *out, struct drm_mode_modeinfo *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+       
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: DRM device
+ * @data: ioctl argument, a struct drm_mode_card_res
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, output and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_card_res *card_res = data;
+       struct list_head *lh;
+       struct drm_framebuffer *fb;
+       struct drm_output *output;
+       struct drm_crtc *crtc;
+       struct drm_mode_modeinfo u_mode;
+       struct drm_display_mode *mode;
+       int ret = 0;
+       int mode_count= 0;
+       int output_count = 0;
+       int crtc_count = 0;
+       int fb_count = 0;
+       int copied = 0;
+
+       memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       list_for_each(lh, &dev->mode_config.fb_list)
+               fb_count++;
+
+       list_for_each(lh, &dev->mode_config.crtc_list)
+               crtc_count++;
+
+       list_for_each_entry(output, &dev->mode_config.output_list,
+                           head) {
+               output_count++;
+               list_for_each(lh, &output->modes)
+                       mode_count++;
+       }
+       list_for_each(lh, &dev->mode_config.usermode_list)
+               mode_count++;
+
+       if (card_res->count_modes == 0) {
+               DRM_DEBUG("probing modes %dx%d\n", dev->mode_config.max_width, dev->mode_config.max_height);
+               drm_crtc_probe_output_modes(dev, dev->mode_config.max_width, dev->mode_config.max_height);
+               mode_count = 0;
+               list_for_each_entry(output, &dev->mode_config.output_list, head) {
+                       list_for_each(lh, &output->modes)
+                               mode_count++;
+               }
+               list_for_each(lh, &dev->mode_config.usermode_list)
+                       mode_count++;
+       }
+
+       /* handle this in 4 parts */
+       /* FBs */
+       if (card_res->count_fbs >= fb_count) {
+               copied = 0;
+               list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+                       if (put_user(fb->id, card_res->fb_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_fbs = fb_count;
+
+       /* CRTCs */
+       if (card_res->count_crtcs >= crtc_count) {
+               copied = 0;
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head){
+                       DRM_DEBUG("CRTC ID is %d\n", crtc->id);
+                       if (put_user(crtc->id, card_res->crtc_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_crtcs = crtc_count;
+
+
+       /* Outputs */
+       if (card_res->count_outputs >= output_count) {
+               copied = 0;
+               list_for_each_entry(output, &dev->mode_config.output_list,
+                                   head) {
+                       DRM_DEBUG("OUTPUT ID is %d\n", output->id);
+                       if (put_user(output->id, card_res->output_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_outputs = output_count;
+       
+       /* Modes */
+       if (card_res->count_modes >= mode_count) {
+               copied = 0;
+               list_for_each_entry(output, &dev->mode_config.output_list,
+                                   head) {
+                       list_for_each_entry(mode, &output->modes, head) {
+                               drm_crtc_convert_to_umode(&u_mode, mode);
+                               if (copy_to_user(card_res->modes + copied,
+                                                &u_mode, sizeof(u_mode))) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+               /* add in user modes */
+               list_for_each_entry(mode, &dev->mode_config.usermode_list, head) {
+                       drm_crtc_convert_to_umode(&u_mode, mode);
+                       if (copy_to_user(card_res->modes + copied, &u_mode,
+                                        sizeof(u_mode))) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_modes = mode_count;
+
+       DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
+                 card_res->count_outputs,
+                 card_res->count_modes);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
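+
+/*
+ * Note on the calling convention (a sketch of the expected two-pass use from
+ * the ioctl caller's side; the request name DRM_IOCTL_MODE_GETRESOURCES is
+ * assumed to be the one bound to this handler in drm.h):
+ *
+ *     struct drm_mode_card_res res;
+ *
+ *     memset(&res, 0, sizeof(res));
+ *     ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);   // fills counts only
+ *     // allocate count_fbs/count_crtcs/count_outputs/count_modes entries
+ *     // for the fb_id, crtc_id, output_id and modes arrays, then:
+ *     ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);   // fills IDs and modes
+ */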
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_crtc)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc *crtc_resp = data;
+       struct drm_crtc *crtc;
+       struct drm_output *output;
+       int ocount;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       crtc = idr_find(&dev->mode_config.crtc_idr, crtc_resp->crtc_id);
+       if (!crtc || (crtc->id != crtc_resp->crtc_id)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       crtc_resp->x = crtc->x;
+       crtc_resp->y = crtc->y;
+
+       if (crtc->fb)
+               crtc_resp->fb_id = crtc->fb->id;
+       else
+               crtc_resp->fb_id = 0;
+
+       crtc_resp->outputs = 0;
+       if (crtc->enabled) {
+
+               crtc_resp->mode = crtc->mode.mode_id;
+               ocount = 0;
+               list_for_each_entry(output, &dev->mode_config.output_list, head) {
+                       if (output->crtc == crtc)
+                               crtc_resp->outputs |= 1 << (ocount++);
+               }
+       } else {
+               crtc_resp->mode = 0;
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getoutput - get output configuration
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_get_output)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct an output configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getoutput(struct drm_device *dev,
+                      void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_get_output *out_resp = data;
+       struct drm_output *output;
+       struct drm_display_mode *mode;
+       int mode_count = 0;
+       int props_count = 0;
+       int ret = 0;
+       int copied = 0;
+       int i;
+
+       DRM_DEBUG("output id %d:\n", out_resp->output);
+
+       mutex_lock(&dev->mode_config.mutex);
+       output = idr_find(&dev->mode_config.crtc_idr, out_resp->output);
+       if (!output || (output->id != out_resp->output)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       list_for_each_entry(mode, &output->modes, head)
+               mode_count++;
+       
+       for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++)
+               if (output->user_mode_ids[i] != 0)
+                       mode_count++;
+
+       for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
+               if (output->property_ids[i] != 0) {
+                       props_count++;
+               }
+       }
+
+       strncpy(out_resp->name, output->name, DRM_OUTPUT_NAME_LEN);
+       out_resp->name[DRM_OUTPUT_NAME_LEN-1] = 0;
+
+       out_resp->mm_width = output->mm_width;
+       out_resp->mm_height = output->mm_height;
+       out_resp->subpixel = output->subpixel_order;
+       out_resp->connection = output->status;
+       if (output->crtc)
+               out_resp->crtc = output->crtc->id;
+       else
+               out_resp->crtc = 0;
+
+       out_resp->crtcs = output->possible_crtcs;
+       out_resp->clones = output->possible_clones;
+
+       if ((out_resp->count_modes >= mode_count) && mode_count) {
+               copied = 0;
+               list_for_each_entry(mode, &output->modes, head) {
+                       if (put_user(mode->mode_id, out_resp->modes + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+               for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
+                       if (output->user_mode_ids[i] != 0) {
+                               if (put_user(output->user_mode_ids[i], out_resp->modes + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_modes = mode_count;
+
+       if ((out_resp->count_props >= props_count) && props_count) {
+               copied = 0;
+               for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
+                       if (output->property_ids[i] != 0) {
+                               if (put_user(output->property_ids[i], out_resp->props + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+
+                               if (put_user(output->property_values[i], out_resp->prop_values + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_props = props_count;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_crtc)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc *crtc_req = data;
+       struct drm_crtc *crtc;
+       struct drm_output **output_set = NULL, *output;
+       struct drm_display_mode *mode;
+       struct drm_framebuffer *fb = NULL;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+       crtc = idr_find(&dev->mode_config.crtc_idr, crtc_req->crtc_id);
+       if (!crtc || (crtc->id != crtc_req->crtc_id)) {
+               DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->mode) {
+               /* if we have a mode we need a framebuffer */
+               if (crtc_req->fb_id) {
+                       fb = idr_find(&dev->mode_config.crtc_idr, crtc_req->fb_id);
+                       if (!fb || (fb->id != crtc_req->fb_id)) {
+                               DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+               }
+               mode = idr_find(&dev->mode_config.crtc_idr, crtc_req->mode);
+               if (!mode || (mode->mode_id != crtc_req->mode)) {
+                       struct drm_output *output;
+                       
+                       list_for_each_entry(output, 
+                                           &dev->mode_config.output_list,
+                                           head) {
+                               list_for_each_entry(mode, &output->modes,
+                                                   head) {
+                                       drm_mode_debug_printmodeline(dev, 
+                                                                    mode);
+                               }
+                       }
+
+                       DRM_DEBUG("Unknown mode id %d, %p\n", crtc_req->mode, mode);
+                       ret = -EINVAL;
+                       goto out;
+               }
+       } else
+               mode = NULL;
+
+       if (crtc_req->count_outputs == 0 && mode) {
+               DRM_DEBUG("Count outputs is 0 but mode set\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_outputs > 0 && !mode && !fb) {
+               DRM_DEBUG("Count outputs is %d but no mode or fb set\n", crtc_req->count_outputs);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_outputs > 0) {
+               u32 out_id;
+               output_set = kcalloc(crtc_req->count_outputs,
+                                    sizeof(struct drm_output *), GFP_KERNEL);
+               if (!output_set) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               for (i = 0; i < crtc_req->count_outputs; i++) {
+                       if (get_user(out_id, &crtc_req->set_outputs[i])) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+
+                       output = idr_find(&dev->mode_config.crtc_idr, out_id);
+                       if (!output || (out_id != output->id)) {
+                               DRM_DEBUG("Output id %d unknown\n", out_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
+                       output_set[i] = output;
+               }
+       }
+               
+       ret = drm_crtc_set_config(crtc, crtc_req, mode, output_set, fb);
+
+out:
+       kfree(output_set);
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_fb_cmd)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       struct drm_buffer_object *bo;
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       if ((config->min_width > r->width) || (r->width > config->max_width)) {
+               DRM_ERROR("mode new framebuffer width not within limits\n");
+               return -EINVAL;
+       }
+       if ((config->min_height > r->height) || (r->height > config->max_height)) {
+               DRM_ERROR("mode new framebuffer height not within limits\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+       /* TODO check limits are okay */
+       ret = drm_get_buffer_object(dev, &bo, r->handle);
+       if (ret || !bo) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* TODO check buffer is sufficiently large */
+       /* TODO setup destructor callback */
+
+       fb = drm_framebuffer_create(dev);
+       if (!fb) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       fb->width = r->width;
+       fb->height = r->height;
+       fb->pitch = r->pitch;
+       fb->bits_per_pixel = r->bpp;
+       fb->depth = r->depth;
+       fb->offset = bo->offset;
+       fb->bo = bo;
+
+       r->buffer_id = fb->id;
+
+       list_add(&fb->filp_head, &file_priv->fbs);
+
+       /* FIXME: bind the fb to the right crtc */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               crtc->fb = fb;
+               dev->driver->fb_probe(dev, crtc);
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: DRM device
+ * @data: ioctl data (framebuffer id)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_framebuffer *fb = NULL;
+       uint32_t *id = data;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       fb = idr_find(&dev->mode_config.crtc_idr, *id);
+       /* TODO check that we really get a framebuffer back. */
+       if (!fb || (*id != fb->id)) {
+               DRM_ERROR("mode invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* TODO check if we own the buffer */
+       /* TODO release all crtcs connected to the framebuffer */
+       /* bind the fb to the crtc for now */
+       /* TODO unhook the destructor from the buffer object */
+
+       if (fb->bo->type != drm_bo_type_kernel)
+               drm_framebuffer_destroy(fb);
+       else
+               dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_fb_cmd)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       fb = idr_find(&dev->mode_config.crtc_idr, r->buffer_id);
+       if (!fb || (r->buffer_id != fb->id)) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       r->height = fb->height;
+       r->width = fb->width;
+       r->depth = fb->depth;
+       r->bpp = fb->bits_per_pixel;
+       r->handle = fb->bo->base.hash.key;
+       r->pitch = fb->pitch;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @filp: file * from the ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Destroy all the FBs associated with @filp.
+ *
+ * Called when the file is released.
+ */
+void drm_fb_release(struct file *filp)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_framebuffer *fb, *tfb;
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+               list_del(&fb->filp_head);
+               if (fb->bo->type != drm_bo_type_kernel)
+                       drm_framebuffer_destroy(fb);
+               else
+                       dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+/**
+ * drm_mode_addmode - add a user defined mode to the global mode list
+ * @dev: DRM device
+ * @user_mode: user defined mode to add
+ *
+ * LOCKING:
+ * Caller must hold the mode config lock.
+ *
+ * Flag @user_mode as user defined and add it to the usermode list.
+ */
+void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode)
+{
+       user_mode->type |= DRM_MODE_TYPE_USERDEF;
+
+       user_mode->output_count = 0;
+       list_add(&user_mode->head, &dev->mode_config.usermode_list);
+}
+EXPORT_SYMBOL(drm_mode_addmode);
+
+int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       struct drm_display_mode *t;
+       int ret = -EINVAL;
+       list_for_each_entry(t, &dev->mode_config.usermode_list, head) {
+               if (t == mode) {
+                       list_del(&mode->head);
+                       drm_mode_destroy(dev, mode);
+                       ret = 0;
+                       break;
+               }
+       }
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_rmmode);
+
+static int drm_mode_attachmode(struct drm_device *dev,
+                              struct drm_output *output,
+                              struct drm_display_mode *mode)
+{
+       int ret = 0;
+       int i;
+
+       for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
+               if (output->user_mode_ids[i] == 0) {
+                       output->user_mode_ids[i] = mode->mode_id;
+                       mode->output_count++;
+                       break;
+               }
+       }
+
+       if (i == DRM_OUTPUT_MAX_UMODES)
+               ret = -ENOSPC;
+
+       return ret;
+}
+
+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+                            struct drm_display_mode *mode)
+{
+       struct drm_output *output;
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               if (output->crtc == crtc)
+                       drm_mode_attachmode(dev, output, mode);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+
+static int drm_mode_detachmode(struct drm_device *dev,
+                              struct drm_output *output,
+                              struct drm_display_mode *mode)
+{
+       int found = 0;
+       int ret = 0, i;
+
+       for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
+               if (output->user_mode_ids[i] == mode->mode_id) {
+                       output->user_mode_ids[i] = 0;
+                       mode->output_count--;
+                       found = 1;
+               }
+       }
+
+       if (!found)
+               ret = -EINVAL;
+
+       return ret;
+}
+
+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       struct drm_output *output;
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               drm_mode_detachmode(dev, output, mode);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+
+/**
+ * drm_mode_addmode_ioctl - add a user defined mode
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_modeinfo)
+ * @file_priv: DRM file private
+ *
+ * Add a user specified mode to the kernel.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Writes the new mode id back into the passed-in modeinfo.
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addmode_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_modeinfo *new_mode = data;
+       struct drm_display_mode *user_mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       user_mode = drm_mode_create(dev);
+       if (!user_mode) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       drm_crtc_convert_umode(user_mode, new_mode);
+
+       drm_mode_addmode(dev, user_mode);
+       new_mode->id = user_mode->mode_id;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_rmmode_ioctl - remove a user defined mode
+ * @dev: DRM device
+ * @data: ioctl data (mode id)
+ * @file_priv: DRM file private
+ *
+ * Remove the user defined mode specified by the user.
+ *
+ * Called by the user via ioctl
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmmode_ioctl(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv)
+{
+       uint32_t *id = data;
+       struct drm_display_mode *mode;
+       int ret = -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);    
+       mode = idr_find(&dev->mode_config.crtc_idr, *id);
+       if (!mode || (*id != mode->mode_id)) {
+               goto out;
+       }
+
+       if (!(mode->type & DRM_MODE_TYPE_USERDEF)) {
+               goto out;
+       }
+
+       if (mode->output_count) {
+               goto out;
+       }
+
+       ret = drm_mode_rmmode(dev, mode);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_attachmode_ioctl - attach a user mode to an output
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_mode_cmd)
+ * @file_priv: DRM file private
+ *
+ * This attaches a user specified mode to an output.
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_attachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_mode_cmd *mode_cmd = data;
+       struct drm_output *output;
+       struct drm_display_mode *mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
+       if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
+       if (!output || (output->id != mode_cmd->output_id)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = drm_mode_attachmode(dev, output, mode);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+
+/**
+ * drm_mode_detachmode_ioctl - detach a user specified mode from an output
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_mode_cmd)
+ * @file_priv: DRM file private
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_detachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_mode_cmd *mode_cmd = data;
+       struct drm_output *output;
+       struct drm_display_mode *mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
+       if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
+       if (!output || (output->id != mode_cmd->output_id)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+
+       ret = drm_mode_detachmode(dev, output, mode);
+out:          
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                        const char *name, int num_values)
+{
+       struct drm_property *property = NULL;
+
+       property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+       if (!property)
+               return NULL;
+       
+       property->values = kzalloc(sizeof(uint32_t)*num_values, GFP_KERNEL);
+       if (!property->values)
+               goto fail;
+
+       property->id = drm_idr_get(dev, property);
+       property->flags = flags;
+       property->num_values = num_values;
+       INIT_LIST_HEAD(&property->enum_list);
+
+       if (name) {
+               strncpy(property->name, name, DRM_PROP_NAME_LEN);
+               property->name[DRM_PROP_NAME_LEN-1] = '\0';
+       }
+
+       list_add_tail(&property->head, &dev->mode_config.property_list);
+       return property;
+fail:
+       kfree(property);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+int drm_property_add_enum(struct drm_property *property, int index,
+                         uint32_t value, const char *name)
+{
+       struct drm_property_enum *prop_enum;
+
+       if (!(property->flags & DRM_MODE_PROP_ENUM))
+               return -EINVAL;
+
+       if (!list_empty(&property->enum_list)) {
+               list_for_each_entry(prop_enum, &property->enum_list, head) {
+                       if (prop_enum->value == value) {
+                               strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); 
+                               prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+                               return 0;
+                       }
+               }
+       }
+
+       prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+       if (!prop_enum)
+               return -ENOMEM;
+
+       strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); 
+       prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+       prop_enum->value = value;
+
+       property->values[index] = value;
+       list_add_tail(&prop_enum->head, &property->enum_list);
+       return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+       struct drm_property_enum *prop_enum, *pt;
+
+       list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
+               list_del(&prop_enum->head);
+               kfree(prop_enum);
+       }
+
+       kfree(property->values);
+       drm_idr_put(dev, property->id);
+       list_del(&property->head);
+       kfree(property);        
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+
+int drm_output_attach_property(struct drm_output *output,
+                              struct drm_property *property, int init_val)
+{
+       int i;
+
+       for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
+               if (output->property_ids[i] == 0) {
+                       output->property_ids[i] = property->id;
+                       output->property_values[i] = init_val;
+                       break;
+               }
+       }
+
+       if (i == DRM_OUTPUT_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_output_attach_property);
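+
+/*
+ * Usage sketch (illustrative only, not called in this file): create a small
+ * enum property, with hypothetical name and values, and attach it to an
+ * output with an initial value of 0:
+ *
+ *     struct drm_property *prop;
+ *
+ *     prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling", 3);
+ *     if (prop) {
+ *             drm_property_add_enum(prop, 0, 0, "off");
+ *             drm_property_add_enum(prop, 1, 1, "full");
+ *             drm_property_add_enum(prop, 2, 2, "aspect");
+ *             drm_output_attach_property(output, prop, 0);
+ *     }
+ */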
+
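+/**
+ * drm_mode_getproperty_ioctl - get property metadata, values and enums
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_get_property)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Look up the property by id and copy its name, flags, value table and enum
+ * list back to the user, following the same count-then-copy convention as
+ * the other getters above.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */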
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_get_property *out_resp = data;
+       struct drm_property *property;
+       int enum_count = 0;
+       int value_count = 0;
+       int ret = 0, i;
+       int copied;
+       struct drm_property_enum *prop_enum;
+
+       mutex_lock(&dev->mode_config.mutex);
+       property = idr_find(&dev->mode_config.crtc_idr, out_resp->prop_id);
+       if (!property || (property->id != out_resp->prop_id)) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+
+       list_for_each_entry(prop_enum, &property->enum_list, head)
+               enum_count++;
+
+       value_count = property->num_values;
+
+       strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+       out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+       out_resp->flags = property->flags;
+
+       if ((out_resp->count_values >= value_count) && value_count) {
+               for (i = 0; i < value_count; i++) {
+                       if (put_user(property->values[i], out_resp->values + i)) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+       out_resp->count_values = value_count;
+
+       if ((out_resp->count_enums >= enum_count) && enum_count) {
+               copied = 0;
+               list_for_each_entry(prop_enum, &property->enum_list, head) {
+                       if (put_user(prop_enum->value, &out_resp->enums[copied].value)) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+
+                       if (copy_to_user(&out_resp->enums[copied].name,
+                                        prop_enum->name, DRM_PROP_NAME_LEN)) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+                       copied++;
+               }
+       }
+       out_resp->count_enums = enum_count;
+
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
diff --git a/psb-kernel-source-4.41.1/drm_crtc.h b/psb-kernel-source-4.41.1/drm_crtc.h
new file mode 100644 (file)
index 0000000..07bfe04
--- /dev/null
@@ -0,0 +1,592 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ */
+#ifndef __DRM_CRTC_H__
+#define __DRM_CRTC_H__
+
+#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+
+#include <linux/fb.h>
+
+struct drm_device;
+
+/*
+ * Note on terminology:  here, for brevity and convenience, we refer to output
+ * control chips as 'CRTCs'.  They can control any type of output, VGA, LVDS,
+ * DVI, etc.  And 'screen' refers to the whole of the visible display, which
+ * may span multiple monitors (and therefore multiple CRTC and output
+ * structures).
+ */
+
+enum drm_mode_status {
+    MODE_OK    = 0,    /* Mode OK */
+    MODE_HSYNC,                /* hsync out of range */
+    MODE_VSYNC,                /* vsync out of range */
+    MODE_H_ILLEGAL,    /* mode has illegal horizontal timings */
+    MODE_V_ILLEGAL,    /* mode has illegal vertical timings */
+    MODE_BAD_WIDTH,    /* requires an unsupported linepitch */
+    MODE_NOMODE,       /* no mode with a matching name */
+    MODE_NO_INTERLACE, /* interlaced mode not supported */
+    MODE_NO_DBLESCAN,  /* doublescan mode not supported */
+    MODE_NO_VSCAN,     /* multiscan mode not supported */
+    MODE_MEM,          /* insufficient video memory */
+    MODE_VIRTUAL_X,    /* mode width too large for specified virtual size */
+    MODE_VIRTUAL_Y,    /* mode height too large for specified virtual size */
+    MODE_MEM_VIRT,     /* insufficient video memory given virtual size */
+    MODE_NOCLOCK,      /* no fixed clock available */
+    MODE_CLOCK_HIGH,   /* clock required is too high */
+    MODE_CLOCK_LOW,    /* clock required is too low */
+    MODE_CLOCK_RANGE,  /* clock/mode isn't in a ClockRange */
+    MODE_BAD_HVALUE,   /* horizontal timing was out of range */
+    MODE_BAD_VVALUE,   /* vertical timing was out of range */
+    MODE_BAD_VSCAN,    /* VScan value out of range */
+    MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+    MODE_HSYNC_WIDE,   /* horizontal sync too wide */
+    MODE_HBLANK_NARROW,        /* horizontal blanking too narrow */
+    MODE_HBLANK_WIDE,  /* horizontal blanking too wide */
+    MODE_VSYNC_NARROW, /* vertical sync too narrow */
+    MODE_VSYNC_WIDE,   /* vertical sync too wide */
+    MODE_VBLANK_NARROW,        /* vertical blanking too narrow */
+    MODE_VBLANK_WIDE,  /* vertical blanking too wide */
+    MODE_PANEL,         /* exceeds panel dimensions */
+    MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+    MODE_ONE_WIDTH,     /* only one width is supported */
+    MODE_ONE_HEIGHT,    /* only one height is supported */
+    MODE_ONE_SIZE,      /* only one resolution is supported */
+    MODE_NO_REDUCED,    /* monitor doesn't accept reduced blanking */
+    MODE_UNVERIFIED = -3, /* mode needs to be re-verified */
+    MODE_BAD = -2,     /* unspecified reason */
+    MODE_ERROR = -1    /* error condition */
+};
+
+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+                                   DRM_MODE_TYPE_CRTC_C)
+
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+       .name = nm, .status = 0, .type = (t), .clock = (c), \
+       .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+       .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+       .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+       .vscan = (vs), .flags = (f), .vrefresh = 0
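+
+/*
+ * Example (a minimal sketch): DRM_MODE expands to a designated-initializer
+ * list, so a built-in mode table entry for the standard VESA 640x480@60
+ * timings could be written as (the type flag shown is an assumption):
+ *
+ *     static struct drm_display_mode vesa_640x480 = {
+ *             DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
+ *                      640, 656, 752, 800, 0,
+ *                      480, 490, 492, 525, 0,
+ *                      V_NHSYNC | V_NVSYNC)
+ *     };
+ */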
+
+struct drm_display_mode {
+       /* Header */
+       struct list_head head;
+       char name[DRM_DISPLAY_MODE_LEN];
+       int mode_id;
+       int output_count;
+       enum drm_mode_status status;
+       int type;
+
+       /* Proposed mode values */
+       int clock;
+       int hdisplay;
+       int hsync_start;
+       int hsync_end;
+       int htotal;
+       int hskew;
+       int vdisplay;
+       int vsync_start;
+       int vsync_end;
+       int vtotal;
+       int vscan;
+       unsigned int flags;
+
+       /* Actual mode we give to hw */
+       int clock_index;
+       int synth_clock;
+       int crtc_hdisplay;
+       int crtc_hblank_start;
+       int crtc_hblank_end;
+       int crtc_hsync_start;
+       int crtc_hsync_end;
+       int crtc_htotal;
+       int crtc_hskew;
+       int crtc_vdisplay;
+       int crtc_vblank_start;
+       int crtc_vblank_end;
+       int crtc_vsync_start;
+       int crtc_vsync_end;
+       int crtc_vtotal;
+       int crtc_hadjusted;
+       int crtc_vadjusted;
+
+       /* Driver private mode info */
+       int private_size;
+       int *private;
+       int private_flags;
+
+       int vrefresh;
+       float hsync;
+};
+
+/* Video mode flags */
+#define V_PHSYNC       (1<<0)
+#define V_NHSYNC       (1<<1)
+#define V_PVSYNC       (1<<2)
+#define V_NVSYNC       (1<<3)
+#define V_INTERLACE    (1<<4)
+#define V_DBLSCAN      (1<<5)
+#define V_CSYNC                (1<<6)
+#define V_PCSYNC       (1<<7)
+#define V_NCSYNC       (1<<8)
+#define V_HSKEW                (1<<9) /* hskew provided */
+#define V_BCAST                (1<<10)
+#define V_PIXMUX       (1<<11)
+#define V_DBLCLK       (1<<12)
+#define V_CLKDIV2      (1<<13)
+
+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+#define DPMSModeOn 0
+#define DPMSModeStandby 1
+#define DPMSModeSuspend 2
+#define DPMSModeOff 3
+
+enum drm_output_status {
+       output_status_connected = 1,
+       output_status_disconnected = 2,
+       output_status_unknown = 3,
+};
+
+enum subpixel_order {
+       SubPixelUnknown = 0,
+       SubPixelHorizontalRGB,
+       SubPixelHorizontalBGR,
+       SubPixelVerticalRGB,
+       SubPixelVerticalBGR,
+       SubPixelNone,
+};
+
+/*
+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
+ */
+struct drm_display_info {
+       char name[DRM_DISPLAY_INFO_LEN];
+       /* Input info */
+       bool serration_vsync;
+       bool sync_on_green;
+       bool composite_sync;
+       bool separate_syncs;
+       bool blank_to_black;
+       unsigned char video_level;
+       bool digital;
+       /* Physical size */
+       unsigned int width_mm;
+       unsigned int height_mm;
+
+       /* Display parameters */
+       unsigned char gamma; /* FIXME: storage format */
+       bool gtf_supported;
+       bool standard_color;
+       enum {
+               monochrome,
+               rgb,
+               other,
+               unknown,
+       } display_type;
+       bool active_off_supported;
+       bool suspend_supported;
+       bool standby_supported;
+
+       /* Color info FIXME: storage format */
+       unsigned short redx, redy;
+       unsigned short greenx, greeny;
+       unsigned short bluex, bluey;
+       unsigned short whitex, whitey;
+
+       /* Clock limits FIXME: storage format */
+       unsigned int min_vfreq, max_vfreq;
+       unsigned int min_hfreq, max_hfreq;
+       unsigned int pixel_clock;
+
+       /* White point indices FIXME: storage format */
+       unsigned int wpx1, wpy1;
+       unsigned int wpgamma1;
+       unsigned int wpx2, wpy2;
+       unsigned int wpgamma2;
+
+       /* Preferred mode (if any) */
+       struct drm_display_mode *preferred_mode;
+       char *raw_edid; /* if any */
+};
+
+struct drm_framebuffer {
+       struct drm_device *dev;
+       struct list_head head;
+       int id; /* idr assigned */
+       unsigned int pitch;
+       unsigned long offset;
+       unsigned int width;
+       unsigned int height;
+       /* depth can be 15 or 16 */
+       unsigned int depth;
+       int bits_per_pixel;
+       int flags;
+       struct drm_buffer_object *bo;
+       void *fbdev;
+       u32 pseudo_palette[16];
+       struct drm_bo_kmap_obj kmap;
+       struct list_head filp_head;
+};
+
+struct drm_property_enum {
+       struct list_head head;
+       uint32_t value;
+       unsigned char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_property {
+       struct list_head head;
+       int id; /* idr assigned */
+       uint32_t flags;
+       char name[DRM_PROP_NAME_LEN];
+       uint32_t num_values;
+       uint32_t *values;
+
+       struct list_head enum_list;
+};
+
+struct drm_crtc;
+struct drm_output;
+
+/**
+ * drm_crtc_funcs - control CRTCs for a given device
+ * @dpms: control display power levels
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @lock: lock the CRTC
+ * @unlock: unlock the CRTC
+ * @prepare: prepare the CRTC for a mode set
+ * @commit: commit the new mode
+ * @mode_fixup: fixup proposed mode
+ * @mode_set: set the desired mode on the CRTC
+ * @gamma_set: specify color ramp for CRTC
+ * @cleanup: cleanup driver private state prior to close
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM.  Each CRTC controls one or more outputs (note that the name
+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
+ * outputs, not just CRTs).
+ *
+ * Each driver is responsible for filling out this structure at startup time,
+ * in addition to providing other modesetting features, like i2c and DDC
+ * bus accessors.
+ */
+struct drm_crtc_funcs {
+       /*
+        * Control power levels on the CRTC.  If the mode passed in is
+        * unsupported, the provider must use the next lowest power level.
+        */
+       void (*dpms)(struct drm_crtc *crtc, int mode);
+
+       /* JJJ:  Are these needed? */
+       /* Save CRTC state */
+       void (*save)(struct drm_crtc *crtc); /* suspend? */
+       /* Restore CRTC state */
+       void (*restore)(struct drm_crtc *crtc); /* resume? */
+       bool (*lock)(struct drm_crtc *crtc);
+       void (*unlock)(struct drm_crtc *crtc);
+
+       void (*prepare)(struct drm_crtc *crtc);
+       void (*commit)(struct drm_crtc *crtc);
+
+       /* Provider can fixup or change mode timings before modeset occurs */
+       bool (*mode_fixup)(struct drm_crtc *crtc,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+       /* Actually set the mode */
+       void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode, int x, int y);
+       /* Set gamma on the CRTC */
+       void (*gamma_set)(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+                         int regno);
+       /* Driver cleanup routine */
+       void (*cleanup)(struct drm_crtc *crtc);
+};
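+
+/*
+ * A driver typically supplies a static instance of these hooks and passes it
+ * to drm_crtc_create() at init time; a minimal sketch with hypothetical
+ * handler names:
+ *
+ *     static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *             .dpms = foo_crtc_dpms,
+ *             .prepare = foo_crtc_prepare,
+ *             .commit = foo_crtc_commit,
+ *             .mode_fixup = foo_crtc_mode_fixup,
+ *             .mode_set = foo_crtc_mode_set,
+ *     };
+ */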
+
+/**
+ * drm_crtc - central CRTC control structure
+ * @enabled: is this CRTC enabled?
+ * @x: x position on screen
+ * @y: y position on screen
+ * @desired_mode: new desired mode
+ * @desired_x: desired x for desired_mode
+ * @desired_y: desired y for desired_mode
+ * @funcs: CRTC control functions
+ * @driver_private: arbitrary driver data
+ *
+ * Each CRTC may have one or more outputs associated with it.  This structure
+ * allows the CRTC to be controlled.
+ */
+struct drm_crtc {
+       struct drm_device *dev;
+       struct list_head head;
+
+       int id; /* idr assigned */
+
+       /* framebuffer the output is currently bound to */
+       struct drm_framebuffer *fb;
+
+       bool enabled;
+
+       /* JJJ: are these needed? */
+       bool cursor_in_range;
+       bool cursor_shown;
+
+       struct drm_display_mode mode;
+
+       int x, y;
+       struct drm_display_mode *desired_mode;
+       int desired_x, desired_y;
+       const struct drm_crtc_funcs *funcs;
+       void *driver_private;
+
+       /* RRCrtcPtr randr_crtc? */
+};
+
+extern struct drm_crtc *drm_crtc_create(struct drm_device *dev,
+                                       const struct drm_crtc_funcs *funcs);
+
+/**
+ * drm_output_funcs - control outputs on a given device
+ * @init: setup this output
+ * @dpms: set power state (see drm_crtc_funcs above)
+ * @save: save output state
+ * @restore: restore output state
+ * @mode_valid: is this mode valid on the given output?
+ * @mode_fixup: try to fixup proposed mode for this output
+ * @prepare: prepare the output for a mode set
+ * @commit: commit the new mode to the output
+ * @mode_set: set this mode
+ * @detect: is this output active?
+ * @get_modes: get mode list for this output
+ * @set_property: property for this output may need update
+ * @cleanup: output is going away, cleanup
+ *
+ * Each CRTC may have one or more outputs attached to it.  The functions
+ * below allow the core DRM code to control outputs, enumerate available modes,
+ * etc.
+ */
+struct drm_output_funcs {
+       void (*init)(struct drm_output *output);
+       void (*dpms)(struct drm_output *output, int mode);
+       void (*save)(struct drm_output *output);
+       void (*restore)(struct drm_output *output);
+       int (*mode_valid)(struct drm_output *output,
+                         struct drm_display_mode *mode);
+       bool (*mode_fixup)(struct drm_output *output,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+       void (*prepare)(struct drm_output *output);
+       void (*commit)(struct drm_output *output);
+       void (*mode_set)(struct drm_output *output,
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
+       enum drm_output_status (*detect)(struct drm_output *output);
+       int (*get_modes)(struct drm_output *output);
+       /* JJJ: type checking for properties via property value type */
+       bool (*set_property)(struct drm_output *output, int prop, void *val);
+       void (*cleanup)(struct drm_output *output);
+};
+
+#define DRM_OUTPUT_MAX_UMODES 16
+#define DRM_OUTPUT_MAX_PROPERTY 16
+#define DRM_OUTPUT_LEN 32
+/**
+ * drm_output - central DRM output control structure
+ * @crtc: CRTC this output is currently connected to, NULL if none
+ * @possible_crtcs: bitmap of CRTCS this output could be attached to
+ * @possible_clones: bitmap of possible outputs this output could clone
+ * @interlace_allowed: can this output handle interlaced modes?
+ * @doublescan_allowed: can this output handle doublescan?
+ * @modes: modes available on this output (from get_modes() + user)
+ * @initial_x: initial x position for this output
+ * @initial_y: initial y position for this output
+ * @status: output connected?
+ * @subpixel_order: for this output
+ * @mm_width: displayable width of output in mm
+ * @mm_height: displayable height of output in mm
+ * @name: name of output (should be one of a few standard names)
+ * @funcs: output control functions
+ * @driver_private: private driver data
+ *
+ * Each output may be connected to one or more CRTCs, or may be clonable by
+ * another output if they can share a CRTC.  Each output also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_output {
+       struct drm_device *dev;
+       struct list_head head;
+       struct drm_crtc *crtc;
+       int id; /* idr assigned */
+       unsigned long possible_crtcs;
+       unsigned long possible_clones;
+       bool interlace_allowed;
+       bool doublescan_allowed;
+       struct list_head modes; /* list of modes on this output */
+
+       /*
+         OptionInfoPtr options;
+         XF86ConfMonitorPtr conf_monitor;
+        */
+       int initial_x, initial_y;
+       enum drm_output_status status;
+
+       /* these are modes added by probing with DDC or the BIOS */
+       struct list_head probed_modes;
+       
+       /* xf86MonPtr MonInfo; */
+       enum subpixel_order subpixel_order;
+       int mm_width, mm_height;
+       struct drm_display_info *monitor_info; /* if any */
+       char name[DRM_OUTPUT_LEN];
+       const struct drm_output_funcs *funcs;
+       void *driver_private;
+
+       u32 user_mode_ids[DRM_OUTPUT_MAX_UMODES];
+
+       u32 property_ids[DRM_OUTPUT_MAX_PROPERTY];
+       u32 property_values[DRM_OUTPUT_MAX_PROPERTY];
+};
+
+/**
+ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
+ * @resize: adjust CRTCs as necessary for the proposed layout
+ *
+ * Currently only a resize hook is available.  DRM will call back into the
+ * driver with a new screen width and height.  If the driver can't support
+ * the proposed size, it can return false.  Otherwise it should adjust
+ * the CRTC<->output mappings as needed and update its view of the screen.
+ */
+struct drm_mode_config_funcs {
+       bool (*resize)(struct drm_device *dev, int width, int height);
+};
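+
+/*
+ * A driver that can grow its scanout area might implement the resize hook
+ * roughly like this (hypothetical limits and helper name, a sketch only):
+ *
+ *     static bool foo_resize(struct drm_device *dev, int width, int height)
+ *     {
+ *             if (width > FOO_MAX_WIDTH || height > FOO_MAX_HEIGHT)
+ *                     return false;
+ *             return foo_realloc_scanout(dev, width, height) == 0;
+ *     }
+ */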
+
+/**
+ * drm_mode_config - Mode configuration control structure
+ *
+ */
+struct drm_mode_config {
+       struct mutex mutex; /* protects configuration and IDR */
+       struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, output, modes - just makes life easier */
+       /* this is limited to one for now */
+       int num_fb;
+       struct list_head fb_list;
+       int num_output;
+       struct list_head output_list;
+
+       /* int compat_output? */
+       int num_crtc;
+       struct list_head crtc_list;
+
+       struct list_head usermode_list;
+
+       struct list_head property_list;
+
+       int min_width, min_height;
+       int max_width, max_height;
+       /* DamagePtr rotationDamage? */
+       /* DGA stuff? */
+       struct drm_mode_config_funcs *funcs;
+       unsigned long fb_base;
+};
+
+struct drm_output *drm_output_create(struct drm_device *dev,
+                                    const struct drm_output_funcs *funcs,
+                                    const char *name);
+extern void drm_output_destroy(struct drm_output *output);
+extern bool drm_output_rename(struct drm_output *output, const char *name);
+extern void drm_fb_release(struct file *filp);
+
+extern struct edid *drm_get_edid(struct drm_output *output,
+                                struct i2c_adapter *adapter);
+extern int drm_add_edid_modes(struct drm_output *output, struct edid *edid);
+extern void drm_mode_probed_add(struct drm_output *output, struct drm_display_mode *mode);
+extern void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode);
+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+                                                  struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(struct drm_device *dev,
+                                        struct drm_display_mode *mode);
+extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_cleanup(struct drm_device *dev);
+extern void drm_mode_set_name(struct drm_display_mode *mode);
+extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
+extern void drm_disable_unused_functions(struct drm_device *dev);
+
+extern void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode);
+extern int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode);
+
+/* for use by the fb module */
+extern int drm_mode_attachmode_crtc(struct drm_device *dev,
+                                   struct drm_crtc *crtc,
+                                   struct drm_display_mode *mode);
+extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
+
+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+extern void drm_mode_list_concat(struct list_head *head,
+                                struct list_head *new);
+extern void drm_mode_validate_size(struct drm_device *dev,
+                                  struct list_head *mode_list,
+                                  int maxX, int maxY, int maxPitch);
+extern void drm_mode_prune_invalid(struct drm_device *dev,
+                                  struct list_head *mode_list, bool verbose);
+extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_vrefresh(struct drm_display_mode *mode);
+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+                                 int adjust_flags);
+extern void drm_mode_output_list_update(struct drm_output *output);
+
+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
+extern bool drm_initial_config(struct drm_device *dev, bool cangrow);
+extern void drm_framebuffer_set_object(struct drm_device *dev,
+                                      unsigned long handle);
+extern struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev);
+extern void drm_framebuffer_destroy(struct drm_framebuffer *fb);
+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                      int x, int y);
+
+extern int drm_output_attach_property(struct drm_output *output,
+                                     struct drm_property *property, int init_val);
+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                               const char *name, int num_values);
+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+extern int drm_property_add_enum(struct drm_property *property, int index, 
+                                uint32_t value, const char *name);
+
+/* IOCTLs */
+extern int drm_mode_getresources(struct drm_device *dev,
+                                void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getcrtc(struct drm_device *dev,
+                           void *data, struct drm_file *file_priv);
+extern int drm_mode_getoutput(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv);
+extern int drm_mode_setcrtc(struct drm_device *dev,
+                           void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+extern int drm_mode_rmfb(struct drm_device *dev,
+                        void *data, struct drm_file *file_priv);
+extern int drm_mode_getfb(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+extern int drm_mode_addmode_ioctl(struct drm_device *dev,
+                                 void *data, struct drm_file *file_priv);
+extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
+                                void *data, struct drm_file *file_priv);
+extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
+                                    void *data, struct drm_file *file_priv);
+extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
+                                    void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                                     void *data, struct drm_file *file_priv);
+#endif /* __DRM_CRTC_H__ */
+
diff --git a/psb-kernel-source-4.41.1/drm_dma.c b/psb-kernel-source-4.41.1/drm_dma.c
new file mode 100644 (file)
index 0000000..f7bff0a
--- /dev/null
@@ -0,0 +1,179 @@
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
+{
+       dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
+       if (!dev->dma)
+               return -ENOMEM;
+
+       /* zeroing the whole structure also clears every bufs[] entry */
+       memset(dev->dma, 0, sizeof(*dev->dma));
+
+       return 0;
+}
+
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i, j;
+
+       if (!dma)
+               return;
+
+       /* Clear dma buffers */
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].seg_count) {
+                       DRM_DEBUG("order %d: buf_count = %d,"
+                                 " seg_count = %d\n",
+                                 i,
+                                 dma->bufs[i].buf_count,
+                                 dma->bufs[i].seg_count);
+                       for (j = 0; j < dma->bufs[i].seg_count; j++) {
+                               if (dma->bufs[i].seglist[j]) {
+                                       drm_pci_free(dev, dma->bufs[i].seglist[j]);
+                               }
+                       }
+                       drm_free(dma->bufs[i].seglist,
+                                dma->bufs[i].seg_count
+                                * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
+               }
+               if (dma->bufs[i].buf_count) {
+                       for (j = 0; j < dma->bufs[i].buf_count; j++) {
+                               if (dma->bufs[i].buflist[j].dev_private) {
+                                       drm_free(dma->bufs[i].buflist[j].
+                                                dev_private,
+                                                dma->bufs[i].buflist[j].
+                                                dev_priv_size, DRM_MEM_BUFS);
+                               }
+                       }
+                       drm_free(dma->bufs[i].buflist,
+                                dma->bufs[i].buf_count *
+                                sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
+               }
+       }
+
+       if (dma->buflist) {
+               drm_free(dma->buflist,
+                        dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
+       }
+
+       if (dma->pagelist) {
+               drm_free(dma->pagelist,
+                        dma->page_count * sizeof(*dma->pagelist),
+                        DRM_MEM_PAGES);
+       }
+       drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+       dev->dma = NULL;
+}
+
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)
+{
+       if (!buf)
+               return;
+
+       buf->waiting = 0;
+       buf->pending = 0;
+       buf->file_priv = NULL;
+       buf->used = 0;
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
+           && waitqueue_active(&buf->dma_wait)) {
+               wake_up_interruptible(&buf->dma_wait);
+       }
+}
+
+/**
+ * Reclaim the buffers.
+ *
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+                             struct drm_file *file_priv)
+{
+       struct drm_device_dma *dma = dev->dma;
+       int i;
+
+       if (!dma)
+               return;
+       for (i = 0; i < dma->buf_count; i++) {
+               if (dma->buflist[i]->file_priv == file_priv) {
+                       switch (dma->buflist[i]->list) {
+                       case DRM_LIST_NONE:
+                               drm_free_buffer(dev, dma->buflist[i]);
+                               break;
+                       case DRM_LIST_WAIT:
+                               dma->buflist[i]->list = DRM_LIST_RECLAIM;
+                               break;
+                       default:
+                               /* Buffer already on hardware. */
+                               break;
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL(drm_core_reclaim_buffers);
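+
+/*
+ * Usage sketch (hypothetical "foo" driver, not part of this tree): DMA
+ * drivers of this era typically point their drm_driver reclaim hook at the
+ * helper above so a closing client's buffers are recycled:
+ *
+ *     static struct drm_driver foo_driver = {
+ *             .driver_features = DRIVER_HAVE_DMA,
+ *             .reclaim_buffers = drm_core_reclaim_buffers,
+ *     };
+ */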
diff --git a/psb-kernel-source-4.41.1/drm_drawable.c b/psb-kernel-source-4.41.1/drm_drawable.c
new file mode 100644 (file)
index 0000000..1839c57
--- /dev/null
@@ -0,0 +1,192 @@
+/**
+ * \file drm_drawable.c
+ * IOCTLs for drawables
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ * \author Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Allocate drawable ID and memory to store information about it.
+ */
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       unsigned long irqflags;
+       struct drm_draw *draw = data;
+       int new_id = 0;
+       int ret;
+
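+       /* idr_pre_get() preallocates outside the spinlock; the allocation
+        * under the lock can still race with other allocators and return
+        * -EAGAIN, hence the retry loop below. */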
+again:
+       if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory expanding drawable idr\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+       ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
+       if (ret == -EAGAIN) {
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+               goto again;
+       }
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+       draw->handle = new_id;
+
+       DRM_DEBUG("%d\n", draw->handle);
+
+       return 0;
+}
+
+/**
+ * Free drawable ID and memory to store information about it.
+ */
+int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_draw *draw = data;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+       drm_free(drm_get_drawable_info(dev, draw->handle),
+                sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
+
+       idr_remove(&dev->drw_idr, draw->handle);
+
+       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+       DRM_DEBUG("%d\n", draw->handle);
+       return 0;
+}
+
+int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_update_draw *update = data;
+       unsigned long irqflags;
+       struct drm_clip_rect *rects;
+       struct drm_drawable_info *info;
+       int err;
+
+       info = idr_find(&dev->drw_idr, update->handle);
+       if (!info) {
+               info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
+               if (!info)
+                       return -ENOMEM;
+               if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
+                       DRM_ERROR("No such drawable %d\n", update->handle);
+                       drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+                       return -EINVAL;
+               }
+       }
+
+       switch (update->type) {
+       case DRM_DRAWABLE_CLIPRECTS:
+               if (update->num != info->num_rects) {
+                       rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
+                                        DRM_MEM_BUFS);
+               } else
+                       rects = info->rects;
+
+               if (update->num && !rects) {
+                       DRM_ERROR("Failed to allocate cliprect memory\n");
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               if (update->num && DRM_COPY_FROM_USER(rects,
+                                                    (struct drm_clip_rect __user *)
+                                                    (unsigned long)update->data,
+                                                    update->num *
+                                                    sizeof(*rects))) {
+                       DRM_ERROR("Failed to copy cliprects from userspace\n");
+                       err = -EFAULT;
+                       goto error;
+               }
+
+               spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+               if (rects != info->rects) {
+                       drm_free(info->rects, info->num_rects *
+                                sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+               }
+
+               info->rects = rects;
+               info->num_rects = update->num;
+
+               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+               DRM_DEBUG("Updated %d cliprects for drawable %d\n",
+                         info->num_rects, update->handle);
+               break;
+       default:
+               DRM_ERROR("Invalid update type %d\n", update->type);
+               return -EINVAL;
+       }
+
+       return 0;
+
+error:
+       if (rects != info->rects)
+               drm_free(rects, update->num * sizeof(struct drm_clip_rect),
+                        DRM_MEM_BUFS);
+
+       return err;
+}
+
+/**
+ * Caller must hold the drawable spinlock!
+ */
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+       return idr_find(&dev->drw_idr, id);
+}
+EXPORT_SYMBOL(drm_get_drawable_info);
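+
+/*
+ * Locking sketch (hypothetical caller): hold the drawable spinlock across
+ * both the lookup and any use of the returned cliprects, since
+ * drm_update_drawable_info() above may free them concurrently:
+ *
+ *     spin_lock_irqsave(&dev->drw_lock, irqflags);
+ *     info = drm_get_drawable_info(dev, handle);
+ *     if (info && info->num_rects)
+ *             rect = info->rects[0];
+ *     spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ */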
+
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+       struct drm_drawable_info *info = p;
+
+       if (info) {
+               drm_free(info->rects, info->num_rects *
+                        sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+               drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+       }
+
+       return 0;
+}
+
+void drm_drawable_free_all(struct drm_device *dev)
+{
+       idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+       idr_remove_all(&dev->drw_idr);
+}
diff --git a/psb-kernel-source-4.41.1/drm_drv.c b/psb-kernel-source-4.41.1/drm_drv.c
new file mode 100644 (file)
index 0000000..b34f2e5
--- /dev/null
@@ -0,0 +1,695 @@
+/**
+ * \file drm_drv.c
+ * Generic driver template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * \code
+ * #define DRIVER_AUTHOR       "VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME         "mga"
+ * #define DRIVER_DESC         "Matrox G200/G400"
+ * #define DRIVER_DATE         "20001127"
+ *
+ * #define drm_x               mga_##x
+ * \endcode
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "drmP.h"
+#include "drm_core.h"
+
+static void drm_cleanup(struct drm_device * dev);
+int drm_fb_loaded = 0;
+
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
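+/* Flag semantics, enforced in drm_unlocked_ioctl() below: DRM_AUTH requires
+ * an authenticated client, DRM_MASTER the DRM master, and DRM_ROOT_ONLY
+ * CAP_SYS_ADMIN. */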
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+       /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+       DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDMODE, drm_mode_addmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMMODE, drm_mode_rmmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
+};
+
+#define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
+
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+       struct drm_magic_entry *pt, *next;
+       struct drm_map_list *r_list, *list_t;
+       struct drm_vma_entry *vma, *vma_temp;
+       int i;
+
+       DRM_DEBUG("\n");
+
+       if (dev->driver->lastclose)
+               dev->driver->lastclose(dev);
+       DRM_DEBUG("driver lastclose completed\n");
+
+       if (dev->unique) {
+               drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
+               dev->unique = NULL;
+               dev->unique_len = 0;
+       }
+
+       if (dev->irq_enabled)
+               drm_irq_uninstall(dev);
+
+       /* Free drawable information memory */
+       mutex_lock(&dev->struct_mutex);
+
+       drm_drawable_free_all(dev);
+       del_timer(&dev->timer);
+
+
+       if (dev->magicfree.next) {
+               list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
+                       list_del(&pt->head);
+                       drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
+                       drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+               }
+               drm_ht_remove(&dev->magiclist);
+       }
+
+       /* Clear AGP information */
+       if (drm_core_has_AGP(dev) && dev->agp) {
+               struct drm_agp_mem *entry, *tempe;
+
+               /* Remove AGP resources, but leave dev->agp
+                  intact until drm_cleanup() is called. */
+               list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+                       if (entry->bound)
+                               drm_unbind_agp(entry->memory);
+                       drm_free_agp(entry->memory, entry->pages);
+                       drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+               }
+               INIT_LIST_HEAD(&dev->agp->memory);
+
+               if (dev->agp->acquired)
+                       drm_agp_release(dev);
+
+               dev->agp->acquired = 0;
+               dev->agp->enabled = 0;
+       }
+       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+               drm_sg_cleanup(dev->sg);
+               dev->sg = NULL;
+       }
+
+       /* Clear vma list (only built for debugging) */
+       list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+               list_del(&vma->head);
+               drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+       }
+
+       list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+               if (!(r_list->map->flags & _DRM_DRIVER)) {
+                       drm_rmmap_locked(dev, r_list->map);
+                       r_list = NULL;
+               }
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
+               for (i = 0; i < dev->queue_count; i++) {
+
+                       if (dev->queuelist[i]) {
+                               drm_free(dev->queuelist[i],
+                                        sizeof(*dev->queuelist[0]),
+                                        DRM_MEM_QUEUES);
+                               dev->queuelist[i] = NULL;
+                       }
+               }
+               drm_free(dev->queuelist,
+                        dev->queue_slots * sizeof(*dev->queuelist),
+                        DRM_MEM_QUEUES);
+               dev->queuelist = NULL;
+       }
+       dev->queue_count = 0;
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               drm_dma_takedown(dev);
+
+       if (dev->lock.hw_lock) {
+               dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
+               dev->lock.file_priv = NULL;
+               wake_up_interruptible(&dev->lock.lock_queue);
+       }
+       dev->dev_mapping = NULL;
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("lastclose completed\n");
+       return 0;
+}
+
+void drm_cleanup_pci(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       pci_set_drvdata(pdev, NULL);
+       pci_release_regions(pdev);
+       if (dev)
+               drm_cleanup(dev);
+}
+EXPORT_SYMBOL(drm_cleanup_pci);
+
+/**
+ * Module initialization. Called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported).
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes an array of drm_device structures, and attempts to
+ * initialize all available devices, using consecutive minors, registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_init(struct drm_driver *driver,
+                      struct pci_device_id *pciidlist)
+{
+       struct pci_dev *pdev;
+       struct pci_device_id *pid;
+       int rc, i;
+
+       DRM_DEBUG("\n");
+
+       for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
+               pid = &pciidlist[i];
+
+               pdev = NULL;
+               /* pass back in pdev to account for multiple identical cards */
+               while ((pdev =
+                       pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+                                      pid->subdevice, pdev))) {
+                       /* Are there device class requirements? */
+                       if ((pid->class != 0)
+                               && ((pdev->class & pid->class_mask) != pid->class)) {
+                               continue;
+                       }
+                       /* is there already a driver loaded, or (short circuit saves work) */
+                       /* does something like VesaFB have control of the memory region? */
+                       if (pci_dev_driver(pdev)
+                           || pci_request_regions(pdev, "DRM scan")) {
+                               /* go into stealth mode */
+                               drm_fb_loaded = 1;
+                               pci_dev_put(pdev);
+                               break;
+                       }
+                       /* no fbdev or vesadev, put things back and wait for normal probe */
+                       pci_release_regions(pdev);
+               }
+       }
+
+       if (!drm_fb_loaded)
+               return pci_register_driver(&driver->pci_driver);
+       else {
+               for (i = 0; pciidlist[i].vendor != 0; i++) {
+                       pid = &pciidlist[i];
+
+                       pdev = NULL;
+                       /* pass back in pdev to account for multiple identical cards */
+                       while ((pdev =
+                               pci_get_subsys(pid->vendor, pid->device,
+                                              pid->subvendor, pid->subdevice,
+                                              pdev))) {
+                               /* Are there device class requirements? */
+                               if ((pid->class != 0)
+                                       && ((pdev->class & pid->class_mask) != pid->class)) {
+                                       continue;
+                               }
+                               /* stealth mode requires a manual probe */
+                               pci_dev_get(pdev);
+                               if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
+                                       pci_dev_put(pdev);
+                                       return rc;
+                               }
+                       }
+               }
+               DRM_INFO("Used old pci detect: framebuffer loaded\n");
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_init);
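+
+/*
+ * Call-site sketch (hypothetical "foo" driver, per the template comment at
+ * the top of this file):
+ *
+ *     static int __init foo_init(void)
+ *     {
+ *             return drm_init(&driver, pciidlist);
+ *     }
+ *     module_init(foo_init);
+ */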
+
+/**
+ * Called via cleanup_module() at module unload time.
+ *
+ * Cleans up all DRM devices, calling drm_lastclose().
+ *
+ * \sa drm_init
+ */
+static void drm_cleanup(struct drm_device * dev)
+{
+
+       DRM_DEBUG("\n");
+       if (!dev) {
+               DRM_ERROR("cleanup called with no dev\n");
+               return;
+       }
+
+       drm_lastclose(dev);
+       drm_ctxbitmap_cleanup(dev);
+
+       if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
+           && dev->agp->agp_mtrr >= 0) {
+               int retval;
+               retval = mtrr_del(dev->agp->agp_mtrr,
+                                 dev->agp->agp_info.aper_base,
+                                 dev->agp->agp_info.aper_size * 1024 * 1024);
+               DRM_DEBUG("mtrr_del=%d\n", retval);
+       }
+
+       if (dev->driver->unload)
+               dev->driver->unload(dev);
+
+       drm_bo_driver_finish(dev);
+       drm_fence_manager_takedown(dev);
+
+       drm_ht_remove(&dev->map_hash);
+       drm_mm_takedown(&dev->offset_manager);
+       drm_ht_remove(&dev->object_hash);
+
+       if (drm_core_has_AGP(dev) && dev->agp) {
+               drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+               dev->agp = NULL;
+       }
+
+       if (!drm_fb_loaded)
+               pci_disable_device(dev->pdev);
+
+       drm_put_head(&dev->primary);
+       if (drm_put_dev(dev))
+               DRM_ERROR("Cannot unload module\n");
+}
+
+void drm_exit(struct drm_driver *driver)
+{
+       int i;
+       struct drm_device *dev = NULL;
+       struct drm_head *head;
+
+       DRM_DEBUG("\n");
+       if (drm_fb_loaded) {
+               for (i = 0; i < drm_cards_limit; i++) {
+                       head = drm_heads[i];
+                       if (!head)
+                               continue;
+                       if (!head->dev)
+                               continue;
+                       if (head->dev->driver != driver)
+                               continue;
+                       dev = head->dev;
+                       if (dev) {
+                               /* release the pci driver */
+                               if (dev->pdev)
+                                       pci_dev_put(dev->pdev);
+                               drm_cleanup(dev);
+                       }
+               }
+       } else
+               pci_unregister_driver(&driver->pci_driver);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+       free_nopage_retry();
+#endif
+       DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_exit);
+
+/** File operations structure */
+static const struct file_operations drm_stub_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_stub_open
+};
+
+static int __init drm_core_init(void)
+{
+       int ret;
+       struct sysinfo si;
+       unsigned long avail_memctl_mem;
+       unsigned long max_memctl_mem;
+
+#ifdef USE_PAT_WC
+#warning Init pat
+       drm_init_pat();
+#endif
+       si_meminfo(&si);
+
+       /*
+        * AGP only allows low / DMA32 memory ATM.
+        */
+
+       avail_memctl_mem = si.totalram - si.totalhigh;
+
+       /*
+        * Avoid overflows
+        */
+
+       max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
+       max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
+
+       if (avail_memctl_mem >= max_memctl_mem)
+               avail_memctl_mem = max_memctl_mem;
+
+       drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);
+
+       ret = -ENOMEM;
+       drm_cards_limit =
+           (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
+       drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
+       if (!drm_heads)
+               goto err_p1;
+
+       if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+               goto err_p1;
+
+       drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+       if (IS_ERR(drm_class)) {
+               printk(KERN_ERR "DRM: Error creating drm class.\n");
+               ret = PTR_ERR(drm_class);
+               goto err_p2;
+       }
+
+       drm_proc_root = proc_mkdir("dri", NULL);
+       if (!drm_proc_root) {
+               DRM_ERROR("Cannot create /proc/dri\n");
+               ret = -ENOMEM;
+               goto err_p3;
+       }
+
+       drm_mem_init();
+
+       DRM_INFO("Initialized %s %d.%d.%d %s\n",
+                CORE_NAME,
+                CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+       return 0;
+err_p3:
+       drm_sysfs_destroy();
+err_p2:
+       unregister_chrdev(DRM_MAJOR, "drm");
+       drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
+err_p1:
+       return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+       remove_proc_entry("dri", NULL);
+       drm_sysfs_destroy();
+
+       unregister_chrdev(DRM_MAJOR, "drm");
+
+       drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
+
+/**
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_version *version = data;
+       int len;
+
+       version->version_major = dev->driver->major;
+       version->version_minor = dev->driver->minor;
+       version->version_patchlevel = dev->driver->patchlevel;
+       DRM_COPY(version->name, dev->driver->name);
+       DRM_COPY(version->date, dev->driver->date);
+       DRM_COPY(version->desc, dev->driver->desc);
+
+       return 0;
+}
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ *
+ * Copies data in and out according to the size and direction given in cmd,
+ * which must match the ioctl cmd known by the kernel.  The kernel uses a 512
+ * byte stack buffer to store the ioctl arguments in kernel space.  Should we
+ * ever need much larger ioctl arguments, we may need to allocate memory.
+ */
+int drm_ioctl(struct inode *inode, struct file *filp,
+             unsigned int cmd, unsigned long arg)
+{
+       return drm_unlocked_ioctl(filp, cmd, arg);
+}
+EXPORT_SYMBOL(drm_ioctl);
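+
+/*
+ * Userspace reaches this dispatcher through a plain ioctl(2) on the device
+ * node; a minimal (hypothetical) client sketch:
+ *
+ *     struct drm_version v = { 0 };
+ *     int fd = open("/dev/dri/card0", O_RDWR);
+ *     if (fd >= 0 && ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
+ *             // v.version_major/minor/patchlevel are now filled in
+ */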
+
+long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_ioctl_desc *ioctl;
+       drm_ioctl_t *func;
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       int retcode = -EINVAL;
+       char kdata[512];
+
+       atomic_inc(&dev->ioctl_count);
+       atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+       ++file_priv->ioctl_count;
+
+       DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+                 current->pid, cmd, nr, (long)old_encode_dev(file_priv->head->device),
+                 file_priv->authenticated);
+
+       if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+           ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+               goto err_i1;
+       if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+               && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+               ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+       else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
+               ioctl = &drm_ioctls[nr];
+       else {
+               retcode = -EINVAL;
+               goto err_i1;
+       }
+#if 0
+       /*
+        * This check is disabled, because driver private ioctl->cmd
+        * are not the ioctl commands with size and direction bits but
+        * just the indices. The DRM core ioctl->cmd are the proper ioctl
+        * commands. The drivers' ioctl tables need to be fixed.
+        */
+       if (ioctl->cmd != cmd) {
+               retcode = -EINVAL;
+               goto err_i1;
+       }
+#endif
+       func = ioctl->func;
+       /* is there a local override? */
+       if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+               func = dev->driver->dma_ioctl;
+
+       /* kdata is a fixed 512-byte stack buffer; reject oversized args */
+       if (_IOC_SIZE(cmd) > sizeof(kdata)) {
+               retcode = -EINVAL;
+               goto err_i1;
+       }
+
+       if (cmd & IOC_IN) {
+               if (copy_from_user(kdata, (void __user *)arg,
+                                  _IOC_SIZE(cmd)) != 0) {
+                       retcode = -EACCES;
+                       goto err_i1;
+               }
+       }
+
+       if (!func) {
+               DRM_DEBUG("no function\n");
+               retcode = -EINVAL;
+       } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+                  ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+                  ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
+               retcode = -EACCES;
+       } else {
+               retcode = func(dev, kdata, file_priv);
+       }
+
+       if ((retcode == 0) && (cmd & IOC_OUT)) {
+               if (copy_to_user((void __user *)arg, kdata,
+                                _IOC_SIZE(cmd)) != 0)
+                       retcode = -EACCES;
+       }
+
+err_i1:
+       atomic_dec(&dev->ioctl_count);
+       if (retcode)
+               DRM_DEBUG("ret = %d\n", retcode);
+       return retcode;
+}
+EXPORT_SYMBOL(drm_unlocked_ioctl);
+
+drm_local_map_t *drm_getsarea(struct drm_device *dev)
+{
+       struct drm_map_list *entry;
+
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && entry->map->type == _DRM_SHM &&
+                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+                       return entry->map;
+               }
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
diff --git a/psb-kernel-source-4.41.1/drm_edid.c b/psb-kernel-source-4.41.1/drm_edid.c
new file mode 100644 (file)
index 0000000..6307cdc
--- /dev/null
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ */
+#include "drmP.h"
+#include <linux/i2c-algo-bit.h>
+#include "drm_edid.h"
+
+#include <acpi/acpi_drivers.h>
+
+/* Valid EDID header has these bytes */
+static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+int drm_get_acpi_edid(char *method, char *edid, ssize_t length)
+{
+       int status;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+       struct acpi_object_list args = { 1, &arg0 };
+
+       if (length == 128)
+               arg0.integer.value = 1;
+       else if (length == 256)
+               arg0.integer.value = 2;
+       else
+               return -EINVAL;
+
+       status = acpi_evaluate_object(NULL, method, &args, &buffer);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       obj = buffer.pointer;
+
+       if (obj && obj->type == ACPI_TYPE_BUFFER) {
+               memcpy(edid, obj->buffer.pointer, obj->buffer.length);
+               status = 0;
+       } else {
+               printk(KERN_ERR PREFIX "Invalid _DDC data\n");
+               status = -EFAULT;
+       }
+       /* the ACPI buffer is ours to free on both paths */
+       kfree(obj);
+
+       return status;
+}
+EXPORT_SYMBOL(drm_get_acpi_edid);
+
+/**
+ * edid_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity check the EDID block by looking at the header, the version number
+ * and the checksum.  Return 0 if the EDID doesn't check out, or 1 if it's
+ * valid.
+ */
+static bool edid_valid(struct edid *edid)
+{
+       int i;
+       u8 csum = 0;
+       u8 *raw_edid = (u8 *)edid;
+
+       if (memcmp(edid->header, edid_header, sizeof(edid_header)))
+               goto bad;
+       if (edid->version != 1)
+               goto bad;
+       if (edid->revision <= 0 || edid->revision > 3)
+               goto bad;
+
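+       /* all 128 bytes of a valid block sum to zero mod 256 */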
+       for (i = 0; i < EDID_LENGTH; i++)
+               csum += raw_edid[i];
+       if (csum)
+               goto bad;
+
+       return 1;
+
+bad:
+       return 0;
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @t: standard timing params
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT.
+ *
+ * Punts for now, but should eventually use the FB layer's CVT based mode
+ * generation code.
+ */
+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
+                                     struct std_timing *t)
+{
+//     struct fb_videomode mode;
+
+//     fb_find_mode_cvt(&mode, 0, 0);
+       /* JJJ:  convert to drm_display_mode */
+       struct drm_display_mode *mode;
+       int hsize = t->hsize * 8 + 248, vsize;
+
+       mode = drm_mode_create(dev);
+       if (!mode)
+               return NULL;
+
+       if (t->aspect_ratio == 0)
+               vsize = (hsize * 10) / 16;
+       else if (t->aspect_ratio == 1)
+               vsize = (hsize * 3) / 4;
+       else if (t->aspect_ratio == 2)
+               vsize = (hsize * 4) / 5;
+       else
+               vsize = (hsize * 9) / 16;
+
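+       /* NOTE: hsize/vsize are computed but never stored in the mode; the
+        * CVT-based fill-in is the punt the comment above refers to. */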
+       drm_mode_set_name(mode);
+
+       return mode;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @timing: EDID detailed timing info
+ * @preferred: is this a preferred mode?
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.  The @preferred flag will be set
+ * if this is the display's preferred timing, and we'll use it to indicate
+ * to the other layers that this mode is desired.
+ */
+struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+                                          struct detailed_timing *timing)
+{
+       struct drm_display_mode *mode;
+       struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+
+       if (pt->stereo) {
+               printk(KERN_WARNING "stereo mode not supported\n");
+               return NULL;
+       }
+       if (!pt->separate_sync) {
+               printk(KERN_WARNING "integrated sync not supported\n");
+               return NULL;
+       }
+
+       mode = drm_mode_create(dev);
+       if (!mode)
+               return NULL;
+
+       mode->type = DRM_MODE_TYPE_DRIVER;
+       mode->clock = timing->pixel_clock * 10;
+
+       mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
+       mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
+                                             pt->hsync_offset_lo);
+       mode->hsync_end = mode->hsync_start +
+               ((pt->hsync_pulse_width_hi << 8) |
+                pt->hsync_pulse_width_lo);
+       mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
+
+       mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
+       mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
+                                             pt->vsync_offset_lo);
+       mode->vsync_end = mode->vsync_start +
+               ((pt->vsync_pulse_width_hi << 8) |
+                pt->vsync_pulse_width_lo);
+       mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
+
+       drm_mode_set_name(mode);
+
+       if (pt->interlaced)
+               mode->flags |= V_INTERLACE;
+
+       mode->flags |= pt->hsync_positive ? V_PHSYNC : V_NHSYNC;
+       mode->flags |= pt->vsync_positive ? V_PVSYNC : V_NVSYNC;
+
+       return mode;
+}
+
+/*
+ * Detailed mode info for the EDID "established modes" data to use.
+ */
+static struct drm_display_mode edid_est_modes[] = {
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+                  968, 1056, 0, 600, 601, 605, 628, 0,
+                  V_PHSYNC | V_PVSYNC) }, /* 800x600@60Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+                  896, 1024, 0, 600, 601, 603,  625, 0,
+                  V_PHSYNC | V_PVSYNC) }, /* 800x600@56Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+                  720, 840, 0, 480, 481, 484, 500, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 640x480@75Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+                  704,  832, 0, 480, 489, 491, 520, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 640x480@72Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+                  768,  864, 0, 480, 483, 486, 525, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 640x480@67Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+                  752, 800, 0, 480, 490, 492, 525, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+                  846, 900, 0, 400, 421, 423,  449, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 720x400@88Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+                  846,  900, 0, 400, 412, 414, 449, 0,
+                  V_NHSYNC | V_PVSYNC) }, /* 720x400@70Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  V_PHSYNC | V_PVSYNC) }, /* 1280x1024@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+                  1136, 1312, 0,  768, 769, 772, 800, 0,
+                  V_PHSYNC | V_PVSYNC) }, /* 1024x768@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+                  1184, 1328, 0,  768, 771, 777, 806, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 1024x768@70Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0,  768, 771, 777, 806, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 1024x768@60Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+                  1208, 1264, 0, 768, 768, 776, 817, 0,
+                  V_PHSYNC | V_PVSYNC | V_INTERLACE) }, /* 1024x768@43Hz */
+       { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+                  928, 1152, 0, 624, 625, 628, 667, 0,
+                  V_NHSYNC | V_NVSYNC) }, /* 832x624@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+                  896, 1056, 0, 600, 601, 604,  625, 0,
+                  V_PHSYNC | V_PVSYNC) }, /* 800x600@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+                  976, 1040, 0, 600, 637, 643, 666, 0,
+                  V_PHSYNC | V_PVSYNC) }, /* 800x600@72Hz */
+       { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+                  1344, 1600, 0,  864, 865, 868, 900, 0,
+                  V_PHSYNC | V_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above).  Tease them out and add them to the global modes list.
+ */
+static int add_established_modes(struct drm_output *output, struct edid *edid)
+{
+       struct drm_device *dev = output->dev;
+       unsigned long est_bits = edid->established_timings.t1 |
+               (edid->established_timings.t2 << 8) |
+               ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+       int i, modes = 0;
+
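+       /* est_bits packs 17 bits: t1 -> bits 0-7, t2 -> bits 8-15, and bit 7
+        * of mfg_rsvd -> bit 16, one bit per edid_est_modes[] entry above. */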
+       for (i = 0; i <= EDID_EST_TIMINGS; i++)
+               if (est_bits & (1<<i)) {
+                       struct drm_display_mode *newmode;
+                       newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+                       drm_mode_probed_add(output, newmode);
+                       modes++;
+               }
+
+       return modes;
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the CVT standard.  Grab them from
+ * @edid, calculate them, and add them to the list.
+ */
+static int add_standard_modes(struct drm_output *output, struct edid *edid)
+{
+       struct drm_device *dev = output->dev;
+       int i, modes = 0;
+
+       for (i = 0; i < EDID_STD_TIMINGS; i++) {
+               struct std_timing *t = &edid->standard_timings[i];
+               struct drm_display_mode *newmode;
+
+               /* If std timings bytes are 1, 1 it's empty */
+               if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
+                       continue;
+
+               newmode = drm_mode_std(dev, &edid->standard_timings[i]);
+               drm_mode_probed_add(output, newmode);
+               modes++;
+       }
+
+       return modes;
+}
+
+/**
+ * add_detailed_info - get detailed mode info from EDID data
+ * @edid: EDID block to scan
+ *
+ * Some of the detailed timing sections may contain mode information.  Grab
+ * it and add it to the list.
+ */
+static int add_detailed_info(struct drm_output *output, struct edid *edid)
+{
+       struct drm_device *dev = output->dev;
+       int i, j, modes = 0;
+
+       for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
+               struct detailed_timing *timing = &edid->detailed_timings[i];
+               struct detailed_non_pixel *data = &timing->data.other_data;
+               struct drm_display_mode *newmode;
+
+               /* EDID up to and including 1.2 may put monitor info here */
+               if (edid->version == 1 && edid->revision < 3)
+                       continue;
+
+               /* Detailed mode timing */
+               if (timing->pixel_clock) {
+                       newmode = drm_mode_detailed(dev, timing);
+                       /* stereo/integrated-sync timings come back NULL */
+                       if (!newmode)
+                               continue;
+                       /* First detailed mode is preferred */
+                       if (i == 0 && edid->preferred_timing)
+                               newmode->type |= DRM_MODE_TYPE_PREFERRED;
+                       drm_mode_probed_add(output, newmode);
+
+                       modes++;
+                       continue;
+               }
+
+               /* Other timing or info */
+               switch (data->type) {
+               case EDID_DETAIL_MONITOR_SERIAL:
+                       break;
+               case EDID_DETAIL_MONITOR_STRING:
+                       break;
+               case EDID_DETAIL_MONITOR_RANGE:
+                       /* Get monitor range data */
+                       break;
+               case EDID_DETAIL_MONITOR_NAME:
+                       break;
+               case EDID_DETAIL_MONITOR_CPDATA:
+                       break;
+               case EDID_DETAIL_STD_MODES:
+                       /* Five modes per detailed section */
+                       for (j = 0; j < 5; j++) {
+                               struct std_timing *std;
+                               struct drm_display_mode *newmode;
+
+                               std = &data->data.timings[j];
+                               newmode = drm_mode_std(dev, std);
+                               drm_mode_probed_add(output, newmode);
+                               modes++;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return modes;
+}
+
+#define DDC_ADDR 0x50
+
+static unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
+{
+       unsigned char start = 0x0;
+       unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = DDC_ADDR,
+                       .flags  = 0,
+                       .len    = 1,
+                       .buf    = &start,
+               }, {
+                       .addr   = DDC_ADDR,
+                       .flags  = I2C_M_RD,
+                       .len    = EDID_LENGTH,
+                       .buf    = buf,
+               }
+       };
+
+       if (!buf) {
+               DRM_ERROR("unable to allocate memory for EDID block.\n");
+               return NULL;
+       }
+
+       if (i2c_transfer(adapter, msgs, 2) == 2)
+               return buf;
+
+       DRM_INFO("unable to read EDID block.\n");
+       kfree(buf);
+       return NULL;
+}
+
+unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
+{
+       struct i2c_algo_bit_data *algo_data = adapter->algo_data;
+       unsigned char *edid = NULL;
+       int i, j;
+
+       /*
+        * Startup the bus:
+        *   Set clock line high (but give it time to come up)
+        *   Then set clock & data low
+        */
+       algo_data->setscl(algo_data->data, 1);
+       udelay(550); /* startup delay */
+       algo_data->setscl(algo_data->data, 0);
+       algo_data->setsda(algo_data->data, 0);
+
+       for (i = 0; i < 3; i++) {
+               /* For some old monitors we need the
+                * following process to initialize/stop DDC
+                */
+               algo_data->setsda(algo_data->data, 0);
+               msleep(13);
+
+               algo_data->setscl(algo_data->data, 1);
+               for (j = 0; j < 5; j++) {
+                       msleep(10);
+                       if (algo_data->getscl(algo_data->data))
+                               break;
+               }
+               if (j == 5)
+                       continue;
+
+               algo_data->setsda(algo_data->data, 0);
+               msleep(15);
+               algo_data->setscl(algo_data->data, 0);
+               msleep(15);
+               algo_data->setsda(algo_data->data, 1);
+               msleep(15);
+
+               /* Do the real work */
+               edid = drm_do_probe_ddc_edid(adapter);
+               algo_data->setsda(algo_data->data, 0);
+               algo_data->setscl(algo_data->data, 0);
+               msleep(15);
+
+               algo_data->setscl(algo_data->data, 1);
+               for (j = 0; j < 10; j++) {
+                       msleep(10);
+                       if (algo_data->getscl(algo_data->data))
+                               break;
+               }
+
+               algo_data->setsda(algo_data->data, 1);
+               msleep(15);
+               algo_data->setscl(algo_data->data, 0);
+               if (edid)
+                       break;
+       }
+       /* Release the DDC lines when done or the Apple Cinema HD display
+        * will switch off
+        */
+       algo_data->setsda(algo_data->data, 0);
+       algo_data->setscl(algo_data->data, 0);
+       algo_data->setscl(algo_data->data, 1);
+
+       return edid;
+}
+EXPORT_SYMBOL(drm_ddc_read);
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @output: output we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given output's i2c channel to grab EDID data if possible.
+ * 
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_output *output,
+                         struct i2c_adapter *adapter)
+{
+       struct edid *edid;
+
+       edid = (struct edid *)drm_ddc_read(adapter);
+       if (!edid) {
+               dev_warn(&output->dev->pdev->dev, "%s: no EDID data\n",
+                        output->name);
+               return NULL;
+       }
+       if (!edid_valid(edid)) {
+               dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
+                        output->name);
+               kfree(edid);
+               return NULL;
+       }
+       return edid;
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @output: output we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the output's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_output *output, struct edid *edid)
+{
+       int num_modes = 0;
+
+       if (edid == NULL) {
+               return 0;
+       }
+       if (!edid_valid(edid)) {
+               dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
+                        output->name);
+               return 0;
+       }
+       num_modes += add_established_modes(output, edid);
+       num_modes += add_standard_modes(output, edid);
+       num_modes += add_detailed_info(output, edid);
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
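+
+#if 0
+/* Illustrative sketch (editor's addition, not part of the original driver):
+ * the typical probe flow through the two helpers above. The output and
+ * adapter pointers are assumed to come from the caller's driver state.
+ */
+static int example_probe_modes(struct drm_output *output,
+                              struct i2c_adapter *adapter)
+{
+       struct edid *edid;
+       int count = 0;
+
+       edid = drm_get_edid(output, adapter);   /* NULL if absent/invalid */
+       if (edid) {
+               count = drm_add_edid_modes(output, edid);
+               kfree(edid);                    /* caller owns the buffer */
+       }
+       return count;
+}
+#endif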
diff --git a/psb-kernel-source-4.41.1/drm_edid.h b/psb-kernel-source-4.41.1/drm_edid.h
new file mode 100644 (file)
index 0000000..9d18bf0
--- /dev/null
@@ -0,0 +1,179 @@
+#ifndef __DRM_EDID_H__
+#define __DRM_EDID_H__
+
+#include <linux/types.h>
+
+#define EDID_LENGTH 128
+#define DDC_ADDR 0x50
+
+#ifdef BIG_ENDIAN
+#error "EDID structure is little endian, need big endian versions"
+#endif
+
+struct est_timings {
+       u8 t1;
+       u8 t2;
+       u8 mfg_rsvd;
+} __attribute__((packed));
+
+struct std_timing {
+       u8 hsize; /* need to multiply by 8 then add 248 */
+       u8 vfreq:6; /* need to add 60 */
+       u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+} __attribute__((packed));
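+
+/* Decoding example (editor's note): hsize = 97 with aspect_ratio = 01 (4:3)
+ * decodes to 97 * 8 + 248 = 1024 horizontal pixels, hence 1024x768; a vfreq
+ * field of 0 means 0 + 60 = 60 Hz.
+ */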
+
+/* If detailed data is pixel timing */
+struct detailed_pixel_timing {
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_offset_lo;
+       u8 hsync_pulse_width_lo;
+       u8 vsync_pulse_width_lo:4;
+       u8 vsync_offset_lo:4;
+       u8 hsync_pulse_width_hi:2;
+       u8 hsync_offset_hi:2;
+       u8 vsync_pulse_width_hi:2;
+       u8 vsync_offset_hi:2;
+       u8 width_mm_lo;
+       u8 height_mm_lo;
+       u8 height_mm_hi:4;
+       u8 width_mm_hi:4;
+       u8 hborder;
+       u8 vborder;
+       u8 unknown0:1;
+       u8 vsync_positive:1;
+       u8 hsync_positive:1;
+       u8 separate_sync:2;
+       u8 stereo:1;
+       u8 unknown6:1;
+       u8 interlaced:1;
+} __attribute__((packed));
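+
+/* Reassembly sketch (editor's addition): the split hi/lo fields above combine
+ * into full-width values, e.g.
+ *
+ *     hactive = (pt->hactive_hi << 8) | pt->hactive_lo;
+ *     vblank  = (pt->vblank_hi << 8) | pt->vblank_lo;
+ */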
+
+/* If it's not pixel timing, it'll be one of the below */
+struct detailed_data_string {
+       u8 str[13];
+} __attribute__((packed));
+
+struct detailed_data_monitor_range {
+       u8 min_vfreq;
+       u8 max_vfreq;
+       u8 min_hfreq_khz;
+       u8 max_hfreq_khz;
+       u8 pixel_clock_mhz; /* need to multiply by 10 */
+       u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */
+       u8 hfreq_start_khz; /* need to multiply by 2 */
+       u8 c; /* need to divide by 2 */
+       u16 m; /* FIXME: byte order */
+       u8 k;
+       u8 j; /* need to divide by 2 */
+} __attribute__((packed));
+
+struct detailed_data_wpindex {
+       u8 white_y_lo:2;
+       u8 white_x_lo:2;
+       u8 pad:4;
+       u8 white_x_hi;
+       u8 white_y_hi;
+       u8 gamma; /* need to divide by 100 then add 1 */
+} __attribute__((packed));
+
+struct detailed_data_color_point {
+       u8 windex1;
+       u8 wpindex1[3];
+       u8 windex2;
+       u8 wpindex2[3];
+} __attribute__((packed));
+
+struct detailed_non_pixel {
+       u8 pad1;
+       u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+                   fb=color point data, fa=standard timing data,
+                   f9=undefined, f8=mfg. reserved */
+       u8 pad2;
+       union {
+               struct detailed_data_string str;
+               struct detailed_data_monitor_range range;
+               struct detailed_data_wpindex color;
+               struct std_timing timings[5];
+       } data;
+} __attribute__((packed));
+
+#define EDID_DETAIL_STD_MODES 0xfa
+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
+#define EDID_DETAIL_MONITOR_NAME 0xfc
+#define EDID_DETAIL_MONITOR_RANGE 0xfd
+#define EDID_DETAIL_MONITOR_STRING 0xfe
+#define EDID_DETAIL_MONITOR_SERIAL 0xff
+
+struct detailed_timing {
+       u16 pixel_clock; /* need to multiply by 10 KHz */ /* FIXME: byte order */
+       union {
+               struct detailed_pixel_timing pixel_data;
+               struct detailed_non_pixel other_data;
+       } data;
+} __attribute__((packed));
+
+struct edid {
+       u8 header[8];
+       /* Vendor & product info */
+       u16 mfg_id; /* FIXME: byte order */
+       u16 prod_code; /* FIXME: byte order */
+       u32 serial; /* FIXME: byte order */
+       u8 mfg_week;
+       u8 mfg_year;
+       /* EDID version */
+       u8 version;
+       u8 revision;
+       /* Display info: */
+       /*   input definition */
+       u8 serration_vsync:1;
+       u8 sync_on_green:1;
+       u8 composite_sync:1;
+       u8 separate_syncs:1;
+       u8 blank_to_black:1;
+       u8 video_level:2;
+       u8 digital:1; /* bits below must be zero if set */
+       u8 width_cm;
+       u8 height_cm;
+       u8 gamma;
+       /*   feature support */
+       u8 default_gtf:1;
+       u8 preferred_timing:1;
+       u8 standard_color:1;
+       u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+       u8 pm_active_off:1;
+       u8 pm_suspend:1;
+       u8 pm_standby:1;
+       /* Color characteristics */
+       u8 red_green_lo;
+       u8 black_white_lo;
+       u8 red_x;
+       u8 red_y;
+       u8 green_x;
+       u8 green_y;
+       u8 blue_x;
+       u8 blue_y;
+       u8 white_x;
+       u8 white_y;
+       /* Est. timings and mfg rsvd timings*/
+       struct est_timings established_timings;
+       /* Standard timings 1-8*/
+       struct std_timing standard_timings[8];
+       /* Detailing timings 1-4 */
+       struct detailed_timing detailed_timings[4];
+       /* Number of 128 byte ext. blocks */
+       u8 extensions;
+       /* Checksum */
+       u8 checksum;
+} __attribute__((packed));
+
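+/* Validation sketch (editor's addition, based on the standard EDID rule that
+ * all 128 bytes of a block sum to zero modulo 256; the edid_valid() helper in
+ * drm_edid.c presumably performs an equivalent check):
+ *
+ *     u8 csum = 0;
+ *     int i;
+ *     for (i = 0; i < EDID_LENGTH; i++)
+ *             csum += ((u8 *)edid)[i];
+ *     // valid iff csum == 0 (and the header bytes match)
+ */
+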
+extern unsigned char *drm_ddc_read(struct i2c_adapter *adapter);
+extern int drm_get_acpi_edid(char *method, char *edid, ssize_t length);
+
+#endif /* __DRM_EDID_H__ */
diff --git a/psb-kernel-source-4.41.1/drm_fb.c b/psb-kernel-source-4.41.1/drm_fb.c
new file mode 100644 (file)
index 0000000..fd5905d
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+    /*
+     *  Modularization
+     */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+
+struct drmfb_par {
+       struct drm_device *dev;
+       struct drm_crtc *crtc;
+};
+
+static int drmfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                          unsigned blue, unsigned transp,
+                          struct fb_info *info)
+{
+       struct drmfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_crtc *crtc = par->crtc;
+
+       if (regno > 255)
+               return 1;
+
+       if (fb->depth == 8) {
+               if (crtc->funcs->gamma_set) {
+                       crtc->funcs->gamma_set(crtc, red, green, blue, regno);
+               }
+               return 0;
+       }
+       
+       if (regno < 16) {
+               switch (fb->depth) {
+               case 15:
+                       fb->pseudo_palette[regno] = ((red & 0xf800) >>  1) |
+                               ((green & 0xf800) >>  6) |
+                               ((blue & 0xf800) >> 11);
+                       break;
+               case 16:
+                       fb->pseudo_palette[regno] = (red & 0xf800) |
+                               ((green & 0xfc00) >>  5) |
+                               ((blue  & 0xf800) >> 11);
+                       break;
+               case 24:
+               case 32:
+                       fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
+                               (green & 0xff00) |
+                               ((blue  & 0xff00) >> 8);
+                       break;
+               }
+       }
+
+       return 0;
+}
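+
+/* Worked example (editor's note): at depth 16 (RGB565), red = green = blue =
+ * 0x8000 packs as (0x8000 & 0xf800) | ((0x8000 & 0xfc00) >> 5) |
+ * ((0x8000 & 0xf800) >> 11) = 0x8000 | 0x0400 | 0x0010 = 0x8410.
+ */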
+
+static int drmfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct drmfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_display_mode *drm_mode;
+       struct drm_output *output;
+       int depth;
+
+       if (!var->pixclock)
+               return -EINVAL;
+
+       /* Need to resize the fb object !!! */
+       if (var->xres > fb->width || var->yres > fb->height) {
+               DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height);
+               DRM_ERROR("Need resizing code.\n");
+               return -EINVAL;
+       }
+
+       switch (var->bits_per_pixel) {
+       case 16:
+               depth = (var->green.length == 6) ? 16 : 15;
+               break;
+       case 32:
+               depth = (var->transp.length > 0) ? 32 : 24;
+               break;
+       default:
+               depth = var->bits_per_pixel;
+               break;
+       }
+               
+       switch (depth) {
+       case 8:
+               var->red.offset = 0;
+               var->green.offset = 0;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 15:
+               var->red.offset = 10;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 5;
+               var->blue.length = 5;
+               var->transp.length = 1;
+               var->transp.offset = 15;
+               break;
+       case 16:
+               var->red.offset = 11;
+               var->green.offset = 6;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 6;
+               var->blue.length = 5;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 24:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 32:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 8;
+               var->transp.offset = 24;
+               break;
+       default:
+               return -EINVAL; 
+       }
+
+#if 0
+       /* Here we walk the output mode list and look for modes. If we haven't
+        * got it, then bail. Not very nice, so this is disabled.
+        * In the set_par code, we create our mode based on the incoming
+        * parameters. Nicer, but may not be desired by some.
+        */
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               if (output->crtc == par->crtc)
+                       break;
+       }
+    
+       list_for_each_entry(drm_mode, &output->modes, head) {
+               if (drm_mode->hdisplay == var->xres &&
+                   drm_mode->vdisplay == var->yres &&
+                   drm_mode->clock != 0)
+                   break;
+       }
+
+       if (!drm_mode)
+               return -EINVAL;
+#endif
+
+       return 0;
+}
+
+/* this will let fbcon do the mode init */
+static int drmfb_set_par(struct fb_info *info)
+{
+       struct drmfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_device *dev = par->dev;
+       struct drm_display_mode *drm_mode;
+       struct fb_var_screeninfo *var = &info->var;
+       struct drm_output *output;
+
+       switch (var->bits_per_pixel) {
+       case 16:
+               fb->depth = (var->green.length == 6) ? 16 : 15;
+               break;
+       case 32:
+               fb->depth = (var->transp.length > 0) ? 32 : 24;
+               break;
+       default:
+               fb->depth = var->bits_per_pixel;
+               break;
+       }
+
+       fb->bits_per_pixel = var->bits_per_pixel;
+
+       info->fix.line_length = fb->pitch;
+       info->fix.smem_len = info->fix.line_length * fb->height;
+       info->fix.visual = (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+
+       info->screen_size = info->fix.smem_len; /* ??? */
+
+       /* Should we walk the output's modelist or just create our own ???
+        * For now, we create and destroy a mode based on the incoming 
+        * parameters. But there's commented out code below which scans 
+        * the output list too.
+        */
+#if 0
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               if (output->crtc == par->crtc)
+                       break;
+       }
+    
+       list_for_each_entry(drm_mode, &output->modes, head) {
+               if (drm_mode->hdisplay == var->xres &&
+                   drm_mode->vdisplay == var->yres &&
+                   drm_mode->clock != 0)
+                   break;
+       }
+#else
+       drm_mode = drm_mode_create(dev);
+       drm_mode->hdisplay = var->xres;
+       drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
+       drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
+       drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
+       drm_mode->vdisplay = var->yres;
+       drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
+       drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
+       drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
+       drm_mode->clock = PICOS2KHZ(var->pixclock);
+       drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
+       drm_mode_set_name(drm_mode);
+       drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
+#endif
+
+       if (!drm_crtc_set_mode(par->crtc, drm_mode, 0, 0))
+               return -EINVAL;
+
+       /* Have to destroy our created mode if we're not searching the mode
+        * list for it.
+        */
+#if 1 
+       drm_mode_destroy(dev, drm_mode);
+#endif
+
+       return 0;
+}
+
+static struct fb_ops drmfb_ops = {
+       .owner = THIS_MODULE,
+       //      .fb_open = drmfb_open,
+       //      .fb_read = drmfb_read,
+       //      .fb_write = drmfb_write,
+       //      .fb_release = drmfb_release,
+       //      .fb_ioctl = drmfb_ioctl,
+       .fb_check_var = drmfb_check_var,
+       .fb_set_par = drmfb_set_par,
+       .fb_setcolreg = drmfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+};
+
+int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct fb_info *info;
+       struct drm_framebuffer *fb = crtc->fb;
+       struct drmfb_par *par;
+       struct device *device = &dev->pdev->dev; 
+       struct drm_display_mode *mode = crtc->desired_mode;
+       int ret;
+
+       info = framebuffer_alloc(sizeof(struct drmfb_par), device);
+       if (!info)
+               return -ENOMEM;
+
+       fb->fbdev = info;
+               
+       par = info->par;
+
+       par->dev = dev;
+       par->crtc = crtc;
+
+       info->fbops = &drmfb_ops;
+
+       strcpy(info->fix.id, "drmfb");
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = FB_VISUAL_TRUECOLOR;
+       info->fix.accel = FB_ACCEL_NONE;
+       info->fix.type_aux = 0;
+       info->fix.mmio_start = 0;
+       info->fix.mmio_len = 0;
+       info->fix.line_length = fb->pitch;
+       info->fix.smem_start = fb->offset + dev->mode_config.fb_base;
+       info->fix.smem_len = info->fix.line_length * fb->height;
+
+       info->flags = FBINFO_DEFAULT;
+
+       ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
+       if (ret)
+               DRM_ERROR("error mapping fb: %d\n", ret);
+
+       info->screen_base = fb->kmap.virtual;
+       info->screen_size = info->fix.smem_len; /* ??? */
+       info->pseudo_palette = fb->pseudo_palette;
+       info->var.xres_virtual = fb->width;
+       info->var.yres_virtual = fb->height;
+       info->var.bits_per_pixel = fb->bits_per_pixel;
+       info->var.xoffset = 0;
+       info->var.yoffset = 0;
+       info->var.activate = FB_ACTIVATE_NOW;
+       info->var.height = -1;
+       info->var.width = -1;
+       info->var.vmode = FB_VMODE_NONINTERLACED;
+
+       info->var.xres = mode->hdisplay;
+       info->var.right_margin = mode->hsync_start - mode->hdisplay;
+       info->var.hsync_len = mode->hsync_end - mode->hsync_start;
+       info->var.left_margin = mode->htotal - mode->hsync_end;
+       info->var.yres = mode->vdisplay;
+       info->var.lower_margin = mode->vsync_start - mode->vdisplay;
+       info->var.vsync_len = mode->vsync_end - mode->vsync_start;
+       info->var.upper_margin = mode->vtotal - mode->vsync_end;
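+       /* fbdev pixclock is in picoseconds per pixel; the staged arithmetic
+        * below builds 10^12 / (htotal * vtotal) without overflowing 32 bits
+        * before folding in the refresh rate.
+        */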
+       info->var.pixclock = 10000000 / mode->htotal * 1000 /
+                               mode->vtotal * 100;
+       /* avoid overflow */
+       info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
+
+       DRM_DEBUG("fb depth is %d\n", fb->depth);
+       switch(fb->depth) {
+       case 8:
+               info->var.red.offset = 0;
+               info->var.green.offset = 0;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8; /* 8bit DAC */
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 15:
+               info->var.red.offset = 10;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = info->var.green.length =
+                       info->var.blue.length = 5;
+               info->var.transp.offset = 15;
+               info->var.transp.length = 1;
+               break;
+       case 16:
+               info->var.red.offset = 11;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 6;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 24:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = info->var.green.length =
+                       info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 32:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = info->var.green.length =
+                       info->var.blue.length = 8;
+               info->var.transp.offset = 24;
+               info->var.transp.length = 8;
+               break;
+       default:
+               break;
+       }
+
+       if (register_framebuffer(info) < 0) {
+               /* info was never registered, so release it instead */
+               framebuffer_release(info);
+               return -EINVAL;
+       }
+
+       printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+              info->fix.id);
+       return 0;
+}
+EXPORT_SYMBOL(drmfb_probe);
+
+int drmfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct drm_framebuffer *fb = crtc->fb;
+       struct fb_info *info = fb->fbdev;
+       
+       if (info) {
+               unregister_framebuffer(info);
+               framebuffer_release(info);
+               drm_bo_kunmap(&fb->kmap);
+               drm_bo_usage_deref_unlocked(fb->bo);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drmfb_remove);
+MODULE_LICENSE("GPL");
diff --git a/psb-kernel-source-4.41.1/drm_fence.c b/psb-kernel-source-4.41.1/drm_fence.c
new file mode 100644 (file)
index 0000000..188d526
--- /dev/null
@@ -0,0 +1,832 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+
+/*
+ * Convenience function to be called by fence::wait methods that
+ * need polling.
+ */
+
+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
+                          int interruptible, uint32_t mask, 
+                          unsigned long end_jiffies)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
+       uint32_t count = 0;
+       int ret;
+
+       DECLARE_WAITQUEUE(entry, current);
+       add_wait_queue(&fc->fence_queue, &entry);
+
+       ret = 0;
+       
+       for (;;) {
+               __set_current_state((interruptible) ? 
+                                   TASK_INTERRUPTIBLE :
+                                   TASK_UNINTERRUPTIBLE);
+               if (drm_fence_object_signaled(fence, mask))
+                       break;
+               if (time_after_eq(jiffies, end_jiffies)) {
+                       ret = -EBUSY;
+                       break;
+               }
+               if (lazy)
+                       schedule_timeout(1);
+               else if ((++count & 0x0F) == 0) {
+                       __set_current_state(TASK_RUNNING);
+                       schedule();
+                       __set_current_state((interruptible) ? 
+                                           TASK_INTERRUPTIBLE :
+                                           TASK_UNINTERRUPTIBLE);
+               }                       
+               if (interruptible && signal_pending(current)) {
+                       ret = -EAGAIN;
+                       break;
+               }
+       }
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&fc->fence_queue, &entry);
+       return ret;
+}
+EXPORT_SYMBOL(drm_fence_wait_polling);
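+
+#if 0
+/* Sketch (editor's addition): how a driver's fence_driver::wait hook might
+ * fall back to the polling helper above with a three-second timeout.
+ */
+static int example_driver_fence_wait(struct drm_fence_object *fence,
+                                    int lazy, int interruptible,
+                                    uint32_t mask)
+{
+       unsigned long end = jiffies + 3 * DRM_HZ;
+
+       return drm_fence_wait_polling(fence, lazy, interruptible, mask, end);
+}
+#endif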
+
+/*
+ * Typically called by the IRQ handler.
+ */
+
+void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
+                      uint32_t sequence, uint32_t type, uint32_t error)
+{
+       int wake = 0;
+       uint32_t diff;
+       uint32_t relevant_type;
+       uint32_t new_type;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       struct list_head *head;
+       struct drm_fence_object *fence, *next;
+       int found = 0;
+
+       if (list_empty(&fc->ring))
+               return;
+
+       list_for_each_entry(fence, &fc->ring, ring) {
+               diff = (sequence - fence->sequence) & driver->sequence_mask;
+               if (diff > driver->wrap_diff) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       fc->waiting_types &= ~type;
+       head = (found) ? &fence->ring : &fc->ring;
+
+       list_for_each_entry_safe_reverse(fence, next, head, ring) {
+               if (&fence->ring == &fc->ring)
+                       break;
+
+               if (error) {
+                       fence->error = error;
+                       fence->signaled_types = fence->type;
+                       list_del_init(&fence->ring);
+                       wake = 1;
+                       break;
+               }
+
+               if (type & DRM_FENCE_TYPE_EXE)
+                       type |= fence->native_types;
+
+               relevant_type = type & fence->type;
+               new_type = (fence->signaled_types | relevant_type) ^
+                       fence->signaled_types;
+
+               if (new_type) {
+                       fence->signaled_types |= new_type;
+                       DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
+                                 fence->base.hash.key, fence->signaled_types);
+
+                       if (driver->needed_flush)
+                               fc->pending_flush |= driver->needed_flush(fence);
+
+                       if (new_type & fence->waiting_types)
+                               wake = 1;
+               }
+
+               fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
+
+               if (!(fence->type & ~fence->signaled_types)) {
+                       DRM_DEBUG("Fence completely signaled 0x%08lx\n",
+                                 fence->base.hash.key);
+                       list_del_init(&fence->ring);
+               }
+       }
+
+       /*
+        * Reinstate lost waiting types.
+        */
+
+       if ((fc->waiting_types & type) != type) {
+               head = head->prev;
+               list_for_each_entry(fence, head, ring) {
+                       if (&fence->ring == &fc->ring)
+                               break;
+                       diff = (fc->highest_waiting_sequence - fence->sequence) &
+                               driver->sequence_mask;
+                       if (diff > driver->wrap_diff)
+                               break;
+                       
+                       fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
+               }
+       }
+
+       if (wake) 
+               wake_up_all(&fc->fence_queue);
+}
+EXPORT_SYMBOL(drm_fence_handler);
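+
+#if 0
+/* Sketch (editor's addition): a device interrupt handler would typically read
+ * the hardware's last completed sequence number and forward it here. The
+ * register read below is a hypothetical placeholder, as is fence class 0.
+ */
+static void example_irq_fence_done(struct drm_device *dev)
+{
+       uint32_t sequence = example_read_fence_sequence_reg(dev); /* hypothetical */
+
+       drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE, 0 /* no error */);
+}
+#endif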
+
+static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
+{
+       struct drm_fence_manager *fm = &dev->fm;
+       unsigned long flags;
+
+       write_lock_irqsave(&fm->lock, flags);
+       list_del_init(ring);
+       write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
+{
+       struct drm_fence_object *tmp_fence = *fence;
+       struct drm_device *dev = tmp_fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       *fence = NULL;
+       if (atomic_dec_and_test(&tmp_fence->usage)) {
+               drm_fence_unring(dev, &tmp_fence->ring);
+               DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
+                         tmp_fence->base.hash.key);
+               atomic_dec(&fm->count);
+               BUG_ON(!list_empty(&tmp_fence->base.list));
+               drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
+       }
+}
+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
+
+void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
+{
+       struct drm_fence_object *tmp_fence = *fence;
+       struct drm_device *dev = tmp_fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+
+       *fence = NULL;
+       if (atomic_dec_and_test(&tmp_fence->usage)) {
+               mutex_lock(&dev->struct_mutex);
+               if (atomic_read(&tmp_fence->usage) == 0) {
+                       drm_fence_unring(dev, &tmp_fence->ring);
+                       atomic_dec(&fm->count);
+                       BUG_ON(!list_empty(&tmp_fence->base.list));
+                       drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
+               }
+               mutex_unlock(&dev->struct_mutex);
+       }
+}
+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
+
+struct drm_fence_object
+*drm_fence_reference_locked(struct drm_fence_object *src)
+{
+       DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
+
+       atomic_inc(&src->usage);
+       return src;
+}
+
+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
+                                 struct drm_fence_object *src)
+{
+       mutex_lock(&src->dev->struct_mutex);
+       *dst = src;
+       atomic_inc(&src->usage);
+       mutex_unlock(&src->dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_fence_reference_unlocked);
+
+static void drm_fence_object_destroy(struct drm_file *priv,
+                                    struct drm_user_object *base)
+{
+       struct drm_fence_object *fence =
+           drm_user_object_entry(base, struct drm_fence_object, base);
+
+       drm_fence_usage_deref_locked(&fence);
+}
+
+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
+{
+       unsigned long flags;
+       int signaled;
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       
+       mask &= fence->type;
+       read_lock_irqsave(&fm->lock, flags);
+
+       signaled = (mask & fence->signaled_types) == mask;
+       read_unlock_irqrestore(&fm->lock, flags);
+       if (!signaled && driver->poll) {
+               write_lock_irqsave(&fm->lock, flags);
+               driver->poll(dev, fence->fence_class, mask);
+               signaled = (mask & fence->signaled_types) == mask;
+               write_unlock_irqrestore(&fm->lock, flags);
+       }
+       return signaled;
+}
+EXPORT_SYMBOL(drm_fence_object_signaled);
+
+
+int drm_fence_object_flush(struct drm_fence_object *fence,
+                          uint32_t type)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       unsigned long irq_flags;
+       uint32_t saved_pending_flush;
+       uint32_t diff;
+       int call_flush;
+
+       /* FIXME: this is a hack to work around an X server crash
+          issue when enabling de-tearing */
+/*     if (type & ~fence->type) { */
+/*             DRM_ERROR("Flush trying to extend fence type, " */
+/*                       "0x%x, 0x%x\n", type, fence->type); */
+/*                 return -EINVAL; */
+/*     } */
+
+       write_lock_irqsave(&fm->lock, irq_flags);
+       fence->waiting_types |= type;
+       fc->waiting_types |= fence->waiting_types;
+       diff = (fence->sequence - fc->highest_waiting_sequence) & 
+               driver->sequence_mask;
+
+       if (diff < driver->wrap_diff)
+               fc->highest_waiting_sequence = fence->sequence;
+
+       /*
+        * fence->waiting_types has changed. Determine whether
+        * we need to initiate some kind of flush as a result of this.
+        */
+
+       saved_pending_flush = fc->pending_flush;
+       if (driver->needed_flush) 
+               fc->pending_flush |= driver->needed_flush(fence);
+
+       if (driver->poll)
+               driver->poll(dev, fence->fence_class, fence->waiting_types);
+
+       call_flush = fc->pending_flush;
+       write_unlock_irqrestore(&fm->lock, irq_flags);
+
+       if (call_flush && driver->flush)
+               driver->flush(dev, fence->fence_class);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_fence_object_flush);
+
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
+                        uint32_t sequence)
+{
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
+       struct drm_fence_object *fence;
+       unsigned long irq_flags;
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       int call_flush;
+
+       uint32_t diff;
+
+       write_lock_irqsave(&fm->lock, irq_flags);
+
+       list_for_each_entry_reverse(fence, &fc->ring, ring) {
+               diff = (sequence - fence->sequence) & driver->sequence_mask;
+               if (diff <= driver->flush_diff)
+                       break;
+       
+               fence->waiting_types = fence->type;
+               fc->waiting_types |= fence->type;
+
+               if (driver->needed_flush)
+                       fc->pending_flush |= driver->needed_flush(fence);
+       }       
+       
+       if (driver->poll)
+               driver->poll(dev, fence_class, fc->waiting_types);
+
+       call_flush = fc->pending_flush;
+       write_unlock_irqrestore(&fm->lock, irq_flags);
+
+       if (call_flush && driver->flush)
+               driver->flush(dev, fence_class);
+
+       /*
+        * FIXME: Should we implement a wait here for really old fences?
+        */
+
+}
+EXPORT_SYMBOL(drm_fence_flush_old);
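+
+/* Editor's note: a driver emitting fence sequence numbers would typically
+ * call drm_fence_flush_old() as new sequences approach the wrap point, so
+ * that stale fences still on the ring are signaled before their numbers are
+ * reused.
+ */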
+
+int drm_fence_object_wait(struct drm_fence_object *fence,
+                         int lazy, int ignore_signals, uint32_t mask)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
+       int ret = 0;
+       unsigned long _end = 3 * DRM_HZ;
+
+       if (mask & ~fence->type) {
+               DRM_ERROR("Wait trying to extend fence type"
+                         " 0x%08x 0x%08x\n", mask, fence->type);
+               BUG();
+               return -EINVAL;
+       }
+
+       if (driver->wait)
+               return driver->wait(fence, lazy, !ignore_signals, mask);
+
+
+       drm_fence_object_flush(fence, mask);
+       if (driver->has_irq(dev, fence->fence_class, mask)) {
+               if (!ignore_signals)
+                       ret = wait_event_interruptible_timeout
+                               (fc->fence_queue, 
+                                drm_fence_object_signaled(fence, mask), 
+                                3 * DRM_HZ);
+               else 
+                       ret = wait_event_timeout
+                               (fc->fence_queue, 
+                                drm_fence_object_signaled(fence, mask), 
+                                3 * DRM_HZ);
+
+               if (unlikely(ret == -ERESTARTSYS))
+                       return -EAGAIN;
+
+               if (unlikely(ret == 0))
+                       return -EBUSY;
+
+               return 0;
+       }
+
+       return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
+                                     _end);
+}
+EXPORT_SYMBOL(drm_fence_object_wait);
+
+
+
+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
+                         uint32_t fence_class, uint32_t type)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
+       unsigned long flags;
+       uint32_t sequence;
+       uint32_t native_types;
+       int ret;
+
+       drm_fence_unring(dev, &fence->ring);
+       ret = driver->emit(dev, fence_class, fence_flags, &sequence,
+                          &native_types);
+       if (ret)
+               return ret;
+
+       write_lock_irqsave(&fm->lock, flags);
+       fence->fence_class = fence_class;
+       fence->type = type;
+       fence->waiting_types = 0;
+       fence->signaled_types = 0;
+       fence->error = 0;
+       fence->sequence = sequence;
+       fence->native_types = native_types;
+       if (list_empty(&fc->ring))
+               fc->highest_waiting_sequence = sequence - 1;
+       list_add_tail(&fence->ring, &fc->ring);
+       fc->latest_queued_sequence = sequence;
+       write_unlock_irqrestore(&fm->lock, flags);
+       return 0;
+}
+EXPORT_SYMBOL(drm_fence_object_emit);
+
+static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
+                                uint32_t type,
+                                uint32_t fence_flags,
+                                struct drm_fence_object *fence)
+{
+       int ret = 0;
+       unsigned long flags;
+       struct drm_fence_manager *fm = &dev->fm;
+
+       mutex_lock(&dev->struct_mutex);
+       atomic_set(&fence->usage, 1);
+       mutex_unlock(&dev->struct_mutex);
+
+       write_lock_irqsave(&fm->lock, flags);
+       INIT_LIST_HEAD(&fence->ring);
+
+       /*
+        *  Avoid hitting BUG() for kernel-only fence objects.
+        */
+
+       INIT_LIST_HEAD(&fence->base.list);
+       fence->fence_class = fence_class;
+       fence->type = type;
+       fence->signaled_types = 0;
+       fence->waiting_types = 0;
+       fence->sequence = 0;
+       fence->error = 0;
+       fence->dev = dev;
+       write_unlock_irqrestore(&fm->lock, flags);
+       if (fence_flags & DRM_FENCE_FLAG_EMIT) {
+               ret = drm_fence_object_emit(fence, fence_flags,
+                                           fence->fence_class, type);
+       }
+       return ret;
+}
+
+int drm_fence_add_user_object(struct drm_file *priv,
+                             struct drm_fence_object *fence, int shareable)
+{
+       struct drm_device *dev = priv->head->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_add_user_object(priv, &fence->base, shareable);
+       if (ret)
+               goto out;
+       atomic_inc(&fence->usage);
+       fence->base.type = drm_fence_type;
+       fence->base.remove = &drm_fence_object_destroy;
+       DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
+out:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(drm_fence_add_user_object);
+
+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
+                           uint32_t type, unsigned flags,
+                           struct drm_fence_object **c_fence)
+{
+       struct drm_fence_object *fence;
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+
+       fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
+       if (!fence) {
+               DRM_INFO("Out of memory creating fence object.\n");
+               return -ENOMEM;
+       }
+       ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
+       if (ret) {
+               drm_fence_usage_deref_unlocked(&fence);
+               return ret;
+       }
+       *c_fence = fence;
+       atomic_inc(&fm->count);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_fence_object_create);
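+
+#if 0
+/* Sketch (editor's addition): a kernel-side fence round trip using the
+ * helpers above -- create with an immediate emit, wait for completion, then
+ * drop the reference. Fence class 0 and the EXE type are illustrative.
+ */
+static int example_fence_roundtrip(struct drm_device *dev)
+{
+       struct drm_fence_object *fence;
+       int ret;
+
+       ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
+                                     DRM_FENCE_FLAG_EMIT, &fence);
+       if (ret)
+               return ret;
+
+       ret = drm_fence_object_wait(fence, 1 /* lazy */, 0, fence->type);
+       drm_fence_usage_deref_unlocked(&fence);
+       return ret;
+}
+#endif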
+
+void drm_fence_manager_init(struct drm_device *dev)
+{
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fence_class;
+       struct drm_fence_driver *fed = dev->driver->fence_driver;
+       int i;
+       unsigned long flags;
+
+       rwlock_init(&fm->lock);
+       write_lock_irqsave(&fm->lock, flags);
+       fm->initialized = 0;
+       if (!fed)
+           goto out_unlock;
+
+       fm->initialized = 1;
+       fm->num_classes = fed->num_classes;
+       BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
+
+       for (i = 0; i < fm->num_classes; ++i) {
+           fence_class = &fm->fence_class[i];
+
+           memset(fence_class, 0, sizeof(*fence_class));
+           INIT_LIST_HEAD(&fence_class->ring);
+           DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
+       }
+
+       atomic_set(&fm->count, 0);
+ out_unlock:
+       write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void drm_fence_fill_arg(struct drm_fence_object *fence,
+                       struct drm_fence_arg *arg)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+       unsigned long irq_flags;
+
+       read_lock_irqsave(&fm->lock, irq_flags);
+       arg->handle = fence->base.hash.key;
+       arg->fence_class = fence->fence_class;
+       arg->type = fence->type;
+       arg->signaled = fence->signaled_types;
+       arg->error = fence->error;
+       arg->sequence = fence->sequence;
+       read_unlock_irqrestore(&fm->lock, irq_flags);
+}
+EXPORT_SYMBOL(drm_fence_fill_arg);
+
+void drm_fence_manager_takedown(struct drm_device *dev)
+{
+}
+
+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
+                                                uint32_t handle)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_user_object *uo;
+       struct drm_fence_object *fence;
+
+       mutex_lock(&dev->struct_mutex);
+       uo = drm_lookup_user_object(priv, handle);
+       if (!uo || (uo->type != drm_fence_type)) {
+               mutex_unlock(&dev->struct_mutex);
+               return NULL;
+       }
+       fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
+       mutex_unlock(&dev->struct_mutex);
+       return fence;
+}
+
+int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       struct drm_fence_object *fence;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       if (arg->flags & DRM_FENCE_FLAG_EMIT)
+               LOCK_TEST_WITH_RETURN(dev, file_priv);
+       ret = drm_fence_object_create(dev, arg->fence_class,
+                                     arg->type, arg->flags, &fence);
+       if (ret)
+               return ret;
+       ret = drm_fence_add_user_object(file_priv, fence,
+                                       arg->flags &
+                                       DRM_FENCE_FLAG_SHAREABLE);
+       if (ret) {
+               drm_fence_usage_deref_unlocked(&fence);
+               return ret;
+       }
+
+       /*
+        * usage > 0. No need to lock dev->struct_mutex;
+        */
+
+       arg->handle = fence->base.hash.key;
+
+       drm_fence_fill_arg(fence, arg);
+       drm_fence_usage_deref_unlocked(&fence);
+
+       return ret;
+}
+
+int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       struct drm_fence_object *fence;
+       struct drm_user_object *uo;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
+       if (ret)
+               return ret;
+       fence = drm_lookup_fence_object(file_priv, arg->handle);
+       drm_fence_fill_arg(fence, arg);
+       drm_fence_usage_deref_unlocked(&fence);
+
+       return ret;
+}
+
+
+int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
+}
+
+int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       struct drm_fence_object *fence;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       fence = drm_lookup_fence_object(file_priv, arg->handle);
+       if (!fence)
+               return -EINVAL;
+
+       drm_fence_fill_arg(fence, arg);
+       drm_fence_usage_deref_unlocked(&fence);
+
+       return ret;
+}
+
+int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       struct drm_fence_object *fence;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       fence = drm_lookup_fence_object(file_priv, arg->handle);
+       if (!fence)
+               return -EINVAL;
+       ret = drm_fence_object_flush(fence, arg->type);
+
+       drm_fence_fill_arg(fence, arg);
+       drm_fence_usage_deref_unlocked(&fence);
+
+       return ret;
+}
+
+
+int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       struct drm_fence_object *fence;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       fence = drm_lookup_fence_object(file_priv, arg->handle);
+       if (!fence)
+               return -EINVAL;
+       ret = drm_fence_object_wait(fence,
+                                   arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
+                                   0, arg->type);
+
+       drm_fence_fill_arg(fence, arg);
+       drm_fence_usage_deref_unlocked(&fence);
+
+       return ret;
+}
+
+
+int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       struct drm_fence_object *fence;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+       fence = drm_lookup_fence_object(file_priv, arg->handle);
+       if (!fence)
+               return -EINVAL;
+       ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
+                                   arg->type);
+
+       drm_fence_fill_arg(fence, arg);
+       drm_fence_usage_deref_unlocked(&fence);
+
+       return ret;
+}
+
+int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       int ret;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_arg *arg = data;
+       struct drm_fence_object *fence;
+       ret = 0;
+
+       if (!fm->initialized) {
+               DRM_ERROR("The DRM driver does not support fencing.\n");
+               return -EINVAL;
+       }
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized\n");
+               return -EINVAL;
+       }
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+       ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
+                                      NULL, &fence);
+       if (ret)
+               return ret;
+
+       if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
+               ret = drm_fence_add_user_object(file_priv, fence,
+                                               arg->flags &
+                                               DRM_FENCE_FLAG_SHAREABLE);
+               if (ret)
+                       return ret;
+       }
+
+       arg->handle = fence->base.hash.key;
+
+       drm_fence_fill_arg(fence, arg);
+       drm_fence_usage_deref_unlocked(&fence);
+
+       return ret;
+}
diff --git a/psb-kernel-source-4.41.1/drm_fops.c b/psb-kernel-source-4.41.1/drm_fops.c
new file mode 100644 (file)
index 0000000..c83c099
--- /dev/null
@@ -0,0 +1,531 @@
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_sarea.h"
+#include <linux/poll.h>
+
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev);
+
+static int drm_setup(struct drm_device * dev)
+{
+       drm_local_map_t *map;
+       int i;
+       int ret;
+       int sareapage;
+
+       if (dev->driver->firstopen) {
+               ret = dev->driver->firstopen(dev);
+               if (ret != 0)
+                       return ret;
+       }
+
+       dev->magicfree.next = NULL;
+
+       /* prebuild the SAREA */
+       sareapage = max(SAREA_MAX, PAGE_SIZE);
+       i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+       if (i != 0)
+               return i;
+
+       atomic_set(&dev->ioctl_count, 0);
+       atomic_set(&dev->vma_count, 0);
+       dev->buf_use = 0;
+       atomic_set(&dev->buf_alloc, 0);
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+               i = drm_dma_setup(dev);
+               if (i < 0)
+                       return i;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+               atomic_set(&dev->counts[i], 0);
+
+       drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
+       INIT_LIST_HEAD(&dev->magicfree);
+
+       dev->sigdata.lock = NULL;
+       init_waitqueue_head(&dev->lock.lock_queue);
+       dev->queue_count = 0;
+       dev->queue_reserved = 0;
+       dev->queue_slots = 0;
+       dev->queuelist = NULL;
+       dev->context_flag = 0;
+       dev->interrupt_flag = 0;
+       dev->dma_flag = 0;
+       dev->last_context = 0;
+       dev->last_switch = 0;
+       dev->last_checked = 0;
+       init_waitqueue_head(&dev->context_wait);
+       dev->if_version = 0;
+
+       dev->ctx_start = 0;
+       dev->lck_start = 0;
+
+       dev->buf_async = NULL;
+       init_waitqueue_head(&dev->buf_readers);
+       init_waitqueue_head(&dev->buf_writers);
+
+       DRM_DEBUG("\n");
+
+       /*
+        * The kernel's context could be created here, but is now created
+        * in drm_dma_enqueue.  This is more resource-efficient for
+        * hardware that does not do DMA, but may mean that
+        * drm_select_queue fails between the time the interrupt is
+        * initialized and the time the queues are initialized.
+        */
+
+       return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previously zero,
+ * i.e., this is the first time the device has been opened, then calls setup().
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       int minor = iminor(inode);
+       int retcode = 0;
+
+       if (!((minor >= 0) && (minor < drm_cards_limit)))
+               return -ENODEV;
+
+       if (!drm_heads[minor])
+               return -ENODEV;
+
+       if (!(dev = drm_heads[minor]->dev))
+               return -ENODEV;
+
+       retcode = drm_open_helper(inode, filp, dev);
+       if (!retcode) {
+               atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+               spin_lock(&dev->count_lock);
+               if (!dev->open_count++) {
+                       spin_unlock(&dev->count_lock);
+                       retcode = drm_setup(dev);
+                       goto out;
+               }
+               spin_unlock(&dev->count_lock);
+       }
+
+out:
+       mutex_lock(&dev->struct_mutex);
+       BUG_ON((dev->dev_mapping != NULL) &&
+              (dev->dev_mapping != inode->i_mapping));
+       if (dev->dev_mapping == NULL)
+               dev->dev_mapping = inode->i_mapping;
+       mutex_unlock(&dev->struct_mutex);
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, calls the \c open method, and restores the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+       struct drm_device *dev = NULL;
+       int minor = iminor(inode);
+       int err = -ENODEV;
+       const struct file_operations *old_fops;
+
+       DRM_DEBUG("\n");
+
+       if (!((minor >= 0) && (minor < drm_cards_limit)))
+               return -ENODEV;
+
+       if (!drm_heads[minor])
+               return -ENODEV;
+
+       if (!(dev = drm_heads[minor]->dev))
+               return -ENODEV;
+
+       old_fops = filp->f_op;
+       filp->f_op = fops_get(&dev->driver->fops);
+       if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+               fops_put(filp->f_op);
+               filp->f_op = fops_get(old_fops);
+       }
+       fops_put(old_fops);
+
+       return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+       if (boot_cpu_data.x86 == 3)
+               return 0;       /* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+       return 0;               /* No cmpxchg before v9 sparc. */
+#endif
+       return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in
+ * \p filp and adds it to the doubly-linked file list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          struct drm_device * dev)
+{
+       int minor = iminor(inode);
+       struct drm_file *priv;
+       int ret = 0;
+       int i, j;
+
+       if (filp->f_flags & O_EXCL)
+               return -EBUSY;  /* No exclusive opens */
+       if (!drm_cpu_valid())
+               return -EINVAL;
+
+       DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
+
+       priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+       if (!priv)
+               return -ENOMEM;
+
+       memset(priv, 0, sizeof(*priv));
+       filp->private_data = priv;
+       priv->filp = filp;
+       priv->uid = current->loginuid;
+       priv->pid = current->pid;
+       priv->minor = minor;
+       priv->head = drm_heads[minor];
+       priv->ioctl_count = 0;
+       /* for compatibility root is always authenticated */
+       priv->authenticated = capable(CAP_SYS_ADMIN);
+       priv->lock_count = 0;
+
+       INIT_LIST_HEAD(&priv->lhead);
+       INIT_LIST_HEAD(&priv->refd_objects);
+       INIT_LIST_HEAD(&priv->fbs);
+
+       for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
+               ret = drm_ht_create(&priv->refd_object_hash[i],
+                                   DRM_FILE_HASH_ORDER);
+               if (ret)
+                       break;
+       }
+
+       if (ret) {
+               for (j = 0; j < i; ++j)
+                       drm_ht_remove(&priv->refd_object_hash[j]);
+               goto out_free;
+       }
+
+       if (dev->driver->open) {
+               ret = dev->driver->open(dev, priv);
+               if (ret < 0)
+                       goto out_free;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (list_empty(&dev->filelist))
+               priv->master = 1;
+
+       list_add(&priv->lhead, &dev->filelist);
+       mutex_unlock(&dev->struct_mutex);
+
+#ifdef __alpha__
+       /*
+        * Default the hose (the Alpha PCI controller) if the driver
+        * did not set one.
+        */
+       if (!dev->hose) {
+               struct pci_dev *pci_dev;
+               pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+               if (pci_dev) {
+                       dev->hose = pci_dev->sysdata;
+                       pci_dev_put(pci_dev);
+               }
+               if (!dev->hose) {
+                       struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+                       if (b)
+                               dev->hose = b->sysdata;
+               }
+       }
+#endif
+
+       return 0;
+out_free:
+       drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+       filp->private_data = NULL;
+       return ret;
+}
+
+/** Set up asynchronous (SIGIO) notification for the DRM file. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->head->dev;
+       int retcode;
+
+       DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+                 (long)old_encode_dev(priv->head->device));
+       retcode = fasync_helper(fd, filp, on, &dev->buf_async);
+       if (retcode < 0)
+               return retcode;
+       return 0;
+}
+EXPORT_SYMBOL(drm_fasync);
+
+static void drm_object_release(struct file *filp)
+{
+       struct drm_file *priv = filp->private_data;
+       struct list_head *head;
+       struct drm_ref_object *ref_object;
+       int i;
+
+       /*
+        * Free leftover ref objects created by this file. Note that we
+        * cannot use list_for_each() here, as the struct_mutex may be
+        * temporarily released by the remove_() functions, and thus the
+        * lists may be altered.
+        * Also, drm_remove_ref_object() will not remove an object from
+        * the list unless its refcount is 1.
+        */
+
+       head = &priv->refd_objects;
+       while (head->next != head) {
+               ref_object = list_entry(head->next, struct drm_ref_object, list);
+               drm_remove_ref_object(priv, ref_object);
+               head = &priv->refd_objects;
+       }
+
+       for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
+               drm_ht_remove(&priv->refd_object_hash[i]);
+}
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
+int drm_release(struct inode *inode, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->head->dev;
+       int retcode = 0;
+
+       lock_kernel();
+
+       DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+       if (dev->driver->preclose)
+               dev->driver->preclose(dev, file_priv);
+
+       /* ========================================================
+        * Begin inline drm_release
+        */
+
+       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+                 current->pid, (long)old_encode_dev(file_priv->head->device),
+                 dev->open_count);
+
+       if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+               if (drm_i_have_hw_lock(dev, file_priv)) {
+                       dev->driver->reclaim_buffers_locked(dev, file_priv);
+               } else {
+                       unsigned long _end = jiffies + 3 * DRM_HZ;
+                       int locked = 0;
+
+                       drm_idlelock_take(&dev->lock);
+
+                       /*
+                        * Wait up to three seconds for the idle lock.
+                        */
+
+                       do {
+                               spin_lock(&dev->lock.spinlock);
+                               locked = dev->lock.idle_has_lock;
+                               spin_unlock(&dev->lock.spinlock);
+                               if (locked)
+                                       break;
+                               schedule();
+                       } while (!time_after_eq(jiffies, _end));
+
+                       if (!locked) {
+                               DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+                                         "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+                                         "\tI will go on reclaiming the buffers anyway.\n");
+                       }
+
+                       dev->driver->reclaim_buffers_locked(dev, file_priv);
+                       drm_idlelock_release(&dev->lock);
+               }
+       }
+
+       if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+
+               drm_idlelock_take(&dev->lock);
+               dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+               drm_idlelock_release(&dev->lock);
+
+       }
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               DRM_DEBUG("File %p released, freeing lock for context %d\n",
+                         filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+               drm_lock_free(&dev->lock,
+                             _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !dev->driver->reclaim_buffers_locked) {
+               dev->driver->reclaim_buffers(dev, file_priv);
+       }
+
+       drm_fasync(-1, filp, 0);
+
+       mutex_lock(&dev->ctxlist_mutex);
+
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->tag == file_priv &&
+                           pos->handle != DRM_KERNEL_CONTEXT) {
+                               if (dev->driver->context_dtor)
+                                       dev->driver->context_dtor(dev,
+                                                                 pos->handle);
+
+                               drm_ctxbitmap_free(dev, pos->handle);
+
+                               list_del(&pos->head);
+                               drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+
+       mutex_lock(&dev->struct_mutex);
+       drm_fb_release(filp);
+       drm_object_release(filp);
+       if (file_priv->remove_auth_on_close == 1) {
+               struct drm_file *temp;
+
+               list_for_each_entry(temp, &dev->filelist, lhead)
+                       temp->authenticated = 0;
+       }
+       list_del(&file_priv->lhead);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (dev->driver->postclose)
+               dev->driver->postclose(dev, file_priv);
+       drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
+
+       /* ========================================================
+        * End inline drm_release
+        */
+
+       atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+       spin_lock(&dev->count_lock);
+       if (!--dev->open_count) {
+               if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+                       DRM_ERROR("Device busy: %d %d\n",
+                                 atomic_read(&dev->ioctl_count), dev->blocked);
+                       spin_unlock(&dev->count_lock);
+                       unlock_kernel();
+                       return -EBUSY;
+               }
+               spin_unlock(&dev->count_lock);
+               unlock_kernel();
+               return drm_lastclose(dev);
+       }
+       spin_unlock(&dev->count_lock);
+
+       unlock_kernel();
+
+       return retcode;
+}
+EXPORT_SYMBOL(drm_release);
+
+/** No-op. */
+/* This is to deal with older X servers that believe 0 means data is
+ * available, which is not the correct return for a poll function.
+ * This cannot be fixed until the X server is fixed: it will need to
+ * set a newer interface version to avoid breaking older X servers.
+ * If you return the correct response without that fix you get
+ * "WaitForSomething(): select: errno=22"; see
+ * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505.
+ */
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       /* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */
+       return 0;
+}
+EXPORT_SYMBOL(drm_poll);
diff --git a/psb-kernel-source-4.41.1/drm_hashtab.c b/psb-kernel-source-4.41.1/drm_hashtab.c
new file mode 100644 (file)
index 0000000..f5a4f84
--- /dev/null
@@ -0,0 +1,202 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash table implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_hashtab.h"
+#include <linux/hash.h>
+
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
+{
+       unsigned int i;
+
+       ht->size = 1 << order;
+       ht->order = order;
+       ht->fill = 0;
+       ht->table = NULL;
+       ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+       if (!ht->use_vmalloc) {
+               ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+                                      DRM_MEM_HASHTAB);
+       }
+       if (!ht->table) {
+               ht->use_vmalloc = 1;
+               ht->table = vmalloc(ht->size * sizeof(*ht->table));
+       }
+       if (!ht->table) {
+               DRM_ERROR("Out of memory for hash table\n");
+               return -ENOMEM;
+       }
+       for (i = 0; i < ht->size; ++i) {
+               INIT_HLIST_HEAD(&ht->table[i]);
+       }
+       return 0;
+}
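+
+/*
+ * Usage sketch (illustrative only; struct my_obj is a hypothetical caller
+ * type embedding a drm_hash_item):
+ *
+ *     struct my_obj {
+ *             struct drm_hash_item hash;
+ *             int payload;
+ *     };
+ *
+ *     struct drm_open_hash ht;
+ *     struct drm_hash_item *found;
+ *     struct my_obj obj = { .hash = { .key = 42 }, .payload = 7 };
+ *
+ *     if (drm_ht_create(&ht, 12))             // 1 << 12 == 4096 buckets
+ *             return -ENOMEM;
+ *     drm_ht_insert_item(&ht, &obj.hash);     // -EINVAL on duplicate key
+ *     if (!drm_ht_find_item(&ht, 42, &found))
+ *             drm_hash_entry(found, struct my_obj, hash)->payload++;
+ *     drm_ht_remove_item(&ht, &obj.hash);
+ *     drm_ht_remove(&ht);
+ */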
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+       int count = 0;
+
+       hashed_key = hash_long(key, ht->order);
+       DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+       }
+}
+
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+                                         unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return list;
+               if (entry->key > key)
+                       break;
+       }
+       return NULL;
+}
+
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list, *parent;
+       unsigned int hashed_key;
+       unsigned long key = item->key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       parent = NULL;
+       hlist_for_each(list, h_list) {
+               entry = hlist_entry(list, struct drm_hash_item, head);
+               if (entry->key == key)
+                       return -EINVAL;
+               if (entry->key > key)
+                       break;
+               parent = list;
+       }
+       if (parent) {
+               hlist_add_after(parent, &item->head);
+       } else {
+               hlist_add_head(&item->head, h_list);
+       }
+       return 0;
+}
+
+/*
+ * Insert an item and assign it a key that hasn't been used before. The
+ * key is "bits" bits wide, shifted left by "shift" and offset by "add";
+ * probing starts from a hash of "seed" and wraps within the key space.
+ */
+int drm_ht_just_insert_please(struct drm_open_hash *ht,
+                             struct drm_hash_item *item,
+                             unsigned long seed, int bits, int shift,
+                             unsigned long add)
+{
+       int ret;
+       unsigned long mask = (1 << bits) - 1;
+       unsigned long first, unshifted_key;
+
+       unshifted_key = hash_long(seed, bits);
+       first = unshifted_key;
+       do {
+               item->key = (unshifted_key << shift) + add;
+               ret = drm_ht_insert_item(ht, item);
+               if (ret)
+                       unshifted_key = (unshifted_key + 1) & mask;
+       } while (ret && (unshifted_key != first));
+
+       if (ret) {
+               DRM_ERROR("Available key bit space exhausted\n");
+               return -EINVAL;
+       }
+       return 0;
+}
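+
+/*
+ * Worked illustration (added for exposition): with bits = 8, shift = 4 and
+ * add = 0x100, candidate keys are 0x100 + (k << 4) for the 256 values of
+ * k; probing starts at k = hash_long(seed, 8) and steps k, k + 1, ...
+ * modulo 256 until an unused key is found or the space is exhausted.
+ */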
+
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+                    struct drm_hash_item **item)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (!list)
+               return -EINVAL;
+
+       *item = hlist_entry(list, struct drm_hash_item, head);
+       return 0;
+}
+
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+{
+       struct hlist_node *list;
+
+       list = drm_ht_find_key(ht, key);
+       if (list) {
+               hlist_del_init(list);
+               ht->fill--;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+       hlist_del_init(&item->head);
+       ht->fill--;
+       return 0;
+}
+
+void drm_ht_remove(struct drm_open_hash *ht)
+{
+       if (ht->table) {
+               if (ht->use_vmalloc)
+                       vfree(ht->table);
+               else
+                       drm_free(ht->table, ht->size * sizeof(*ht->table),
+                                DRM_MEM_HASHTAB);
+               ht->table = NULL;
+       }
+}
diff --git a/psb-kernel-source-4.41.1/drm_hashtab.h b/psb-kernel-source-4.41.1/drm_hashtab.h
new file mode 100644 (file)
index 0000000..c090677
--- /dev/null
@@ -0,0 +1,67 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash table implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef DRM_HASHTAB_H
+#define DRM_HASHTAB_H
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+struct drm_hash_item {
+       struct hlist_node head;
+       unsigned long key;
+};
+
+struct drm_open_hash {
+       unsigned int size;
+       unsigned int order;
+       unsigned int fill;
+       struct hlist_head *table;
+       int use_vmalloc;
+};
+
+
+extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+                                    unsigned long seed, int bits, int shift,
+                                    unsigned long add);
+extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern void drm_ht_remove(struct drm_open_hash *ht);
+
+
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_internal.h b/psb-kernel-source-4.41.1/drm_internal.h
new file mode 100644 (file)
index 0000000..b82a189
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2007 Red Hat, Inc
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* This header file holds function prototypes and data types that are
+ * internal to the drm (not exported to user space) but shared across
+ * drivers and platforms */
+
+#ifndef __DRM_INTERNAL_H__
+#define __DRM_INTERNAL_H__
+
+/**
+ * Drawable information.
+ */
+struct drm_drawable_info {
+       unsigned int num_rects;
+       struct drm_clip_rect *rects;
+};
+
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_ioc32.c b/psb-kernel-source-4.41.1/drm_ioc32.c
new file mode 100644 (file)
index 0000000..0188154
--- /dev/null
@@ -0,0 +1,1073 @@
+/**
+ * \file drm_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#define DRM_IOCTL_VERSION32            DRM_IOWR(0x00, drm_version32_t)
+#define DRM_IOCTL_GET_UNIQUE32         DRM_IOWR(0x01, drm_unique32_t)
+#define DRM_IOCTL_GET_MAP32            DRM_IOWR(0x04, drm_map32_t)
+#define DRM_IOCTL_GET_CLIENT32         DRM_IOWR(0x05, drm_client32_t)
+#define DRM_IOCTL_GET_STATS32          DRM_IOR( 0x06, drm_stats32_t)
+
+#define DRM_IOCTL_SET_UNIQUE32         DRM_IOW( 0x10, drm_unique32_t)
+#define DRM_IOCTL_ADD_MAP32            DRM_IOWR(0x15, drm_map32_t)
+#define DRM_IOCTL_ADD_BUFS32           DRM_IOWR(0x16, drm_buf_desc32_t)
+#define DRM_IOCTL_MARK_BUFS32          DRM_IOW( 0x17, drm_buf_desc32_t)
+#define DRM_IOCTL_INFO_BUFS32          DRM_IOWR(0x18, drm_buf_info32_t)
+#define DRM_IOCTL_MAP_BUFS32           DRM_IOWR(0x19, drm_buf_map32_t)
+#define DRM_IOCTL_FREE_BUFS32          DRM_IOW( 0x1a, drm_buf_free32_t)
+
+#define DRM_IOCTL_RM_MAP32             DRM_IOW( 0x1b, drm_map32_t)
+
+#define DRM_IOCTL_SET_SAREA_CTX32      DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
+#define DRM_IOCTL_GET_SAREA_CTX32      DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
+
+#define DRM_IOCTL_RES_CTX32            DRM_IOWR(0x26, drm_ctx_res32_t)
+#define DRM_IOCTL_DMA32                        DRM_IOWR(0x29, drm_dma32_t)
+
+#define DRM_IOCTL_AGP_ENABLE32         DRM_IOW( 0x32, drm_agp_mode32_t)
+#define DRM_IOCTL_AGP_INFO32           DRM_IOR( 0x33, drm_agp_info32_t)
+#define DRM_IOCTL_AGP_ALLOC32          DRM_IOWR(0x34, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_FREE32           DRM_IOW( 0x35, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_BIND32           DRM_IOW( 0x36, drm_agp_binding32_t)
+#define DRM_IOCTL_AGP_UNBIND32         DRM_IOW( 0x37, drm_agp_binding32_t)
+
+#define DRM_IOCTL_SG_ALLOC32           DRM_IOW( 0x38, drm_scatter_gather32_t)
+#define DRM_IOCTL_SG_FREE32            DRM_IOW( 0x39, drm_scatter_gather32_t)
+
+#define DRM_IOCTL_WAIT_VBLANK32                DRM_IOWR(0x3a, drm_wait_vblank32_t)
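+
+/*
+ * Note (added for exposition): each 32-bit command above shares its
+ * DRM_IOCTL_NR() with the corresponding native command; only the size
+ * field of the ioctl encoding differs, because the 32-bit structures are
+ * laid out differently.  That is what allows drm_compat_ioctls[] at the
+ * end of this file to be indexed by ioctl number alone.
+ */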
+
+typedef struct drm_version_32 {
+       int version_major;        /**< Major version */
+       int version_minor;        /**< Minor version */
+       int version_patchlevel;   /**< Patch level */
+       u32 name_len;             /**< Length of name buffer */
+       u32 name;                 /**< Name of driver */
+       u32 date_len;             /**< Length of date buffer */
+       u32 date;                 /**< User-space buffer to hold date */
+       u32 desc_len;             /**< Length of desc buffer */
+       u32 desc;                 /**< User-space buffer to hold desc */
+} drm_version32_t;
+
+static int compat_drm_version(struct file *file, unsigned int cmd,
+                             unsigned long arg)
+{
+       drm_version32_t v32;
+       struct drm_version __user *version;
+       int err;
+
+       if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
+               return -EFAULT;
+
+       version = compat_alloc_user_space(sizeof(*version));
+       if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+               return -EFAULT;
+       if (__put_user(v32.name_len, &version->name_len)
+           || __put_user((void __user *)(unsigned long)v32.name,
+                         &version->name)
+           || __put_user(v32.date_len, &version->date_len)
+           || __put_user((void __user *)(unsigned long)v32.date,
+                         &version->date)
+           || __put_user(v32.desc_len, &version->desc_len)
+           || __put_user((void __user *)(unsigned long)v32.desc,
+                         &version->desc))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_VERSION, (unsigned long)version);
+       if (err)
+               return err;
+
+       if (__get_user(v32.version_major, &version->version_major)
+           || __get_user(v32.version_minor, &version->version_minor)
+           || __get_user(v32.version_patchlevel, &version->version_patchlevel)
+           || __get_user(v32.name_len, &version->name_len)
+           || __get_user(v32.date_len, &version->date_len)
+           || __get_user(v32.desc_len, &version->desc_len))
+               return -EFAULT;
+
+       if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
+               return -EFAULT;
+       return 0;
+}
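+
+/*
+ * All handlers in this file follow the pattern above: copy the 32-bit
+ * structure in from userspace, build the native structure in a scratch
+ * area obtained from compat_alloc_user_space() (carved from the 64-bit
+ * user stack), forward it to drm_ioctl(), then copy any output fields
+ * back into the 32-bit layout.  A minimal sketch of the skeleton, with
+ * the hypothetical pair drm_foo32_t/DRM_IOCTL_FOO standing in for a
+ * concrete one:
+ *
+ *     drm_foo32_t f32;
+ *     struct drm_foo __user *f;
+ *
+ *     if (copy_from_user(&f32, (void __user *)arg, sizeof(f32)))
+ *             return -EFAULT;
+ *     f = compat_alloc_user_space(sizeof(*f));
+ *     // __put_user() each widened field of f32 into *f ...
+ *     err = drm_ioctl(file->f_dentry->d_inode, file,
+ *                     DRM_IOCTL_FOO, (unsigned long)f);
+ *     // __get_user() results back into f32, copy_to_user() to arg ...
+ */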
+
+typedef struct drm_unique32 {
+       u32 unique_len; /**< Length of unique */
+       u32 unique;     /**< Unique name for driver instantiation */
+} drm_unique32_t;
+
+static int compat_drm_getunique(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       drm_unique32_t uq32;
+       struct drm_unique __user *u;
+       int err;
+
+       if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+               return -EFAULT;
+
+       u = compat_alloc_user_space(sizeof(*u));
+       if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+               return -EFAULT;
+       if (__put_user(uq32.unique_len, &u->unique_len)
+           || __put_user((void __user *)(unsigned long)uq32.unique,
+                         &u->unique))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+       if (err)
+               return err;
+
+       if (__get_user(uq32.unique_len, &u->unique_len))
+               return -EFAULT;
+       if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
+               return -EFAULT;
+       return 0;
+}
+
+static int compat_drm_setunique(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       drm_unique32_t uq32;
+       struct drm_unique __user *u;
+
+       if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+               return -EFAULT;
+
+       u = compat_alloc_user_space(sizeof(*u));
+       if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+               return -EFAULT;
+       if (__put_user(uq32.unique_len, &u->unique_len)
+           || __put_user((void __user *)(unsigned long)uq32.unique,
+                         &u->unique))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+}
+
+typedef struct drm_map32 {
+       u32 offset;             /**< Requested physical address (0 for SAREA)*/
+       u32 size;               /**< Requested physical size (bytes) */
+       enum drm_map_type type; /**< Type of memory to map */
+       enum drm_map_flags flags;       /**< Flags */
+       u32 handle;             /**< User-space: "Handle" to pass to mmap() */
+       int mtrr;               /**< MTRR slot used */
+} drm_map32_t;
+
+static int compat_drm_getmap(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       drm_map32_t __user *argp = (void __user *)arg;
+       drm_map32_t m32;
+       struct drm_map __user *map;
+       int idx, err;
+       void *handle;
+
+       if (get_user(idx, &argp->offset))
+               return -EFAULT;
+
+       map = compat_alloc_user_space(sizeof(*map));
+       if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+               return -EFAULT;
+       if (__put_user(idx, &map->offset))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_GET_MAP, (unsigned long)map);
+       if (err)
+               return err;
+
+       if (__get_user(m32.offset, &map->offset)
+           || __get_user(m32.size, &map->size)
+           || __get_user(m32.type, &map->type)
+           || __get_user(m32.flags, &map->flags)
+           || __get_user(handle, &map->handle)
+           || __get_user(m32.mtrr, &map->mtrr))
+               return -EFAULT;
+
+       m32.handle = (unsigned long)handle;
+       if (copy_to_user(argp, &m32, sizeof(m32)))
+               return -EFAULT;
+       return 0;
+}
+
+static int compat_drm_addmap(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       drm_map32_t __user *argp = (void __user *)arg;
+       drm_map32_t m32;
+       struct drm_map __user *map;
+       int err;
+       void *handle;
+
+       if (copy_from_user(&m32, argp, sizeof(m32)))
+               return -EFAULT;
+
+       map = compat_alloc_user_space(sizeof(*map));
+       if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+               return -EFAULT;
+       if (__put_user(m32.offset, &map->offset)
+           || __put_user(m32.size, &map->size)
+           || __put_user(m32.type, &map->type)
+           || __put_user(m32.flags, &map->flags))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_ADD_MAP, (unsigned long)map);
+       if (err)
+               return err;
+
+       if (__get_user(m32.offset, &map->offset)
+           || __get_user(m32.mtrr, &map->mtrr)
+           || __get_user(handle, &map->handle))
+               return -EFAULT;
+
+       m32.handle = (unsigned long)handle;
+       if (m32.handle != (unsigned long)handle && printk_ratelimit())
+               printk(KERN_ERR "compat_drm_addmap truncated handle"
+                      " %p for type %d offset %x\n",
+                      handle, m32.type, m32.offset);
+
+       if (copy_to_user(argp, &m32, sizeof(m32)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int compat_drm_rmmap(struct file *file, unsigned int cmd,
+                           unsigned long arg)
+{
+       drm_map32_t __user *argp = (void __user *)arg;
+       struct drm_map __user *map;
+       u32 handle;
+
+       if (get_user(handle, &argp->handle))
+               return -EFAULT;
+
+       map = compat_alloc_user_space(sizeof(*map));
+       if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+               return -EFAULT;
+       if (__put_user((void *)(unsigned long)handle, &map->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_RM_MAP, (unsigned long)map);
+}
+
+typedef struct drm_client32 {
+       int idx;        /**< Which client desired? */
+       int auth;       /**< Is client authenticated? */
+       u32 pid;        /**< Process ID */
+       u32 uid;        /**< User ID */
+       u32 magic;      /**< Magic */
+       u32 iocs;       /**< Ioctl count */
+} drm_client32_t;
+
+static int compat_drm_getclient(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       drm_client32_t c32;
+       drm_client32_t __user *argp = (void __user *)arg;
+       struct drm_client __user *client;
+       int idx, err;
+
+       if (get_user(idx, &argp->idx))
+               return -EFAULT;
+
+       client = compat_alloc_user_space(sizeof(*client));
+       if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+               return -EFAULT;
+       if (__put_user(idx, &client->idx))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+       if (err)
+               return err;
+
+       if (__get_user(c32.auth, &client->auth)
+           || __get_user(c32.pid, &client->pid)
+           || __get_user(c32.uid, &client->uid)
+           || __get_user(c32.magic, &client->magic)
+           || __get_user(c32.iocs, &client->iocs))
+               return -EFAULT;
+
+       if (copy_to_user(argp, &c32, sizeof(c32)))
+               return -EFAULT;
+       return 0;
+}
+
+typedef struct drm_stats32 {
+       u32 count;
+       struct {
+               u32 value;
+               enum drm_stat_type type;
+       } data[15];
+} drm_stats32_t;
+
+static int compat_drm_getstats(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_stats32_t s32;
+       drm_stats32_t __user *argp = (void __user *)arg;
+       struct drm_stats __user *stats;
+       int i, err;
+
+       stats = compat_alloc_user_space(sizeof(*stats));
+       if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_GET_STATS, (unsigned long)stats);
+       if (err)
+               return err;
+
+       if (__get_user(s32.count, &stats->count))
+               return -EFAULT;
+       for (i = 0; i < 15; ++i)
+               if (__get_user(s32.data[i].value, &stats->data[i].value)
+                   || __get_user(s32.data[i].type, &stats->data[i].type))
+                       return -EFAULT;
+
+       if (copy_to_user(argp, &s32, sizeof(s32)))
+               return -EFAULT;
+       return 0;
+}
+
+typedef struct drm_buf_desc32 {
+       int count;               /**< Number of buffers of this size */
+       int size;                /**< Size in bytes */
+       int low_mark;            /**< Low water mark */
+       int high_mark;           /**< High water mark */
+       int flags;
+       u32 agp_start;           /**< Start address in the AGP aperture */
+} drm_buf_desc32_t;
+
+static int compat_drm_addbufs(struct file *file, unsigned int cmd,
+                             unsigned long arg)
+{
+       drm_buf_desc32_t __user *argp = (void __user *)arg;
+       struct drm_buf_desc __user *buf;
+       int err;
+       unsigned long agp_start;
+
+       buf = compat_alloc_user_space(sizeof(*buf));
+       if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
+           || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+               return -EFAULT;
+
+       if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
+           || __get_user(agp_start, &argp->agp_start)
+           || __put_user(agp_start, &buf->agp_start))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+       if (err)
+               return err;
+
+       if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
+           || __get_user(agp_start, &buf->agp_start)
+           || __put_user(agp_start, &argp->agp_start))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int compat_drm_markbufs(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_buf_desc32_t b32;
+       drm_buf_desc32_t __user *argp = (void __user *)arg;
+       struct drm_buf_desc __user *buf;
+
+       if (copy_from_user(&b32, argp, sizeof(b32)))
+               return -EFAULT;
+
+       buf = compat_alloc_user_space(sizeof(*buf));
+       if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+               return -EFAULT;
+
+       if (__put_user(b32.size, &buf->size)
+           || __put_user(b32.low_mark, &buf->low_mark)
+           || __put_user(b32.high_mark, &buf->high_mark))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+}
+
+typedef struct drm_buf_info32 {
+       int count;              /**< Entries in list */
+       u32 list;
+} drm_buf_info32_t;
+
+static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_buf_info32_t req32;
+       drm_buf_info32_t __user *argp = (void __user *)arg;
+       drm_buf_desc32_t __user *to;
+       struct drm_buf_info __user *request;
+       struct drm_buf_desc __user *list;
+       size_t nbytes;
+       int i, err;
+       int count, actual;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       count = req32.count;
+       to = (drm_buf_desc32_t __user *)(unsigned long)req32.list;
+       if (count < 0)
+               count = 0;
+       if (count > 0
+           && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
+               return -EFAULT;
+
+       nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
+       request = compat_alloc_user_space(nbytes);
+       if (!access_ok(VERIFY_WRITE, request, nbytes))
+               return -EFAULT;
+       list = (struct drm_buf_desc __user *)(request + 1);
+
+       if (__put_user(count, &request->count)
+           || __put_user(list, &request->list))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+       if (err)
+               return err;
+
+       if (__get_user(actual, &request->count))
+               return -EFAULT;
+       if (count >= actual)
+               for (i = 0; i < actual; ++i)
+                       if (__copy_in_user(&to[i], &list[i],
+                                          offsetof(struct drm_buf_desc, flags)))
+                               return -EFAULT;
+
+       if (__put_user(actual, &argp->count))
+               return -EFAULT;
+
+       return 0;
+}
+
+typedef struct drm_buf_pub32 {
+       int idx;                /**< Index into the master buffer list */
+       int total;              /**< Buffer size */
+       int used;               /**< Amount of buffer in use (for DMA) */
+       u32 address;            /**< Address of buffer */
+} drm_buf_pub32_t;
+
+typedef struct drm_buf_map32 {
+       int count;              /**< Length of the buffer list */
+       u32 virtual;            /**< Mmap'd area in user-virtual */
+       u32 list;               /**< Buffer information */
+} drm_buf_map32_t;
+
+static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+                             unsigned long arg)
+{
+       drm_buf_map32_t __user *argp = (void __user *)arg;
+       drm_buf_map32_t req32;
+       drm_buf_pub32_t __user *list32;
+       struct drm_buf_map __user *request;
+       struct drm_buf_pub __user *list;
+       int i, err;
+       int count, actual;
+       size_t nbytes;
+       void __user *addr;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+       count = req32.count;
+       list32 = (void __user *)(unsigned long)req32.list;
+
+       if (count < 0)
+               return -EINVAL;
+       nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
+       request = compat_alloc_user_space(nbytes);
+       if (!access_ok(VERIFY_WRITE, request, nbytes))
+               return -EFAULT;
+       list = (struct drm_buf_pub __user *)(request + 1);
+
+       if (__put_user(count, &request->count)
+           || __put_user(list, &request->list))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+       if (err)
+               return err;
+
+       if (__get_user(actual, &request->count))
+               return -EFAULT;
+       if (count >= actual)
+               for (i = 0; i < actual; ++i)
+                       if (__copy_in_user(&list32[i], &list[i],
+                                          offsetof(struct drm_buf_pub, address))
+                           || __get_user(addr, &list[i].address)
+                           || __put_user((unsigned long)addr,
+                                         &list32[i].address))
+                               return -EFAULT;
+
+       if (__put_user(actual, &argp->count)
+           || __get_user(addr, &request->virtual)
+           || __put_user((unsigned long)addr, &argp->virtual))
+               return -EFAULT;
+
+       return 0;
+}
+
+typedef struct drm_buf_free32 {
+       int count;
+       u32 list;
+} drm_buf_free32_t;
+
+static int compat_drm_freebufs(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_buf_free32_t req32;
+       struct drm_buf_free __user *request;
+       drm_buf_free32_t __user *argp = (void __user *)arg;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+               return -EFAULT;
+       if (__put_user(req32.count, &request->count)
+           || __put_user((int __user *)(unsigned long)req32.list,
+                         &request->list))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+}
+
+typedef struct drm_ctx_priv_map32 {
+       unsigned int ctx_id;     /**< Context requesting private mapping */
+       u32 handle;             /**< Handle of map */
+} drm_ctx_priv_map32_t;
+
+static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       drm_ctx_priv_map32_t req32;
+       struct drm_ctx_priv_map __user *request;
+       drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+               return -EFAULT;
+       if (__put_user(req32.ctx_id, &request->ctx_id)
+           || __put_user((void *)(unsigned long)req32.handle,
+                         &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+}
+
+static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       struct drm_ctx_priv_map __user *request;
+       drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+       int err;
+       unsigned int ctx_id;
+       void *handle;
+
+       if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+           || __get_user(ctx_id, &argp->ctx_id))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+               return -EFAULT;
+       if (__put_user(ctx_id, &request->ctx_id))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+       if (err)
+               return err;
+
+       if (__get_user(handle, &request->handle)
+           || __put_user((unsigned long)handle, &argp->handle))
+               return -EFAULT;
+
+       return 0;
+}
+
+typedef struct drm_ctx_res32 {
+       int count;
+       u32 contexts;
+} drm_ctx_res32_t;
+
+static int compat_drm_resctx(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       drm_ctx_res32_t __user *argp = (void __user *)arg;
+       drm_ctx_res32_t res32;
+       struct drm_ctx_res __user *res;
+       int err;
+
+       if (copy_from_user(&res32, argp, sizeof(res32)))
+               return -EFAULT;
+
+       res = compat_alloc_user_space(sizeof(*res));
+       if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+               return -EFAULT;
+       if (__put_user(res32.count, &res->count)
+           || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
+                         &res->contexts))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_RES_CTX, (unsigned long)res);
+       if (err)
+               return err;
+
+       if (__get_user(res32.count, &res->count)
+           || __put_user(res32.count, &argp->count))
+               return -EFAULT;
+
+       return 0;
+}
+
+typedef struct drm_dma32 {
+       int context;              /**< Context handle */
+       int send_count;           /**< Number of buffers to send */
+       u32 send_indices;         /**< List of handles to buffers */
+       u32 send_sizes;           /**< Lengths of data to send */
+       enum drm_dma_flags flags;                 /**< Flags */
+       int request_count;        /**< Number of buffers requested */
+       int request_size;         /**< Desired size for buffers */
+       u32 request_indices;      /**< Buffer information */
+       u32 request_sizes;
+       int granted_count;        /**< Number of buffers granted */
+} drm_dma32_t;
+
+static int compat_drm_dma(struct file *file, unsigned int cmd,
+                         unsigned long arg)
+{
+       drm_dma32_t d32;
+       drm_dma32_t __user *argp = (void __user *)arg;
+       struct drm_dma __user *d;
+       int err;
+
+       if (copy_from_user(&d32, argp, sizeof(d32)))
+               return -EFAULT;
+
+       d = compat_alloc_user_space(sizeof(*d));
+       if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+               return -EFAULT;
+
+       if (__put_user(d32.context, &d->context)
+           || __put_user(d32.send_count, &d->send_count)
+           || __put_user((int __user *)(unsigned long)d32.send_indices,
+                         &d->send_indices)
+           || __put_user((int __user *)(unsigned long)d32.send_sizes,
+                         &d->send_sizes)
+           || __put_user(d32.flags, &d->flags)
+           || __put_user(d32.request_count, &d->request_count)
+           || __put_user((int __user *)(unsigned long)d32.request_indices,
+                         &d->request_indices)
+           || __put_user((int __user *)(unsigned long)d32.request_sizes,
+                         &d->request_sizes))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_DMA, (unsigned long)d);
+       if (err)
+               return err;
+
+       if (__get_user(d32.request_size, &d->request_size)
+           || __get_user(d32.granted_count, &d->granted_count)
+           || __put_user(d32.request_size, &argp->request_size)
+           || __put_user(d32.granted_count, &argp->granted_count))
+               return -EFAULT;
+
+       return 0;
+}
+
+#if __OS_HAS_AGP
+typedef struct drm_agp_mode32 {
+       u32 mode;       /**< AGP mode */
+} drm_agp_mode32_t;
+
+static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
+                                unsigned long arg)
+{
+       drm_agp_mode32_t __user *argp = (void __user *)arg;
+       drm_agp_mode32_t m32;
+       struct drm_agp_mode __user *mode;
+
+       if (get_user(m32.mode, &argp->mode))
+               return -EFAULT;
+
+       mode = compat_alloc_user_space(sizeof(*mode));
+       if (put_user(m32.mode, &mode->mode))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+}
+
+typedef struct drm_agp_info32 {
+       int agp_version_major;
+       int agp_version_minor;
+       u32 mode;
+       u32 aperture_base;      /* physical address */
+       u32 aperture_size;      /* bytes */
+       u32 memory_allowed;     /* bytes */
+       u32 memory_used;
+
+       /* PCI information */
+       unsigned short id_vendor;
+       unsigned short id_device;
+} drm_agp_info32_t;
+
+static int compat_drm_agp_info(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_agp_info32_t __user *argp = (void __user *)arg;
+       drm_agp_info32_t i32;
+       struct drm_agp_info __user *info;
+       int err;
+
+       info = compat_alloc_user_space(sizeof(*info));
+       if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_AGP_INFO, (unsigned long)info);
+       if (err)
+               return err;
+
+       if (__get_user(i32.agp_version_major, &info->agp_version_major)
+           || __get_user(i32.agp_version_minor, &info->agp_version_minor)
+           || __get_user(i32.mode, &info->mode)
+           || __get_user(i32.aperture_base, &info->aperture_base)
+           || __get_user(i32.aperture_size, &info->aperture_size)
+           || __get_user(i32.memory_allowed, &info->memory_allowed)
+           || __get_user(i32.memory_used, &info->memory_used)
+           || __get_user(i32.id_vendor, &info->id_vendor)
+           || __get_user(i32.id_device, &info->id_device))
+               return -EFAULT;
+
+       if (copy_to_user(argp, &i32, sizeof(i32)))
+               return -EFAULT;
+
+       return 0;
+}
+
+typedef struct drm_agp_buffer32 {
+       u32 size;       /**< In bytes -- will round to page boundary */
+       u32 handle;     /**< Used for binding / unbinding */
+       u32 type;       /**< Type of memory to allocate */
+       u32 physical;   /**< Physical used by i810 */
+} drm_agp_buffer32_t;
+
+static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       drm_agp_buffer32_t __user *argp = (void __user *)arg;
+       drm_agp_buffer32_t req32;
+       struct drm_agp_buffer __user *request;
+       int err;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.size, &request->size)
+           || __put_user(req32.type, &request->type))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+       if (err)
+               return err;
+
+       if (__get_user(req32.handle, &request->handle)
+           || __get_user(req32.physical, &request->physical)
+           || copy_to_user(argp, &req32, sizeof(req32))) {
+               drm_ioctl(file->f_dentry->d_inode, file,
+                         DRM_IOCTL_AGP_FREE, (unsigned long)request);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+static int compat_drm_agp_free(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_agp_buffer32_t __user *argp = (void __user *)arg;
+       struct drm_agp_buffer __user *request;
+       u32 handle;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || get_user(handle, &argp->handle)
+           || __put_user(handle, &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_AGP_FREE, (unsigned long)request);
+}
+
+typedef struct drm_agp_binding32 {
+       u32 handle;     /**< From drm_agp_buffer */
+       u32 offset;     /**< In bytes -- will round to page boundary */
+} drm_agp_binding32_t;
+
+static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_agp_binding32_t __user *argp = (void __user *)arg;
+       drm_agp_binding32_t req32;
+       struct drm_agp_binding __user *request;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.handle, &request->handle)
+           || __put_user(req32.offset, &request->offset))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_AGP_BIND, (unsigned long)request);
+}
+
+static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
+                                unsigned long arg)
+{
+       drm_agp_binding32_t __user *argp = (void __user *)arg;
+       struct drm_agp_binding __user *request;
+       u32 handle;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || get_user(handle, &argp->handle)
+           || __put_user(handle, &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+}
+#endif                         /* __OS_HAS_AGP */
+
+typedef struct drm_scatter_gather32 {
+       u32 size;       /**< In bytes -- will round to page boundary */
+       u32 handle;     /**< Used for mapping / unmapping */
+} drm_scatter_gather32_t;
+
+static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       drm_scatter_gather32_t __user *argp = (void __user *)arg;
+       struct drm_scatter_gather __user *request;
+       int err;
+       unsigned long x;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+           || __get_user(x, &argp->size)
+           || __put_user(x, &request->size))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+       if (err)
+               return err;
+
+       /* XXX not sure about the handle conversion here... */
+       if (__get_user(x, &request->handle)
+           || __put_user(x >> PAGE_SHIFT, &argp->handle))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int compat_drm_sg_free(struct file *file, unsigned int cmd,
+                             unsigned long arg)
+{
+       drm_scatter_gather32_t __user *argp = (void __user *)arg;
+       struct drm_scatter_gather __user *request;
+       unsigned long x;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+           || __get_user(x, &argp->handle)
+           || __put_user(x << PAGE_SHIFT, &request->handle))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_SG_FREE, (unsigned long)request);
+}
+
+struct drm_wait_vblank_request32 {
+       enum drm_vblank_seq_type type;
+       unsigned int sequence;
+       u32 signal;
+};
+
+struct drm_wait_vblank_reply32 {
+       enum drm_vblank_seq_type type;
+       unsigned int sequence;
+       s32 tval_sec;
+       s32 tval_usec;
+};
+
+typedef union drm_wait_vblank32 {
+       struct drm_wait_vblank_request32 request;
+       struct drm_wait_vblank_reply32 reply;
+} drm_wait_vblank32_t;
+
+static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       drm_wait_vblank32_t __user *argp = (void __user *)arg;
+       drm_wait_vblank32_t req32;
+       union drm_wait_vblank __user *request;
+       int err;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.request.type, &request->request.type)
+           || __put_user(req32.request.sequence, &request->request.sequence)
+           || __put_user(req32.request.signal, &request->request.signal))
+               return -EFAULT;
+
+       err = drm_ioctl(file->f_dentry->d_inode, file,
+                       DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+       if (err)
+               return err;
+
+       if (__get_user(req32.reply.type, &request->reply.type)
+           || __get_user(req32.reply.sequence, &request->reply.sequence)
+           || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
+           || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
+               return -EFAULT;
+
+       if (copy_to_user(argp, &req32, sizeof(req32)))
+               return -EFAULT;
+
+       return 0;
+}
+
+drm_ioctl_compat_t *drm_compat_ioctls[] = {
+       [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
+       [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
+       [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
+       [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
+       [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
+       [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
+       [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
+       [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
+       [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
+       [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
+       [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
+       [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
+#if __OS_HAS_AGP
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
+#endif
+       [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
+       [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+       [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+};
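+
+/*
+ * The table above is indexed by ioctl number, so ioctls without a compat
+ * handler are implicitly NULL and fall through to drm_ioctl().  As an
+ * illustrative sketch (not part of the original driver), a 32-bit
+ * DRM_IOCTL_VERSION32 request is dispatched roughly as:
+ *
+ *     unsigned int nr = DRM_IOCTL_NR(DRM_IOCTL_VERSION32);
+ *     drm_ioctl_compat_t *fn = drm_compat_ioctls[nr];  // compat_drm_version
+ *     ret = (*fn)(filp, cmd, arg);
+ */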
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/drm.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       drm_ioctl_compat_t *fn;
+       int ret;
+
+       /* Assume that ioctls without an explicit compat routine will "just
+        * work".  This may not always be a good assumption, but it's better
+        * than always failing.
+        */
+       if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls))
+               return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+
+       fn = drm_compat_ioctls[nr];
+
+       lock_kernel();          /* XXX for now */
+       if (fn != NULL)
+               ret = (*fn)(filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+       unlock_kernel();
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/psb-kernel-source-4.41.1/drm_ioctl.c b/psb-kernel-source-4.41.1/drm_ioctl.c
new file mode 100644 (file)
index 0000000..395f7b4
--- /dev/null
@@ -0,0 +1,347 @@
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#include "linux/pci.h"
+
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_unique *u = data;
+
+       if (u->unique_len >= dev->unique_len) {
+               if (copy_to_user(u->unique, dev->unique, dev->unique_len))
+                       return -EFAULT;
+       }
+       u->unique_len = dev->unique_len;
+
+       return 0;
+}
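+
+/*
+ * Usage sketch (illustrative, assuming the libdrm-style drmIoctl() wrapper):
+ * userspace typically calls this ioctl twice, once with unique_len == 0 to
+ * learn the length, then again with a buffer of that size:
+ *
+ *     struct drm_unique u = { .unique_len = 0, .unique = NULL };
+ *     drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u);   // fills in unique_len only
+ *     u.unique = malloc(u.unique_len + 1);
+ *     drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u);   // copies the bus id
+ */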
+
+/**
+ * Set the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from userspace into drm_device::unique, and verifies that
+ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
+ * in interface version 1.1 and will return EBUSY when setversion has requested
+ * version 1.1 or greater.
+ */
+int drm_setunique(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_unique *u = data;
+       int domain, bus, slot, func, ret;
+
+       if (dev->unique_len || dev->unique)
+               return -EBUSY;
+
+       if (!u->unique_len || u->unique_len > 1024)
+               return -EINVAL;
+
+       dev->unique_len = u->unique_len;
+       dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
+       if (!dev->unique)
+               return -ENOMEM;
+       if (copy_from_user(dev->unique, u->unique, dev->unique_len))
+               return -EFAULT;
+
+       dev->unique[dev->unique_len] = '\0';
+
+       dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + strlen(dev->unique) + 2,
+                                DRM_MEM_DRIVER);
+       if (!dev->devname)
+               return -ENOMEM;
+
+       sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);
+
+       /* Return error if the busid submitted doesn't match the device's actual
+        * busid.
+        */
+       ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+       if (ret != 3)
+               return -EINVAL;
+       domain = bus >> 8;
+       bus &= 0xff;
+
+       if ((domain != drm_get_pci_domain(dev)) ||
+           (bus != dev->pdev->bus->number) ||
+           (slot != PCI_SLOT(dev->pdev->devfn)) ||
+           (func != PCI_FUNC(dev->pdev->devfn)))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int drm_set_busid(struct drm_device * dev)
+{
+       int len;
+       if (dev->unique != NULL)
+               return -EBUSY;
+
+       dev->unique_len = 40;
+       dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
+       if (dev->unique == NULL)
+               return -ENOMEM;
+
+       len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
+                      drm_get_pci_domain(dev),
+                      dev->pdev->bus->number,
+                      PCI_SLOT(dev->pdev->devfn),
+                      PCI_FUNC(dev->pdev->devfn));
+       if (len > dev->unique_len)
+               DRM_ERROR("buffer overflow");
+
+       dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 2,
+                                DRM_MEM_DRIVER);
+       if (dev->devname == NULL)
+               return -ENOMEM;
+
+       sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);
+
+       return 0;
+}
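+
+/*
+ * For example, a device in PCI domain 0 at bus 1, slot 2, function 0 gets
+ * the unique string "pci:0000:01:02.0" and (driver name assumed here for
+ * illustration) a devname such as "psb@pci:0000:01:02.0".
+ */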
+
+/**
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping at the specified index (the index is passed in
+ * drm_map::offset) and copies its information into userspace.
+ */
+int drm_getmap(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+       struct drm_map *map = data;
+       struct drm_map_list *r_list = NULL;
+       struct list_head *list;
+       int idx;
+       int i;
+
+       idx = map->offset;
+
+       mutex_lock(&dev->struct_mutex);
+       if (idx < 0) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       i = 0;
+       list_for_each(list, &dev->maplist) {
+               if (i == idx) {
+                       r_list = list_entry(list, struct drm_map_list, head);
+                       break;
+               }
+               i++;
+       }
+       if (!r_list || !r_list->map) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       map->offset = r_list->map->offset;
+       map->size = r_list->map->size;
+       map->type = r_list->map->type;
+       map->flags = r_list->map->flags;
+       map->handle = (void *)(unsigned long) r_list->user_token;
+       map->mtrr = r_list->map->mtrr;
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace.
+ */
+int drm_getclient(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_client *client = data;
+       struct drm_file *pt;
+       int idx;
+       int i;
+
+       idx = client->idx;
+       mutex_lock(&dev->struct_mutex);
+
+       i = 0;
+       list_for_each_entry(pt, &dev->filelist, lhead) {
+               if (i++ >= idx) {
+                       client->auth = pt->authenticated;
+                       client->pid = pt->pid;
+                       client->uid = pt->uid;
+                       client->magic = pt->magic;
+                       client->iocs = pt->ioctl_count;
+                       mutex_unlock(&dev->struct_mutex);
+
+                       return 0;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return -EINVAL;
+}
+
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getstats(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
+{
+       struct drm_stats *stats = data;
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       mutex_lock(&dev->struct_mutex);
+
+       for (i = 0; i < dev->counters; i++) {
+               if (dev->types[i] == _DRM_STAT_LOCK)
+                       stats->data[i].value =
+                           (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
+               else
+                       stats->data[i].value = atomic_read(&dev->counts[i]);
+               stats->data[i].type = dev->types[i];
+       }
+
+       stats->count = dev->counters;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version.
+ */
+int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_set_version *sv = data;
+       int if_version, retcode = 0;
+
+       if (sv->drm_di_major != -1) {
+               if (sv->drm_di_major != DRM_IF_MAJOR ||
+                   sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+                       retcode = -EINVAL;
+                       goto done;
+               }
+               if_version = DRM_IF_VERSION(sv->drm_di_major,
+                                           sv->drm_di_minor);
+               dev->if_version = max(if_version, dev->if_version);
+               if (sv->drm_di_minor >= 1) {
+                       /*
+                        * Version 1.1 includes tying of DRM to specific device
+                        */
+                       drm_set_busid(dev);
+               }
+       }
+
+       if (sv->drm_dd_major != -1) {
+               if (sv->drm_dd_major != dev->driver->major ||
+                   sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+                   dev->driver->minor) {
+                       retcode = -EINVAL;
+                       goto done;
+               }
+
+               if (dev->driver->set_version)
+                       dev->driver->set_version(dev, sv);
+       }
+
+done:
+       sv->drm_di_major = DRM_IF_MAJOR;
+       sv->drm_di_minor = DRM_IF_MINOR;
+       sv->drm_dd_major = dev->driver->major;
+       sv->drm_dd_minor = dev->driver->minor;
+
+       return retcode;
+}
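+
+/*
+ * For example (sketch): a client requesting the 1.1 core interface passes
+ *
+ *     struct drm_set_version sv = {
+ *             .drm_di_major = 1,  .drm_di_minor = 1,
+ *             .drm_dd_major = -1, .drm_dd_minor = -1,   // leave driver as-is
+ *     };
+ *
+ * and on return the structure holds the versions the kernel actually
+ * supports, with the busid now tied to the device (see drm_set_busid()).
+ */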
+
+/** No-op ioctl. */
+int drm_noop(struct drm_device *dev, void *data,
+            struct drm_file *file_priv)
+{
+       DRM_DEBUG("\n");
+       return 0;
+}
diff --git a/psb-kernel-source-4.41.1/drm_irq.c b/psb-kernel-source-4.41.1/drm_irq.c
new file mode 100644 (file)
index 0000000..2ae4948
--- /dev/null
@@ -0,0 +1,463 @@
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include <linux/interrupt.h>   /* For task queue support */
+
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device to which this DRM instance is attached.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_irq_busid *p = data;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+           (p->busnum & 0xff) != dev->pdev->bus->number ||
+           p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+               return -EINVAL;
+
+       p->irq = dev->irq;
+
+       DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+                 p->irq);
+
+       return 0;
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ-related data and sets up drm_device::vbl_queue. Installs the handler, calling the driver
+ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * before and after the installation.
+ */
+int drm_irq_install(struct drm_device * dev)
+{
+       int ret;
+       unsigned long sh_flags = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       if (dev->irq == 0)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* Driver must have been initialized */
+       if (!dev->dev_private) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       if (dev->irq_enabled) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EBUSY;
+       }
+       dev->irq_enabled = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+
+       if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
+               init_waitqueue_head(&dev->vbl_queue);
+
+               spin_lock_init(&dev->vbl_lock);
+
+               INIT_LIST_HEAD(&dev->vbl_sigs);
+               INIT_LIST_HEAD(&dev->vbl_sigs2);
+
+               dev->vbl_pending = 0;
+       }
+
+       /* Before installing handler */
+       dev->driver->irq_preinstall(dev);
+
+       /* Install handler */
+       if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+               sh_flags = IRQF_SHARED;
+
+       ret = request_irq(dev->irq, dev->driver->irq_handler,
+                         sh_flags, dev->devname, dev);
+       if (ret < 0) {
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       /* After installing handler */
+       dev->driver->irq_postinstall(dev);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_irq_install);
+
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device * dev)
+{
+       int irq_enabled;
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+       irq_enabled = dev->irq_enabled;
+       dev->irq_enabled = 0;
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!irq_enabled)
+               return -EINVAL;
+
+       DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+
+       dev->driver->irq_uninstall(dev);
+
+       free_irq(dev->irq, dev);
+
+       dev->locked_tasklet_func = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_irq_uninstall);
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+int drm_control(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_control *ctl = data;
+
+       /* If we don't have an IRQ, we fall back for compatibility reasons;
+        * this used to be a separate function in drm_dma.h.
+        */
+
+       switch (ctl->func) {
+       case DRM_INST_HANDLER:
+               if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+                       return 0;
+               if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+                   ctl->irq != dev->irq)
+                       return -EINVAL;
+               return drm_irq_install(dev);
+       case DRM_UNINST_HANDLER:
+               if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+                       return 0;
+               return drm_irq_uninstall(dev);
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the IRQ is installed.
+ *
+ * If a signal is requested, checks whether this task has already scheduled
+ * the same signal for the same vblank sequence number -- in that case there
+ * is nothing to be done. If the number of tasks waiting for the interrupt
+ * exceeds 100, the function fails. Otherwise a new entry is added to
+ * drm_device::vbl_sigs for this task.
+ *
+ * If a signal is not requested, then calls vblank_wait().
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       union drm_wait_vblank *vblwait = data;
+       struct timeval now;
+       int ret = 0;
+       unsigned int flags, seq;
+
+       if ((!dev->irq) || (!dev->irq_enabled))
+               return -EINVAL;
+
+       if (vblwait->request.type &
+           ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+               DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+                         vblwait->request.type,
+                         (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+               return -EINVAL;
+       }
+
+       flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+
+       if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
+                                   DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+               return -EINVAL;
+
+       seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
+                         : &dev->vbl_received);
+
+       switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+       case _DRM_VBLANK_RELATIVE:
+               vblwait->request.sequence += seq;
+               vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
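+               /* fall through: a converted relative wait is then handled
+                * as an absolute one */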
+       case _DRM_VBLANK_ABSOLUTE:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1<<23)) {
+               vblwait->request.sequence = seq + 1;
+       }
+
+       if (flags & _DRM_VBLANK_SIGNAL) {
+               unsigned long irqflags;
+               struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+                                     ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+               struct drm_vbl_sig *vbl_sig;
+
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+
+               /* Check if this task has already scheduled the same signal
+                * for the same vblank sequence number; nothing to be done in
+                * that case
+                */
+               list_for_each_entry(vbl_sig, vbl_sigs, head) {
+                       if (vbl_sig->sequence == vblwait->request.sequence
+                           && vbl_sig->info.si_signo ==
+                           vblwait->request.signal
+                           && vbl_sig->task == current) {
+                               spin_unlock_irqrestore(&dev->vbl_lock,
+                                                      irqflags);
+                               vblwait->reply.sequence = seq;
+                               goto done;
+                       }
+               }
+
+               if (dev->vbl_pending >= 100) {
+                       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+                       return -EBUSY;
+               }
+
+               dev->vbl_pending++;
+
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+               vbl_sig = drm_alloc(sizeof(struct drm_vbl_sig),
+                                   DRM_MEM_DRIVER);
+               if (!vbl_sig) {
+                       /* Roll back the pending count taken above. */
+                       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+                       dev->vbl_pending--;
+                       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+                       return -ENOMEM;
+               }
+
+               memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+
+               vbl_sig->sequence = vblwait->request.sequence;
+               vbl_sig->info.si_signo = vblwait->request.signal;
+               vbl_sig->task = current;
+
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+
+               list_add_tail(&vbl_sig->head, vbl_sigs);
+
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+               vblwait->reply.sequence = seq;
+       } else {
+               if (flags & _DRM_VBLANK_SECONDARY) {
+                       if (dev->driver->vblank_wait2)
+                               ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
+               } else if (dev->driver->vblank_wait)
+                       ret = dev->driver->vblank_wait(dev,
+                                                      &vblwait->request.sequence);
+
+               do_gettimeofday(&now);
+               vblwait->reply.tval_sec = now.tv_sec;
+               vblwait->reply.tval_usec = now.tv_usec;
+               /* vblwait->reply.sequence =
+                  vblwait->request.sequence; FIXME: confirm with
+                  Justin if this is necessary*/
+       }
+
+      done:
+       return ret;
+}
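+
+/*
+ * Usage sketch (illustrative): to block until the next vblank, userspace
+ * issues a relative wait for one vblank beyond the current count:
+ *
+ *     union drm_wait_vblank vbl = { { 0 } };
+ *     vbl.request.type = _DRM_VBLANK_RELATIVE;
+ *     vbl.request.sequence = 1;
+ *     drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
+ *     // vbl.reply.sequence now holds the vblank count at wakeup
+ */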
+
+/**
+ * Send the VBLANK signals.
+ *
+ * \param dev DRM device.
+ *
+ * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
+ */
+void drm_vbl_send_signals(struct drm_device * dev)
+{
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&dev->vbl_lock, flags);
+
+       for (i = 0; i < 2; i++) {
+               struct drm_vbl_sig *vbl_sig, *tmp;
+               struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+               unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
+                                                  &dev->vbl_received);
+
+               list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+                       if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+                               vbl_sig->info.si_code = vbl_seq;
+                               send_sig_info(vbl_sig->info.si_signo,
+                                             &vbl_sig->info, vbl_sig->task);
+
+                               list_del(&vbl_sig->head);
+
+                               drm_free(vbl_sig, sizeof(*vbl_sig),
+                                        DRM_MEM_DRIVER);
+
+                               dev->vbl_pending--;
+                       }
+               }
+       }
+
+       spin_unlock_irqrestore(&dev->vbl_lock, flags);
+}
+EXPORT_SYMBOL(drm_vbl_send_signals);
+
+/**
+ * Tasklet wrapper function.
+ *
+ * \param data DRM device in disguise.
+ *
+ * Attempts to grab the HW lock and calls the driver callback on success. On
+ * failure, leaves the lock marked as contended so the callback can be called
+ * from drm_unlock().
+ */
+static void drm_locked_tasklet_func(unsigned long data)
+{
+       struct drm_device *dev = (struct drm_device *)data;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+       if (!dev->locked_tasklet_func ||
+           !drm_lock_take(&dev->lock,
+                          DRM_KERNEL_CONTEXT)) {
+               spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+               return;
+       }
+
+       dev->lock.lock_time = jiffies;
+       atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+       dev->locked_tasklet_func(dev);
+
+       drm_lock_free(&dev->lock,
+                     DRM_KERNEL_CONTEXT);
+
+       dev->locked_tasklet_func = NULL;
+
+       spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+}
+
+/**
+ * Schedule a tasklet to call back a driver hook with the HW lock held.
+ *
+ * \param dev DRM device.
+ * \param func Driver callback.
+ *
+ * This is intended for triggering actions that require the HW lock from an
+ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
+ * completes. Note that the callback may be called from interrupt or process
+ * context, it must not make any assumptions about this. Also, the HW lock will
+ * be held with the kernel context or any client context.
+ */
+void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
+{
+       unsigned long irqflags;
+       static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
+
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
+           test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
+               return;
+
+       spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+       if (dev->locked_tasklet_func) {
+               spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+               return;
+       }
+
+       dev->locked_tasklet_func = func;
+
+       spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+       drm_tasklet.data = (unsigned long)dev;
+
+       tasklet_hi_schedule(&drm_tasklet);
+}
+EXPORT_SYMBOL(drm_locked_tasklet);
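+
+/*
+ * Usage sketch (illustrative, with a hypothetical driver hook): an
+ * interrupt handler that must touch lock-protected state defers the work
+ * rather than taking the HW lock itself:
+ *
+ *     static void my_flip_work(struct drm_device *dev)
+ *     {
+ *             // runs with the HW lock held, IRQ or process context
+ *     }
+ *
+ *     // ... from the driver's irq_handler:
+ *     drm_locked_tasklet(dev, my_flip_work);
+ */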
diff --git a/psb-kernel-source-4.41.1/drm_lock.c b/psb-kernel-source-4.41.1/drm_lock.c
new file mode 100644 (file)
index 0000000..b8e4a5d
--- /dev/null
@@ -0,0 +1,393 @@
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_notifier(void *priv);
+
+/**
+ * Lock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Adds the current task to the lock wait queue and attempts to take the lock.
+ */
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       DECLARE_WAITQUEUE(entry, current);
+       struct drm_lock *lock = data;
+       int ret = 0;
+
+       ++file_priv->lock_count;
+
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         current->pid, lock->context);
+               return -EINVAL;
+       }
+
+       DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+                 lock->context, current->pid,
+                 dev->lock.hw_lock->lock, lock->flags);
+
+       if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
+           lock->context < 0)
+               return -EINVAL;
+
+       add_wait_queue(&dev->lock.lock_queue, &entry);
+       spin_lock(&dev->lock.spinlock);
+       dev->lock.user_waiters++;
+       spin_unlock(&dev->lock.spinlock);
+       for (;;) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               if (!dev->lock.hw_lock) {
+                       /* Device has been unregistered */
+                       ret = -EINTR;
+                       break;
+               }
+               if (drm_lock_take(&dev->lock, lock->context)) {
+                       dev->lock.file_priv = file_priv;
+                       dev->lock.lock_time = jiffies;
+                       atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+                       break;  /* Got lock */
+               }
+
+               /* Contention */
+               schedule();
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+       }
+       spin_lock(&dev->lock.spinlock);
+       dev->lock.user_waiters--;
+       spin_unlock(&dev->lock.spinlock);
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&dev->lock.lock_queue, &entry);
+
+       DRM_DEBUG("%d %s\n", lock->context,
+                 ret ? "interrupted" : "has lock");
+       if (ret)
+               return ret;
+
+       sigemptyset(&dev->sigmask);
+       sigaddset(&dev->sigmask, SIGSTOP);
+       sigaddset(&dev->sigmask, SIGTSTP);
+       sigaddset(&dev->sigmask, SIGTTIN);
+       sigaddset(&dev->sigmask, SIGTTOU);
+       dev->sigdata.context = lock->context;
+       dev->sigdata.lock = dev->lock.hw_lock;
+       block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+
+       if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
+               dev->driver->dma_ready(dev);
+
+       if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+       {
+               if (dev->driver->dma_quiescent(dev)) {
+                       DRM_DEBUG("%d waiting for DMA quiescent\n",
+                                 lock->context);
+                       return -EBUSY;
+               }
+       }
+
+       if (dev->driver->kernel_context_switch &&
+           dev->last_context != lock->context) {
+               dev->driver->kernel_context_switch(dev, dev->last_context,
+                                                  lock->context);
+       }
+
+       return 0;
+}
+
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_lock *lock = data;
+       unsigned long irqflags;
+
+       if (lock->context == DRM_KERNEL_CONTEXT) {
+               DRM_ERROR("Process %d using kernel context %d\n",
+                         current->pid, lock->context);
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+       if (dev->locked_tasklet_func) {
+               dev->locked_tasklet_func(dev);
+
+               dev->locked_tasklet_func = NULL;
+       }
+
+       spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+       atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+       /* kernel_context_switch isn't used by any of the x86 drm
+        * modules but is required by the Sparc driver.
+        */
+       if (dev->driver->kernel_context_switch_unlock)
+               dev->driver->kernel_context_switch_unlock(dev);
+       else {
+               if (drm_lock_free(&dev->lock,lock->context)) {
+                       /* FIXME: Should really bail out here. */
+               }
+       }
+
+       unblock_all_signals();
+       return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+int drm_lock_take(struct drm_lock_data *lock_data,
+                 unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock(&lock_data->spinlock);
+       do {
+               old = *lock;
+               if (old & _DRM_LOCK_HELD)
+                       new = old | _DRM_LOCK_CONT;
+               else {
+                       new = context | _DRM_LOCK_HELD |
+                               ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+                                _DRM_LOCK_CONT : 0);
+               }
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       spin_unlock(&lock_data->spinlock);
+
+       if (_DRM_LOCKING_CONTEXT(old) == context) {
+               if (old & _DRM_LOCK_HELD) {
+                       if (context != DRM_KERNEL_CONTEXT) {
+                               DRM_ERROR("%d holds heavyweight lock\n",
+                                         context);
+                       }
+                       return 0;
+               }
+       }
+
+       if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+               /* Have lock */
+
+               return 1;
+       }
+       return 0;
+}
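+
+/*
+ * The lock word packs the holder's context together with two flag bits, so
+ * for example (sketch) a free lock taken by context 3 while other waiters
+ * are queued becomes (3 | _DRM_LOCK_HELD | _DRM_LOCK_CONT), while a failed
+ * attempt on a held lock merely ORs in _DRM_LOCK_CONT.
+ */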
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+                            unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       lock_data->file_priv = NULL;
+       do {
+               old = *lock;
+               new = context | _DRM_LOCK_HELD;
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       return 1;
+}
+
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock(&lock_data->spinlock);
+       if (lock_data->kernel_waiters != 0) {
+               drm_lock_transfer(lock_data, 0);
+               lock_data->idle_has_lock = 1;
+               spin_unlock(&lock_data->spinlock);
+               return 1;
+       }
+       spin_unlock(&lock_data->spinlock);
+
+       do {
+               old = *lock;
+               new = _DRM_LOCKING_CONTEXT(old);
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+
+       if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+               DRM_ERROR("%d freed heavyweight lock held by %d\n",
+                         context, _DRM_LOCKING_CONTEXT(old));
+               return 1;
+       }
+       wake_up_interruptible(&lock_data->lock_queue);
+       return 0;
+}
+
+/**
+ * If we get here, it means that the process has called DRM_IOCTL_LOCK
+ * without calling DRM_IOCTL_UNLOCK.
+ *
+ * If the lock is not held, then let the signal proceed as usual.  If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+ * \param priv pointer to a drm_sigdata structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+static int drm_notifier(void *priv)
+{
+       struct drm_sigdata *s = (struct drm_sigdata *) priv;
+       unsigned int old, new, prev;
+
+       /* Allow signal delivery if lock isn't held */
+       if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+           || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+               return 1;
+
+       /* Otherwise, set flag to force call to
+          drmUnlock */
+       do {
+               old = s->lock->lock;
+               new = old | _DRM_LOCK_CONT;
+               prev = cmpxchg(&s->lock->lock, old, new);
+       } while (prev != old);
+       return 0;
+}
+
+/**
+ * This function returns immediately. It takes the HW lock with the kernel
+ * context if the lock is free; otherwise the kernel context is given the
+ * highest priority to acquire it when and if it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ hold the lock _unless_
+ * it is held by a blocked process. (In the latter case an explicit wait for
+ * the hardware lock would cause a deadlock, which is why the "idlelock" was
+ * invented.)
+ *
+ * This should be sufficient to wait for GPU idle without having to worry
+ * about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+       int ret = 0;
+
+       spin_lock(&lock_data->spinlock);
+       lock_data->kernel_waiters++;
+       if (!lock_data->idle_has_lock) {
+
+               spin_unlock(&lock_data->spinlock);
+               ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+               spin_lock(&lock_data->spinlock);
+
+               if (ret == 1)
+                       lock_data->idle_has_lock = 1;
+       }
+       spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
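+
+/*
+ * Usage sketch (illustrative): a driver waiting for GPU idle brackets the
+ * wait with the idlelock so a blocked lock holder cannot deadlock it:
+ *
+ *     drm_idlelock_take(&dev->lock);
+ *     my_wait_for_engine_idle(dev);    // hypothetical driver helper
+ *     drm_idlelock_release(&dev->lock);
+ */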
+
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+       unsigned int old, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock(&lock_data->spinlock);
+       if (--lock_data->kernel_waiters == 0) {
+               if (lock_data->idle_has_lock) {
+                       do {
+                               old = *lock;
+                               prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+                       } while (prev != old);
+                       wake_up_interruptible(&lock_data->lock_queue);
+                       lock_data->idle_has_lock = 0;
+               }
+       }
+       spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+
+       return (file_priv->lock_count && dev->lock.hw_lock &&
+               _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+               dev->lock.file_priv == file_priv);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/psb-kernel-source-4.41.1/drm_memory.c b/psb-kernel-source-4.41.1/drm_memory.c
new file mode 100644 (file)
index 0000000..becca1e
--- /dev/null
@@ -0,0 +1,343 @@
+/**
+ * \file drm_memory.c
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include "drmP.h"
+
+static struct {
+       spinlock_t lock;
+       uint64_t cur_used;
+       uint64_t emer_used;
+       uint64_t low_threshold;
+       uint64_t high_threshold;
+       uint64_t emer_threshold;
+} drm_memctl = {
+       .lock = SPIN_LOCK_UNLOCKED
+};
+
+int drm_alloc_memctl(size_t size)
+{
+        int ret = 0;
+       unsigned long a_size = drm_size_align(size);
+       unsigned long new_used;
+
+       spin_lock(&drm_memctl.lock);
+       new_used = drm_memctl.cur_used + a_size;
+       if (likely(new_used < drm_memctl.high_threshold)) {
+               drm_memctl.cur_used = new_used;
+               goto out;
+       }
+
+       new_used += drm_memctl.emer_used;
+       if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
+                    (a_size > 16*PAGE_SIZE) ||
+                    (new_used > drm_memctl.emer_threshold))) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       drm_memctl.cur_used = drm_memctl.high_threshold;
+       drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
+out:
+       spin_unlock(&drm_memctl.lock);
+       return ret;
+}
+
+void drm_free_memctl(size_t size)
+{
+       unsigned long a_size = drm_size_align(size);
+
+       spin_lock(&drm_memctl.lock);
+       if (likely(a_size >= drm_memctl.emer_used)) {
+               a_size -= drm_memctl.emer_used;
+               drm_memctl.emer_used = 0;
+       } else {
+               drm_memctl.emer_used -= a_size;
+               a_size = 0;
+       }
+       drm_memctl.cur_used -= a_size;
+       spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_free_memctl);
+
+void drm_query_memctl(uint64_t *cur_used,
+                     uint64_t *emer_used,
+                     uint64_t *low_threshold,
+                     uint64_t *high_threshold,
+                     uint64_t *emer_threshold)
+{
+       spin_lock(&drm_memctl.lock);
+       *cur_used = drm_memctl.cur_used;
+       *emer_used = drm_memctl.emer_used;
+       *low_threshold = drm_memctl.low_threshold;
+       *high_threshold = drm_memctl.high_threshold;
+       *emer_threshold = drm_memctl.emer_threshold;
+       spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_query_memctl);
+
+void drm_init_memctl(size_t p_low_threshold,
+                    size_t p_high_threshold,
+                    size_t unit_size)
+{
+       spin_lock(&drm_memctl.lock);
+       drm_memctl.emer_used = 0;
+       drm_memctl.cur_used = 0;
+       drm_memctl.low_threshold = p_low_threshold * unit_size;
+       drm_memctl.high_threshold = p_high_threshold * unit_size;
+       drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
+               drm_memctl.high_threshold;
+       spin_unlock(&drm_memctl.lock);
+}
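+
+/*
+ * For example (sketch): drm_init_memctl(low_pages, high_pages, PAGE_SIZE)
+ * expresses both thresholds in pages; the emergency threshold then ends up
+ * at high + high/16, a reserve that drm_alloc_memctl() above hands out only
+ * to DRM_SUSER (root) callers in small (<= 16 page) chunks.
+ */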
+
+
+#ifndef DEBUG_MEMORY
+
+/** No-op. */
+void drm_mem_init(void)
+{
+}
+
+/**
+ * Called when "/proc/dri/%dev%/mem" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param len requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * No-op.
+ */
+int drm_mem_info(char *buf, char **start, off_t offset,
+                int len, int *eof, void *data)
+{
+       return 0;
+}
+
+/** Wrapper around kcalloc() */
+void *drm_calloc(size_t nmemb, size_t size, int area)
+{
+       return kcalloc(nmemb, size, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_calloc);
+
+/** Wrapper around kmalloc() and kfree() */
+void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
+{
+       void *pt;
+
+       if (!(pt = kmalloc(size, GFP_KERNEL)))
+               return NULL;
+       if (oldpt && oldsize) {
+               memcpy(pt, oldpt, oldsize);
+               kfree(oldpt);
+       }
+       return pt;
+}
+
+/**
+ * Allocate pages.
+ *
+ * \param order size order.
+ * \param area memory area. (Not used.)
+ * \return page address on success, or zero on failure.
+ *
+ * Allocate and reserve free pages.
+ */
+unsigned long drm_alloc_pages(int order, int area)
+{
+       unsigned long address;
+       unsigned long bytes = PAGE_SIZE << order;
+       unsigned long addr;
+       unsigned int sz;
+
+       address = __get_free_pages(GFP_KERNEL, order);
+       if (!address)
+               return 0;
+
+       /* Zero */
+       memset((void *)address, 0, bytes);
+
+       /* Reserve */
+       for (addr = address, sz = bytes;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               SetPageReserved(virt_to_page(addr));
+       }
+
+       return address;
+}
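+
+/*
+ * For example, drm_alloc_pages(2, 0) returns PAGE_SIZE << 2 bytes (four
+ * contiguous zeroed pages), each marked reserved so they survive to be
+ * mapped into userspace (the historical reason for SetPageReserved here).
+ */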
+
+/**
+ * Free pages.
+ *
+ * \param address address of the pages to free.
+ * \param order size order.
+ * \param area memory area. (Not used.)
+ *
+ * Unreserve and free pages allocated by alloc_pages().
+ */
+void drm_free_pages(unsigned long address, int order, int area)
+{
+       unsigned long bytes = PAGE_SIZE << order;
+       unsigned long addr;
+       unsigned int sz;
+
+       if (!address)
+               return;
+
+       /* Unreserve */
+       for (addr = address, sz = bytes;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+       }
+
+       free_pages(address, order);
+}
+
+#if __OS_HAS_AGP
+static void *agp_remap(unsigned long offset, unsigned long size,
+                             struct drm_device * dev)
+{
+       unsigned long *phys_addr_map, i, num_pages =
+           PAGE_ALIGN(size) / PAGE_SIZE;
+       struct drm_agp_mem *agpmem;
+       struct page **page_map;
+       void *addr;
+
+       size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+       offset -= dev->hose->mem_space->start;
+#endif
+
+       list_for_each_entry(agpmem, &dev->agp->memory, head)
+               if (agpmem->bound <= offset
+                   && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+                   (offset + size))
+                       break;
+       if (!agpmem)
+               return NULL;
+
+       /*
+        * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+        * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+        * page-table instead (that's probably faster anyhow...).
+        */
+       /* note: use vmalloc() because num_pages could be large... */
+       page_map = vmalloc(num_pages * sizeof(struct page *));
+       if (!page_map)
+               return NULL;
+
+       phys_addr_map =
+           agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+       for (i = 0; i < num_pages; ++i)
+               page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+       addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+       vfree(page_map);
+
+       return addr;
+}
+
+/** Wrapper around agp_allocate_memory() */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
+{
+       return drm_agp_allocate_memory(pages, type);
+}
+#else
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
+{
+       return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
+}
+#endif
+
+/** Wrapper around agp_free_memory() */
+int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+       return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+}
+
+/** Wrapper around agp_bind_memory() */
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+       return drm_agp_bind_memory(handle, start);
+}
+
+/** Wrapper around agp_unbind_memory() */
+int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+       return drm_agp_unbind_memory(handle);
+}
+
+#else  /* __OS_HAS_AGP*/
+static void *agp_remap(unsigned long offset, unsigned long size,
+                      struct drm_device * dev)
+{
+       return NULL;
+}
+#endif                         /* agp */
+#else
+static void *agp_remap(unsigned long offset, unsigned long size,
+                      struct drm_device * dev)
+{
+       return NULL;
+}
+#endif                         /* debug_memory */
+
+void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               map->handle = agp_remap(map->offset, map->size, dev);
+       else
+               map->handle = ioremap(map->offset, map->size);
+}
+EXPORT_SYMBOL_GPL(drm_core_ioremap);
+
+void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
+{
+       if (!map->handle || !map->size)
+               return;
+
+       if (drm_core_has_AGP(dev) &&
+           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+               vunmap(map->handle);
+       else
+               iounmap(map->handle);
+}
+EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
diff --git a/psb-kernel-source-4.41.1/drm_memory.h b/psb-kernel-source-4.41.1/drm_memory.h
new file mode 100644 (file)
index 0000000..63e425b
--- /dev/null
@@ -0,0 +1,61 @@
+/**
+ * \file drm_memory.h
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+/**
+ * Cut down version of drm_memory_debug.h, which used to be called
+ * drm_memory.h.
+ */
+
+#if __OS_HAS_AGP
+
+#include <linux/vmalloc.h>
+
+#ifdef HAVE_PAGE_AGP
+#include <asm/agp.h>
+#else
+# ifdef __powerpc__
+#  define PAGE_AGP     __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# else
+#  define PAGE_AGP     PAGE_KERNEL
+# endif
+#endif
+
+#else                          /* __OS_HAS_AGP */
+
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_memory_debug.c b/psb-kernel-source-4.41.1/drm_memory_debug.c
new file mode 100644 (file)
index 0000000..c196ee2
--- /dev/null
@@ -0,0 +1,403 @@
+/**
+ * \file drm_memory_debug.c
+ * Memory management wrappers for DRM.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#ifdef DEBUG_MEMORY
+
+typedef struct drm_mem_stats {
+       const char *name;
+       int succeed_count;
+       int free_count;
+       int fail_count;
+       unsigned long bytes_allocated;
+       unsigned long bytes_freed;
+} drm_mem_stats_t;
+
+static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static unsigned long drm_ram_available = 0;    /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] = {
+       [DRM_MEM_DMA] = {"dmabufs"},
+       [DRM_MEM_SAREA] = {"sareas"},
+       [DRM_MEM_DRIVER] = {"driver"},
+       [DRM_MEM_MAGIC] = {"magic"},
+       [DRM_MEM_IOCTLS] = {"ioctltab"},
+       [DRM_MEM_MAPS] = {"maplist"},
+       [DRM_MEM_VMAS] = {"vmalist"},
+       [DRM_MEM_BUFS] = {"buflist"},
+       [DRM_MEM_SEGS] = {"seglist"},
+       [DRM_MEM_PAGES] = {"pagelist"},
+       [DRM_MEM_FILES] = {"files"},
+       [DRM_MEM_QUEUES] = {"queues"},
+       [DRM_MEM_CMDS] = {"commands"},
+       [DRM_MEM_MAPPINGS] = {"mappings"},
+       [DRM_MEM_BUFLISTS] = {"buflists"},
+       [DRM_MEM_AGPLISTS] = {"agplist"},
+       [DRM_MEM_SGLISTS] = {"sglist"},
+       [DRM_MEM_TOTALAGP] = {"totalagp"},
+       [DRM_MEM_BOUNDAGP] = {"boundagp"},
+       [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+       [DRM_MEM_CTXLIST] = {"ctxlist"},
+       [DRM_MEM_STUB] = {"stub"},
+       {NULL, 0,}              /* Last entry must be null */
+};
+
+void drm_mem_init(void)
+{
+       drm_mem_stats_t *mem;
+       struct sysinfo si;
+
+       for (mem = drm_mem_stats; mem->name; ++mem) {
+               mem->succeed_count = 0;
+               mem->free_count = 0;
+               mem->fail_count = 0;
+               mem->bytes_allocated = 0;
+               mem->bytes_freed = 0;
+       }
+
+       si_meminfo(&si);
+       drm_ram_available = si.totalram;
+       drm_ram_used = 0;
+}
+
+/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
+
+static int drm__mem_info(char *buf, char **start, off_t offset,
+                        int request, int *eof, void *data)
+{
+       drm_mem_stats_t *pt;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *eof = 0;
+       *start = &buf[offset];
+
+       DRM_PROC_PRINT("                  total counts                  "
+                      " |    outstanding  \n");
+       DRM_PROC_PRINT("type       alloc freed fail     bytes      freed"
+                      " | allocs      bytes\n\n");
+       DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
+                      "system", 0, 0, 0,
+                      drm_ram_available << (PAGE_SHIFT - 10));
+       DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
+                      "locked", 0, 0, 0, drm_ram_used >> 10);
+       DRM_PROC_PRINT("\n");
+       for (pt = drm_mem_stats; pt->name; pt++) {
+               DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
+                              pt->name,
+                              pt->succeed_count,
+                              pt->free_count,
+                              pt->fail_count,
+                              pt->bytes_allocated,
+                              pt->bytes_freed,
+                              pt->succeed_count - pt->free_count,
+                              (long)pt->bytes_allocated
+                              - (long)pt->bytes_freed);
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+int drm_mem_info(char *buf, char **start, off_t offset,
+                int len, int *eof, void *data)
+{
+       int ret;
+
+       spin_lock(&drm_mem_lock);
+       ret = drm__mem_info(buf, start, offset, len, eof, data);
+       spin_unlock(&drm_mem_lock);
+       return ret;
+}
+
+void *drm_alloc(size_t size, int area)
+{
+       void *pt;
+
+       if (!size) {
+               DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
+               return NULL;
+       }
+
+       if (!(pt = kmalloc(size, GFP_KERNEL))) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[area].fail_count;
+               spin_unlock(&drm_mem_lock);
+               return NULL;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_allocated += size;
+       spin_unlock(&drm_mem_lock);
+       return pt;
+}
+EXPORT_SYMBOL(drm_alloc);
+
+void *drm_calloc(size_t nmemb, size_t size, int area)
+{
+       void *addr;
+
+       addr = drm_alloc(nmemb * size, area);
+       if (addr != NULL)
+               memset((void *)addr, 0, size * nmemb);
+
+       return addr;
+}
+EXPORT_SYMBOL(drm_calloc);
+
+void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
+{
+       void *pt;
+
+       if (!(pt = drm_alloc(size, area)))
+               return NULL;
+       if (oldpt && oldsize) {
+               memcpy(pt, oldpt, oldsize);
+               drm_free(oldpt, oldsize, area);
+       }
+       return pt;
+}
+EXPORT_SYMBOL(drm_realloc);
+
+void drm_free(void *pt, size_t size, int area)
+{
+       int alloc_count;
+       int free_count;
+
+       if (!pt)
+               DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
+       else
+               kfree(pt);
+       spin_lock(&drm_mem_lock);
+       drm_mem_stats[area].bytes_freed += size;
+       free_count = ++drm_mem_stats[area].free_count;
+       alloc_count = drm_mem_stats[area].succeed_count;
+       spin_unlock(&drm_mem_lock);
+       if (free_count > alloc_count) {
+               DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
+                             free_count, alloc_count);
+       }
+}
+EXPORT_SYMBOL(drm_free);
+
+unsigned long drm_alloc_pages(int order, int area)
+{
+       unsigned long address;
+       unsigned long bytes = PAGE_SIZE << order;
+       unsigned long addr;
+       unsigned int sz;
+
+       spin_lock(&drm_mem_lock);
+       if ((drm_ram_used >> PAGE_SHIFT)
+           > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+               spin_unlock(&drm_mem_lock);
+               return 0;
+       }
+       spin_unlock(&drm_mem_lock);
+
+       address = __get_free_pages(GFP_KERNEL, order);
+       if (!address) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[area].fail_count;
+               spin_unlock(&drm_mem_lock);
+               return 0;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_allocated += bytes;
+       drm_ram_used += bytes;
+       spin_unlock(&drm_mem_lock);
+
+       /* Zero outside the lock */
+       memset((void *)address, 0, bytes);
+
+       /* Reserve */
+       for (addr = address, sz = bytes;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               SetPageReserved(virt_to_page(addr));
+       }
+
+       return address;
+}
+
+void drm_free_pages(unsigned long address, int order, int area)
+{
+       unsigned long bytes = PAGE_SIZE << order;
+       int alloc_count;
+       int free_count;
+       unsigned long addr;
+       unsigned int sz;
+
+       if (!address) {
+               DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+       } else {
+               /* Unreserve */
+               for (addr = address, sz = bytes;
+                    sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+                       ClearPageReserved(virt_to_page(addr));
+               }
+               free_pages(address, order);
+       }
+
+       spin_lock(&drm_mem_lock);
+       free_count = ++drm_mem_stats[area].free_count;
+       alloc_count = drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_freed += bytes;
+       drm_ram_used -= bytes;
+       spin_unlock(&drm_mem_lock);
+       if (free_count > alloc_count) {
+               DRM_MEM_ERROR(area,
+                             "Excess frees: %d frees, %d allocs\n",
+                             free_count, alloc_count);
+       }
+}
+
+#if __OS_HAS_AGP
+
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
+{
+       DRM_AGP_MEM *handle;
+
+       if (!pages) {
+               DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
+               return NULL;
+       }
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       if ((handle = drm_agp_allocate_memory(pages, type))) {
+#else
+       if ((handle = drm_agp_allocate_memory(dev->agp->bridge, pages, type))) {
+#endif
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+                   += pages << PAGE_SHIFT;
+               spin_unlock(&drm_mem_lock);
+               return handle;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+       spin_unlock(&drm_mem_lock);
+       return NULL;
+}
+
+int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+       int alloc_count;
+       int free_count;
+       int retval = -EINVAL;
+
+       if (!handle) {
+               DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+                             "Attempt to free NULL AGP handle\n");
+               return retval;
+       }
+
+       if (drm_agp_free_memory(handle)) {
+               spin_lock(&drm_mem_lock);
+               free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+               alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+                   += pages << PAGE_SHIFT;
+               spin_unlock(&drm_mem_lock);
+               if (free_count > alloc_count) {
+                       DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+                                     "Excess frees: %d frees, %d allocs\n",
+                                     free_count, alloc_count);
+               }
+               return 0;
+       }
+       return retval;
+}
+
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+       int retcode = -EINVAL;
+
+       if (!handle) {
+               DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+                             "Attempt to bind NULL AGP handle\n");
+               return retcode;
+       }
+
+       if (!(retcode = drm_agp_bind_memory(handle, start))) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+                   += handle->page_count << PAGE_SHIFT;
+               spin_unlock(&drm_mem_lock);
+               return retcode;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+       spin_unlock(&drm_mem_lock);
+       return retcode;
+}
+
+int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+       int alloc_count;
+       int free_count;
+       int retcode = -EINVAL;
+
+       if (!handle) {
+               DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+                             "Attempt to unbind NULL AGP handle\n");
+               return retcode;
+       }
+
+       if ((retcode = drm_agp_unbind_memory(handle)))
+               return retcode;
+       spin_lock(&drm_mem_lock);
+       free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+       alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+       drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
+           += handle->page_count << PAGE_SHIFT;
+       spin_unlock(&drm_mem_lock);
+       if (free_count > alloc_count) {
+               DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+                             "Excess frees: %d frees, %d allocs\n",
+                             free_count, alloc_count);
+       }
+       return retcode;
+}
+
+#endif
+#endif
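The debug allocator above tags every allocation with an area index and balances
the counts on free. A hedged sketch of the expected calling pattern
(hypothetical function, assuming DEBUG_MEMORY is defined):

	/* Hypothetical sketch: alloc and free must agree on area and size. */
	static int example_tracked_alloc(void)
	{
		void *buf = drm_alloc(1024, DRM_MEM_DRIVER); /* "driver" row in /proc */

		if (!buf)
			return -ENOMEM;			/* fail_count was bumped */
		/* ... use buf ... */
		drm_free(buf, 1024, DRM_MEM_DRIVER);	/* bytes_freed += 1024 */
		return 0;	/* an extra free would log "Excess frees" */
	}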
diff --git a/psb-kernel-source-4.41.1/drm_memory_debug.h b/psb-kernel-source-4.41.1/drm_memory_debug.h
new file mode 100644 (file)
index 0000000..b055ac0
--- /dev/null
@@ -0,0 +1,379 @@
+/**
+ * \file drm_memory_debug.h
+ * Memory management wrappers for DRM.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+typedef struct drm_mem_stats {
+       const char *name;
+       int succeed_count;
+       int free_count;
+       int fail_count;
+       unsigned long bytes_allocated;
+       unsigned long bytes_freed;
+} drm_mem_stats_t;
+
+static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static unsigned long drm_ram_available = 0;    /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] =
+{
+       [DRM_MEM_DMA] = {"dmabufs"},
+       [DRM_MEM_SAREA] = {"sareas"},
+       [DRM_MEM_DRIVER] = {"driver"},
+       [DRM_MEM_MAGIC] = {"magic"},
+       [DRM_MEM_IOCTLS] = {"ioctltab"},
+       [DRM_MEM_MAPS] = {"maplist"},
+       [DRM_MEM_VMAS] = {"vmalist"},
+       [DRM_MEM_BUFS] = {"buflist"},
+       [DRM_MEM_SEGS] = {"seglist"},
+       [DRM_MEM_PAGES] = {"pagelist"},
+       [DRM_MEM_FILES] = {"files"},
+       [DRM_MEM_QUEUES] = {"queues"},
+       [DRM_MEM_CMDS] = {"commands"},
+       [DRM_MEM_MAPPINGS] = {"mappings"},
+       [DRM_MEM_BUFLISTS] = {"buflists"},
+       [DRM_MEM_AGPLISTS] = {"agplist"},
+       [DRM_MEM_SGLISTS] = {"sglist"},
+       [DRM_MEM_TOTALAGP] = {"totalagp"},
+       [DRM_MEM_BOUNDAGP] = {"boundagp"},
+       [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+       [DRM_MEM_CTXLIST] = {"ctxlist"},
+       [DRM_MEM_STUB] = {"stub"},
+       {NULL, 0,}              /* Last entry must be null */
+};
+
+void drm_mem_init (void) {
+       drm_mem_stats_t *mem;
+       struct sysinfo si;
+
+       for (mem = drm_mem_stats; mem->name; ++mem) {
+               mem->succeed_count = 0;
+               mem->free_count = 0;
+               mem->fail_count = 0;
+               mem->bytes_allocated = 0;
+               mem->bytes_freed = 0;
+       }
+
+       si_meminfo(&si);
+       drm_ram_available = si.totalram;
+       drm_ram_used = 0;
+}
+
+/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
+
+static int drm__mem_info (char *buf, char **start, off_t offset,
+                          int request, int *eof, void *data) {
+       drm_mem_stats_t *pt;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *eof = 0;
+       *start = &buf[offset];
+
+       DRM_PROC_PRINT("                  total counts                  "
+                      " |    outstanding  \n");
+       DRM_PROC_PRINT("type       alloc freed fail     bytes      freed"
+                      " | allocs      bytes\n\n");
+       DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
+                      "system", 0, 0, 0,
+                      drm_ram_available << (PAGE_SHIFT - 10));
+       DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
+                      "locked", 0, 0, 0, drm_ram_used >> 10);
+       DRM_PROC_PRINT("\n");
+       for (pt = drm_mem_stats; pt->name; pt++) {
+               DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
+                              pt->name,
+                              pt->succeed_count,
+                              pt->free_count,
+                              pt->fail_count,
+                              pt->bytes_allocated,
+                              pt->bytes_freed,
+                              pt->succeed_count - pt->free_count,
+                              (long)pt->bytes_allocated
+                              - (long)pt->bytes_freed);
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+int drm_mem_info (char *buf, char **start, off_t offset,
+                  int len, int *eof, void *data) {
+       int ret;
+
+       spin_lock(&drm_mem_lock);
+       ret = drm__mem_info (buf, start, offset, len, eof, data);
+       spin_unlock(&drm_mem_lock);
+       return ret;
+}
+
+void *drm_alloc (size_t size, int area) {
+       void *pt;
+
+       if (!size) {
+               DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
+               return NULL;
+       }
+
+       if (!(pt = kmalloc(size, GFP_KERNEL))) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[area].fail_count;
+               spin_unlock(&drm_mem_lock);
+               return NULL;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_allocated += size;
+       spin_unlock(&drm_mem_lock);
+       return pt;
+}
+
+void *drm_calloc (size_t nmemb, size_t size, int area) {
+       void *addr;
+
+       addr = drm_alloc (nmemb * size, area);
+       if (addr != NULL)
+               memset((void *)addr, 0, size * nmemb);
+
+       return addr;
+}
+
+void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
+       void *pt;
+
+       if (!(pt = drm_alloc (size, area)))
+               return NULL;
+       if (oldpt && oldsize) {
+               memcpy(pt, oldpt, oldsize);
+               drm_free (oldpt, oldsize, area);
+       }
+       return pt;
+}
+
+void drm_free (void *pt, size_t size, int area) {
+       int alloc_count;
+       int free_count;
+
+       if (!pt)
+               DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
+       else
+               kfree(pt);
+       spin_lock(&drm_mem_lock);
+       drm_mem_stats[area].bytes_freed += size;
+       free_count = ++drm_mem_stats[area].free_count;
+       alloc_count = drm_mem_stats[area].succeed_count;
+       spin_unlock(&drm_mem_lock);
+       if (free_count > alloc_count) {
+               DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
+                             free_count, alloc_count);
+       }
+}
+
+unsigned long drm_alloc_pages (int order, int area) {
+       unsigned long address;
+       unsigned long bytes = PAGE_SIZE << order;
+       unsigned long addr;
+       unsigned int sz;
+
+       spin_lock(&drm_mem_lock);
+       if ((drm_ram_used >> PAGE_SHIFT)
+           > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+               spin_unlock(&drm_mem_lock);
+               return 0;
+       }
+       spin_unlock(&drm_mem_lock);
+
+       address = __get_free_pages(GFP_KERNEL, order);
+       if (!address) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[area].fail_count;
+               spin_unlock(&drm_mem_lock);
+               return 0;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_allocated += bytes;
+       drm_ram_used += bytes;
+       spin_unlock(&drm_mem_lock);
+
+       /* Zero outside the lock */
+       memset((void *)address, 0, bytes);
+
+       /* Reserve */
+       for (addr = address, sz = bytes;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               SetPageReserved(virt_to_page(addr));
+       }
+
+       return address;
+}
+
+void drm_free_pages (unsigned long address, int order, int area) {
+       unsigned long bytes = PAGE_SIZE << order;
+       int alloc_count;
+       int free_count;
+       unsigned long addr;
+       unsigned int sz;
+
+       if (!address) {
+               DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+       } else {
+               /* Unreserve */
+               for (addr = address, sz = bytes;
+                    sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+                       ClearPageReserved(virt_to_page(addr));
+               }
+               free_pages(address, order);
+       }
+
+       spin_lock(&drm_mem_lock);
+       free_count = ++drm_mem_stats[area].free_count;
+       alloc_count = drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_freed += bytes;
+       drm_ram_used -= bytes;
+       spin_unlock(&drm_mem_lock);
+       if (free_count > alloc_count) {
+               DRM_MEM_ERROR(area,
+                             "Excess frees: %d frees, %d allocs\n",
+                             free_count, alloc_count);
+       }
+}
+
+#if __OS_HAS_AGP
+
+DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) {
+       DRM_AGP_MEM *handle;
+
+       if (!pages) {
+               DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
+               return NULL;
+       }
+
+       if ((handle = drm_agp_allocate_memory (pages, type))) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+                   += pages << PAGE_SHIFT;
+               spin_unlock(&drm_mem_lock);
+               return handle;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+       spin_unlock(&drm_mem_lock);
+       return NULL;
+}
+
+int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
+       int alloc_count;
+       int free_count;
+       int retval = -EINVAL;
+
+       if (!handle) {
+               DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+                             "Attempt to free NULL AGP handle\n");
+               return retval;
+       }
+
+       if (drm_agp_free_memory (handle)) {
+               spin_lock(&drm_mem_lock);
+               free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+               alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+                   += pages << PAGE_SHIFT;
+               spin_unlock(&drm_mem_lock);
+               if (free_count > alloc_count) {
+                       DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+                                     "Excess frees: %d frees, %d allocs\n",
+                                     free_count, alloc_count);
+               }
+               return 0;
+       }
+       return retval;
+}
+
+int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
+       int retcode = -EINVAL;
+
+       if (!handle) {
+               DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+                             "Attempt to bind NULL AGP handle\n");
+               return retcode;
+       }
+
+       if (!(retcode = drm_agp_bind_memory (handle, start))) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+                   += handle->page_count << PAGE_SHIFT;
+               spin_unlock(&drm_mem_lock);
+               return retcode;
+       }
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+       spin_unlock(&drm_mem_lock);
+       return retcode;
+}
+
+int drm_unbind_agp (DRM_AGP_MEM * handle) {
+       int alloc_count;
+       int free_count;
+       int retcode = -EINVAL;
+
+       if (!handle) {
+               DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+                             "Attempt to unbind NULL AGP handle\n");
+               return retcode;
+       }
+
+       if ((retcode = drm_agp_unbind_memory (handle)))
+               return retcode;
+       spin_lock(&drm_mem_lock);
+       free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+       alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+       drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
+           += handle->page_count << PAGE_SHIFT;
+       spin_unlock(&drm_mem_lock);
+       if (free_count > alloc_count) {
+               DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+                             "Excess frees: %d frees, %d allocs\n",
+                             free_count, alloc_count);
+       }
+       return retcode;
+}
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_mm.c b/psb-kernel-source-4.41.1/drm_mm.c
new file mode 100644 (file)
index 0000000..0fa8654
--- /dev/null
@@ -0,0 +1,296 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple: the free list is currently
+ * just an unordered stack of free regions, so there may be substantial
+ * performance gains from a smarter structure, e.g. an RB-tree, at least
+ * under heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include <linux/slab.h>
+
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free)
+               return 0;
+
+       return entry->size;
+}
+
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free)
+               return -ENOMEM;
+
+       if (entry->size <= size)
+               return -ENOMEM;
+
+       entry->size -= size;
+       return 0;
+}
+
+
+static int drm_mm_create_tail_node(struct drm_mm *mm,
+                           unsigned long start,
+                           unsigned long size)
+{
+       struct drm_mm_node *child;
+
+       child = (struct drm_mm_node *)
+               drm_alloc(sizeof(*child), DRM_MEM_MM);
+       if (!child)
+               return -ENOMEM;
+
+       child->free = 1;
+       child->size = size;
+       child->start = start;
+       child->mm = mm;
+
+       list_add_tail(&child->ml_entry, &mm->ml_entry);
+       list_add_tail(&child->fl_entry, &mm->fl_entry);
+
+       return 0;
+}
+
+
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+{
+       struct list_head *tail_node;
+       struct drm_mm_node *entry;
+
+       tail_node = mm->ml_entry.prev;
+       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       if (!entry->free) {
+               return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+       }
+       entry->size += size;
+       return 0;
+}
+
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+                                           unsigned long size)
+{
+       struct drm_mm_node *child;
+
+       child = (struct drm_mm_node *)
+               drm_alloc(sizeof(*child), DRM_MEM_MM);
+       if (!child)
+               return NULL;
+
+       INIT_LIST_HEAD(&child->fl_entry);
+
+       child->free = 0;
+       child->size = size;
+       child->start = parent->start;
+       child->mm = parent->mm;
+
+       list_add_tail(&child->ml_entry, &parent->ml_entry);
+
+       parent->size -= size;
+       parent->start += size;
+       return child;
+}
+
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+                               unsigned long size, unsigned alignment)
+{
+
+       struct drm_mm_node *align_splitoff = NULL;
+       struct drm_mm_node *child;
+       unsigned tmp = 0;
+
+       if (alignment)
+               tmp = parent->start % alignment;
+
+       if (tmp) {
+               align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+               if (!align_splitoff)
+                       return NULL;
+       }
+
+       if (parent->size == size) {
+               list_del_init(&parent->fl_entry);
+               parent->free = 0;
+               return parent;
+       } else {
+               child = drm_mm_split_at_start(parent, size);
+       }
+
+       if (align_splitoff)
+               drm_mm_put_block(align_splitoff);
+
+       return child;
+}
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void drm_mm_put_block(struct drm_mm_node * cur)
+{
+
+       struct drm_mm *mm = cur->mm;
+       struct list_head *cur_head = &cur->ml_entry;
+       struct list_head *root_head = &mm->ml_entry;
+       struct drm_mm_node *prev_node = NULL;
+       struct drm_mm_node *next_node;
+
+       int merged = 0;
+
+       if (cur_head->prev != root_head) {
+               prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+               if (prev_node->free) {
+                       prev_node->size += cur->size;
+                       merged = 1;
+               }
+       }
+       if (cur_head->next != root_head) {
+               next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+               if (next_node->free) {
+                       if (merged) {
+                               prev_node->size += next_node->size;
+                               list_del(&next_node->ml_entry);
+                               list_del(&next_node->fl_entry);
+                               drm_free(next_node, sizeof(*next_node),
+                                            DRM_MEM_MM);
+                       } else {
+                               next_node->size += cur->size;
+                               next_node->start = cur->start;
+                               merged = 1;
+                       }
+               }
+       }
+       if (!merged) {
+               cur->free = 1;
+               list_add(&cur->fl_entry, &mm->fl_entry);
+       } else {
+               list_del(&cur->ml_entry);
+               drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+       }
+}
+EXPORT_SYMBOL(drm_mm_put_block);
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+                                 unsigned long size,
+                                 unsigned alignment, int best_match)
+{
+       struct list_head *list;
+       const struct list_head *free_stack = &mm->fl_entry;
+       struct drm_mm_node *entry;
+       struct drm_mm_node *best;
+       unsigned long best_size;
+       unsigned wasted;
+
+       best = NULL;
+       best_size = ~0UL;
+
+       list_for_each(list, free_stack) {
+               entry = list_entry(list, struct drm_mm_node, fl_entry);
+               wasted = 0;
+
+               if (entry->size < size)
+                       continue;
+
+               if (alignment) {
+                       register unsigned tmp = entry->start % alignment;
+                       if (tmp)
+                               wasted += alignment - tmp;
+               }
+
+
+               if (entry->size >= size + wasted) {
+                       if (!best_match)
+                               return entry;
+                       if (entry->size < best_size) {
+                               best = entry;
+                               best_size = entry->size;
+                       }
+               }
+       }
+
+       return best;
+}
+
+int drm_mm_clean(struct drm_mm * mm)
+{
+       struct list_head *head = &mm->ml_entry;
+
+       return (head->next->next == head);
+}
+
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+       INIT_LIST_HEAD(&mm->ml_entry);
+       INIT_LIST_HEAD(&mm->fl_entry);
+
+       return drm_mm_create_tail_node(mm, start, size);
+}
+
+EXPORT_SYMBOL(drm_mm_init);
+
+void drm_mm_takedown(struct drm_mm * mm)
+{
+       struct list_head *bnode = mm->fl_entry.next;
+       struct drm_mm_node *entry;
+
+       entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+
+       if (entry->ml_entry.next != &mm->ml_entry ||
+           entry->fl_entry.next != &mm->fl_entry) {
+               DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+               return;
+       }
+
+       list_del(&entry->fl_entry);
+       list_del(&entry->ml_entry);
+       drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+}
+
+EXPORT_SYMBOL(drm_mm_takedown);
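A minimal lifecycle sketch for the manager above (hypothetical sizes, error
handling trimmed):

	/* Hypothetical sketch: manage [0, 1 MiB) and carve out one 4 KiB block. */
	static void example_mm_lifecycle(void)
	{
		struct drm_mm mm;
		struct drm_mm_node *hole, *node;

		drm_mm_init(&mm, 0, 1024 * 1024);
		hole = drm_mm_search_free(&mm, 4096, 0, 0);	/* first fit */
		if (hole) {
			node = drm_mm_get_block(hole, 4096, 0);
			/* ... node->start is the allocated offset ... */
			drm_mm_put_block(node);	/* merges back with free neighbours */
		}
		drm_mm_takedown(&mm);
	}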
diff --git a/psb-kernel-source-4.41.1/drm_modes.c b/psb-kernel-source-4.41.1/drm_modes.c
new file mode 100644 (file)
index 0000000..fd00841
--- /dev/null
@@ -0,0 +1,560 @@
+/*
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+/*
+ * Copyright © 2007 Dave Airlie
+ */
+
+#include <linux/list.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @dev: DRM device
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(struct drm_device *dev,
+                                 struct drm_display_mode *mode)
+{
+       DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x\n",
+                 mode->mode_id, mode->name, mode->vrefresh, mode->clock,
+                 mode->hdisplay, mode->hsync_start,
+                 mode->hsync_end, mode->htotal,
+                 mode->vdisplay, mode->vsync_start,
+                 mode->vsync_end, mode->vtotal, mode->type);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
+                mode->vdisplay);
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+
+       struct list_head *entry, *tmp;
+
+       list_for_each_safe(entry, tmp, head) {
+               list_move_tail(entry, new);
+       }
+}
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(struct drm_display_mode *mode)
+{
+       return mode->hdisplay;
+
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(struct drm_display_mode *mode)
+{
+       return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate or calculate it if necessary.
+ *
+ * FIXME: why is this needed?  shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate of @mode, multiplied by 1000 for precision.
+ */
+int drm_mode_vrefresh(struct drm_display_mode *mode)
+{
+       int refresh = 0;
+       unsigned int calc_val;
+
+       if (mode->vrefresh > 0)
+               refresh = mode->vrefresh;
+       else if (mode->htotal > 0 && mode->vtotal > 0) {
+               /* work out the vrefresh; the result is scaled by 1000 */
+               calc_val = (mode->clock * 1000);
+
+               calc_val /= mode->htotal;
+               calc_val *= 1000;
+               calc_val /= mode->vtotal;
+
+               refresh = calc_val;
+               if (mode->flags & V_INTERLACE)
+                       refresh *= 2;
+               if (mode->flags & V_DBLSCAN)
+                       refresh /= 2;
+               if (mode->vscan > 1)
+                       refresh /= mode->vscan;
+       }
+       return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
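As a worked example of the arithmetic (standard 640x480 VESA timings, not
values from this patch): with clock = 25175 kHz, htotal = 800 and
vtotal = 525, calc_val starts at 25175 * 1000 = 25175000, becomes
25175000 / 800 = 31468 after the htotal division, then
31468 * 1000 / 525 = 59939 after the vtotal division, i.e. 59.939 Hz
reported as 59939 because the result is scaled by 1000.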
+       
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: adjustment flags (e.g. %CRTC_INTERLACE_HALVE_V)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+       if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+               return;
+
+       p->crtc_hdisplay = p->hdisplay;
+       p->crtc_hsync_start = p->hsync_start;
+       p->crtc_hsync_end = p->hsync_end;
+       p->crtc_htotal = p->htotal;
+       p->crtc_hskew = p->hskew;
+       p->crtc_vdisplay = p->vdisplay;
+       p->crtc_vsync_start = p->vsync_start;
+       p->crtc_vsync_end = p->vsync_end;
+       p->crtc_vtotal = p->vtotal;
+
+       if (p->flags & V_INTERLACE) {
+               if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+                       p->crtc_vdisplay /= 2;
+                       p->crtc_vsync_start /= 2;
+                       p->crtc_vsync_end /= 2;
+                       p->crtc_vtotal /= 2;
+               }
+
+               p->crtc_vtotal |= 1;
+       }
+
+       if (p->flags & V_DBLSCAN) {
+               p->crtc_vdisplay *= 2;
+               p->crtc_vsync_start *= 2;
+               p->crtc_vsync_end *= 2;
+               p->crtc_vtotal *= 2;
+       }
+
+       if (p->vscan > 1) {
+               p->crtc_vdisplay *= p->vscan;
+               p->crtc_vsync_start *= p->vscan;
+               p->crtc_vsync_end *= p->vscan;
+               p->crtc_vtotal *= p->vscan;
+       }
+
+       p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+       p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+       p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+       p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+
+       p->crtc_hadjusted = false;
+       p->crtc_vadjusted = false;
+}
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
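For illustration (common 1080i timings, not taken from this patch): a
V_INTERLACE mode with vdisplay = 1080, vsync_start = 1084, vsync_end = 1094
and vtotal = 1125, passed with CRTC_INTERLACE_HALVE_V set, yields
crtc_vdisplay = 540, crtc_vsync_start = 542, crtc_vsync_end = 547 and
crtc_vtotal = 563 (1125 / 2 = 562, then forced odd by the |= 1).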
+
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @m: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it.  Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+                                           struct drm_display_mode *mode)
+{
+       struct drm_display_mode *nmode;
+       int new_id;
+
+       nmode = drm_mode_create(dev);
+       if (!nmode)
+               return NULL;
+
+       new_id = nmode->mode_id;
+       *nmode = *mode;
+       nmode->mode_id = new_id;
+       INIT_LIST_HEAD(&nmode->head);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+{
+       if (mode1->clock == mode2->clock &&
+           mode1->hdisplay == mode2->hdisplay &&
+           mode1->hsync_start == mode2->hsync_start &&
+           mode1->hsync_end == mode2->hsync_end &&
+           mode1->htotal == mode2->htotal &&
+           mode1->hskew == mode2->hskew &&
+           mode1->vdisplay == mode2->vdisplay &&
+           mode1->vsync_start == mode2->vsync_start &&
+           mode1->vsync_end == mode2->vsync_end &&
+           mode1->vtotal == mode2->vtotal &&
+           mode1->vscan == mode2->vscan &&
+           mode1->flags == mode2->flags)
+               return true;
+       
+       return false;
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits.  Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+                           struct list_head *mode_list,
+                           int maxX, int maxY, int maxPitch)
+{
+       struct drm_display_mode *mode;
+
+       list_for_each_entry(mode, mode_list, head) {
+               if (maxPitch > 0 && mode->hdisplay > maxPitch)
+                       mode->status = MODE_BAD_WIDTH;
+               
+               if (maxX > 0 && mode->hdisplay > maxX)
+                       mode->status = MODE_VIRTUAL_X;
+
+               if (maxY > 0 && mode->vdisplay > maxY)
+                       mode->status = MODE_VIRTUAL_Y;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question.  This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+                             struct list_head *mode_list,
+                             int *min, int *max, int n_ranges)
+{
+       struct drm_display_mode *mode;
+       int i;
+
+       list_for_each_entry(mode, mode_list, head) {
+               bool good = false;
+               for (i = 0; i < n_ranges; i++) {
+                       if (mode->clock >= min[i] && mode->clock <= max[i]) {
+                               good = true;
+                               break;
+                       }
+               }
+               if (!good)
+                       mode->status = MODE_CLOCK_RANGE;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list.  If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+                           struct list_head *mode_list, bool verbose)
+{
+       struct drm_display_mode *mode, *t;
+
+       list_for_each_entry_safe(mode, t, mode_list, head) {
+               if (mode->status != MODE_OK) {
+                       list_del(&mode->head);
+                       if (verbose) {
+                               drm_mode_debug_printmodeline(dev, mode);
+                               DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
+                       }
+                       kfree(mode);
+               }
+       }
+}
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+{
+       struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+       struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+       int diff;
+
+       diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+               ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+       if (diff)
+               return diff;
+       diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+       if (diff)
+               return diff;
+       diff = b->clock - a->clock;
+       return diff;
+}
+
+/* FIXME: why doesn't the kernel provide a generic list sort function? */
+/* Bottom-up merge sort for list_heads, from Mark J Roberts (mjr@znex.org). */
+void list_sort(struct list_head *head, int (*cmp)(struct list_head *a, struct list_head *b))
+{
+       struct list_head *p, *q, *e, *list, *tail, *oldhead;
+       int insize, nmerges, psize, qsize, i;
+       
+       list = head->next;
+       list_del(head);
+       insize = 1;
+       for (;;) {
+               p = oldhead = list;
+               list = tail = NULL;
+               nmerges = 0;
+               
+               while (p) {
+                       nmerges++;
+                       q = p;
+                       psize = 0;
+                       for (i = 0; i < insize; i++) {
+                               psize++;
+                               q = q->next == oldhead ? NULL : q->next;
+                               if (!q)
+                                       break;
+                       }
+                       
+                       qsize = insize;
+                       while (psize > 0 || (qsize > 0 && q)) {
+                               if (!psize) {
+                                       e = q;
+                                       q = q->next;
+                                       qsize--;
+                                       if (q == oldhead)
+                                               q = NULL;
+                               } else if (!qsize || !q) {
+                                       e = p;
+                                       p = p->next;
+                                       psize--;
+                                       if (p == oldhead)
+                                               p = NULL;
+                               } else if (cmp(p, q) <= 0) {
+                                       e = p;
+                                       p = p->next;
+                                       psize--;
+                                       if (p == oldhead)
+                                               p = NULL;
+                               } else {
+                                       e = q;
+                                       q = q->next;
+                                       qsize--;
+                                       if (q == oldhead)
+                                               q = NULL;
+                               }
+                               if (tail)
+                                       tail->next = e;
+                               else
+                                       list = e;
+                               e->prev = tail;
+                               tail = e;
+                       }
+                       p = q;
+               }
+               
+               tail->next = list;
+               list->prev = tail;
+               
+               if (nmerges <= 1)
+                       break;
+               
+               insize *= 2;
+       }
+       
+       head->next = list;
+       head->prev = list->prev;
+       list->prev->next = head;
+       list->prev = head;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+       list_sort(mode_list, drm_mode_compare);
+}
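A hedged sketch of how the validation, pruning and sorting helpers above are
typically chained after probing; the function name and limit values are
hypothetical:

	/* Hypothetical sketch: filter a freshly probed mode list. */
	static void example_filter_modes(struct drm_device *dev,
					 struct list_head *modes)
	{
		int min_clock[] = { 25000 };	/* kHz */
		int max_clock[] = { 165000 };	/* kHz */

		drm_mode_validate_size(dev, modes, 2048, 2048, 2048);
		drm_mode_validate_clocks(dev, modes, min_clock, max_clock, 1);
		drm_mode_prune_invalid(dev, modes, true); /* drops status != MODE_OK */
		drm_mode_sort(modes);			  /* best modes first */
	}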
+
+
+/**
+ * drm_mode_output_list_update - update the mode list for the output
+ * @output: the output to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @output probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+ * list and only adds different modes. All modes unverified after this point
+ * will be removed by the invalid-mode pruning pass.
+ */
+void drm_mode_output_list_update(struct drm_output *output)
+{
+       struct drm_display_mode *mode;
+       struct drm_display_mode *pmode, *pt;
+       int found_it;
+       list_for_each_entry_safe(pmode, pt, &output->probed_modes,
+                                head) {
+               found_it = 0;
+               /* go through current modes checking for the new probed mode */
+               list_for_each_entry(mode, &output->modes, head) {
+                       if (drm_mode_equal(pmode, mode)) {
+                               found_it = 1;
+                               /* if equal delete the probed mode */
+                               mode->status = pmode->status;
+                               list_del(&pmode->head);
+                               kfree(pmode);
+                               break;
+                       }
+               }
+
+               if (!found_it) {
+                       list_move_tail(&pmode->head, &output->modes);
+               }
+       }
+}
diff --git a/psb-kernel-source-4.41.1/drm_object.c b/psb-kernel-source-4.41.1/drm_object.c
new file mode 100644 (file)
index 0000000..5ade617
--- /dev/null
@@ -0,0 +1,294 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
+                       int shareable)
+{
+       struct drm_device *dev = priv->head->dev;
+       int ret;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
+       /* The refcount will be bumped to 1 when we add the ref object below. */
+       atomic_set(&item->refcount, 0);
+       item->shareable = shareable;
+       item->owner = priv;
+
+       ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
+                                       (unsigned long)item, 32, 0, 0);
+       if (ret)
+               return ret;
+
+       ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
+       if (ret)
+               ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_add_user_object);
+
+struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_hash_item *hash;
+       int ret;
+       struct drm_user_object *item;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
+       ret = drm_ht_find_item(&dev->object_hash, key, &hash);
+       if (ret)
+               return NULL;
+
+       item = drm_hash_entry(hash, struct drm_user_object, hash);
+
+       if (priv != item->owner) {
+               struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
+               ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
+               if (ret) {
+                       DRM_ERROR("Object not registered for usage\n");
+                       return NULL;
+               }
+       }
+       return item;
+}
+EXPORT_SYMBOL(drm_lookup_user_object);
+
+static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
+{
+       struct drm_device *dev = priv->head->dev;
+       int ret;
+
+       if (atomic_dec_and_test(&item->refcount)) {
+               ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+               BUG_ON(ret);
+               item->remove(priv, item);
+       }
+}
+
+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
+                                enum drm_ref_type action)
+{
+       int ret = 0;
+
+       switch (action) {
+       case _DRM_REF_USE:
+               atomic_inc(&ro->refcount);
+               break;
+       default:
+               if (ro->ref_struct_locked)
+                       ro->ref_struct_locked(priv, ro, action);
+               break;
+       }
+       return ret;
+}
+
+int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
+                      enum drm_ref_type ref_action)
+{
+       int ret = 0;
+       struct drm_ref_object *item;
+       struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
+
+       DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
+       if (!referenced_object->shareable && priv != referenced_object->owner) {
+               DRM_ERROR("Not allowed to reference this object\n");
+               return -EINVAL;
+       }
+
+       /*
+        * If this is not a usage reference, check that a usage reference has
+        * been registered first. Otherwise strange things may happen on
+        * destruction.
+        */
+
+       if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
+               item =
+                   drm_lookup_ref_object(priv, referenced_object,
+                                         _DRM_REF_USE);
+               if (!item) {
+                       DRM_ERROR
+                           ("Object not registered for usage by this client\n");
+                       return -EINVAL;
+               }
+       }
+
+       item = drm_lookup_ref_object(priv, referenced_object, ref_action);
+       if (item != NULL) {
+               atomic_inc(&item->refcount);
+               return drm_object_ref_action(priv, referenced_object,
+                                            ref_action);
+       }
+
+       item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
+       if (item == NULL) {
+               DRM_ERROR("Could not allocate reference object\n");
+               return -ENOMEM;
+       }
+
+       atomic_set(&item->refcount, 1);
+       item->hash.key = (unsigned long)referenced_object;
+       ret = drm_ht_insert_item(ht, &item->hash);
+       item->unref_action = ref_action;
+
+       if (ret) {
+               /* Don't leak the ref object if the hash insert failed. */
+               drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+               goto out;
+       }
+
+       list_add(&item->list, &priv->refd_objects);
+       ret = drm_object_ref_action(priv, referenced_object, ref_action);
+out:
+       return ret;
+}
+
+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
+                                       struct drm_user_object *referenced_object,
+                                       enum drm_ref_type ref_action)
+{
+       struct drm_hash_item *hash;
+       int ret;
+
+       DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
+       ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
+                              (unsigned long)referenced_object, &hash);
+       if (ret)
+               return NULL;
+
+       return drm_hash_entry(hash, struct drm_ref_object, hash);
+}
+EXPORT_SYMBOL(drm_lookup_ref_object);
+
+static void drm_remove_other_references(struct drm_file *priv,
+                                       struct drm_user_object *ro)
+{
+       int i;
+       struct drm_open_hash *ht;
+       struct drm_hash_item *hash;
+       struct drm_ref_object *item;
+
+       for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
+               ht = &priv->refd_object_hash[i];
+               while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
+                       item = drm_hash_entry(hash, struct drm_ref_object, hash);
+                       drm_remove_ref_object(priv, item);
+               }
+       }
+}
+
+void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
+{
+       int ret;
+       struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
+       struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
+       enum drm_ref_type unref_action;
+
+       DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
+       unref_action = item->unref_action;
+       if (atomic_dec_and_test(&item->refcount)) {
+               ret = drm_ht_remove_item(ht, &item->hash);
+               BUG_ON(ret);
+               list_del_init(&item->list);
+               if (unref_action == _DRM_REF_USE)
+                       drm_remove_other_references(priv, user_object);
+               drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+       }
+
+       switch (unref_action) {
+       case _DRM_REF_USE:
+               drm_deref_user_object(priv, user_object);
+               break;
+       default:
+               BUG_ON(!user_object->unref);
+               user_object->unref(priv, user_object, unref_action);
+               break;
+       }
+
+}
+EXPORT_SYMBOL(drm_remove_ref_object);
+
+int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
+                       enum drm_object_type type, struct drm_user_object **object)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_user_object *uo;
+       struct drm_hash_item *hash;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
+       if (ret) {
+               DRM_ERROR("Could not find user object to reference.\n");
+               goto out_err;
+       }
+       uo = drm_hash_entry(hash, struct drm_user_object, hash);
+       if (uo->type != type) {
+               ret = -EINVAL;
+               goto out_err;
+       }
+       ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
+       if (ret)
+               goto out_err;
+       mutex_unlock(&dev->struct_mutex);
+       *object = uo;
+       return 0;
+out_err:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
+                         enum drm_object_type type)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_user_object *uo;
+       struct drm_ref_object *ro;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       uo = drm_lookup_user_object(priv, user_token);
+       if (!uo || (uo->type != type)) {
+               ret = -EINVAL;
+               goto out_err;
+       }
+       ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
+       if (!ro) {
+               ret = -EINVAL;
+               goto out_err;
+       }
+       drm_remove_ref_object(priv, ro);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+out_err:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
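+
+/*
+ * Usage sketch (illustrative only): an ioctl handler resolving a user
+ * handle to a fence object and dropping the reference again when done.
+ * "arg->handle" is a hypothetical ioctl argument; error handling is
+ * abbreviated.
+ *
+ *     struct drm_user_object *uo;
+ *     int ret;
+ *
+ *     ret = drm_user_object_ref(file_priv, arg->handle,
+ *                               drm_fence_type, &uo);
+ *     if (ret)
+ *             return ret;
+ *     ... use the object ...
+ *     drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
+ */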
diff --git a/psb-kernel-source-4.41.1/drm_objects.h b/psb-kernel-source-4.41.1/drm_objects.h
new file mode 100644 (file)
index 0000000..99fe476
--- /dev/null
@@ -0,0 +1,721 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_OBJECTS_H
+#define _DRM_OBJECTS_H
+
+struct drm_device;
+struct drm_bo_mem_reg;
+
+/***************************************************
+ * User space objects. (drm_object.c)
+ */
+
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+enum drm_object_type {
+       drm_fence_type,
+       drm_buffer_type,
+       drm_lock_type,
+       /*
+        * Add other user space object types here.
+        */
+       drm_driver_type0 = 256,
+       drm_driver_type1,
+       drm_driver_type2,
+       drm_driver_type3,
+       drm_driver_type4
+};
+
+/*
+ * A user object is a structure that helps the DRM give out user handles
+ * to kernel-internal objects and to keep track of these objects so that
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle.
+ */
+
+struct drm_user_object {
+       struct drm_hash_item hash;
+       struct list_head list;
+       enum drm_object_type type;
+       atomic_t refcount;
+       int shareable;
+       struct drm_file *owner;
+       void (*ref_struct_locked) (struct drm_file *priv,
+                                  struct drm_user_object *obj,
+                                  enum drm_ref_type ref_action);
+       void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
+                      enum drm_ref_type unref_action);
+       void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
+};
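+
+/*
+ * Usage sketch (not part of the API proper): a driver typically embeds
+ * a drm_user_object in its own object and registers it under
+ * dev->struct_mutex. "my_driver_object" and "my_remove" below are
+ * hypothetical names for illustration.
+ *
+ *     struct my_driver_object {
+ *             struct drm_user_object base;
+ *             ... driver private state ...
+ *     };
+ *
+ *     static void my_remove(struct drm_file *priv,
+ *                           struct drm_user_object *obj)
+ *     {
+ *             struct my_driver_object *mo =
+ *                     drm_user_object_entry(obj, struct my_driver_object,
+ *                                           base);
+ *             kfree(mo);
+ *     }
+ *
+ *     mutex_lock(&dev->struct_mutex);
+ *     mo->base.remove = my_remove;
+ *     ret = drm_add_user_object(priv, &mo->base, 1);
+ *     mutex_unlock(&dev->struct_mutex);
+ */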
+
+/*
+ * A ref object is a structure used to keep track of references to user
+ * objects so that they can be destroyed, for example when the user space
+ * process exits. Designed to be accessible using a pointer to the _user_
+ * object.
+ */
+
+struct drm_ref_object {
+       struct drm_hash_item hash;
+       struct list_head list;
+       atomic_t refcount;
+       enum drm_ref_type unref_action;
+};
+
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
+                              int shareable);
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
+                                                uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(struct drm_file *priv,
+                             struct drm_user_object *referenced_object,
+                             enum drm_ref_type ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
+                                       struct drm_user_object *referenced_object,
+                                       enum drm_ref_type ref_action);
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_ref_object, you may
+ * not release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
+extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
+                              enum drm_object_type type,
+                              struct drm_user_object **object);
+extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
+                                enum drm_object_type type);
+
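+
+/*
+ * Locking sketch for the add/lookup/remove calls above (illustrative):
+ * each expects dev->struct_mutex to be held by the caller, e.g.
+ *
+ *     mutex_lock(&dev->struct_mutex);
+ *     item = drm_lookup_user_object(priv, token);
+ *     if (item)
+ *             ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
+ *     mutex_unlock(&dev->struct_mutex);
+ *
+ * drm_user_object_ref() / drm_user_object_unref() take the mutex
+ * themselves and are the more convenient ioctl-level helpers.
+ */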
+/***************************************************
+ * Fence objects. (drm_fence.c)
+ */
+
+struct drm_fence_object {
+       struct drm_user_object base;
+       struct drm_device *dev;
+       atomic_t usage;
+
+       /*
+        * The below three fields are protected by the fence manager spinlock.
+        */
+
+       struct list_head ring;
+       int fence_class;
+       uint32_t native_types;
+       uint32_t type;
+       uint32_t signaled_types;
+       uint32_t sequence;
+       uint32_t waiting_types;
+       uint32_t error;
+};
+
+#define _DRM_FENCE_CLASSES 8
+
+struct drm_fence_class_manager {
+       struct list_head ring;
+       uint32_t pending_flush;
+       uint32_t waiting_types;
+       wait_queue_head_t fence_queue;
+       uint32_t highest_waiting_sequence;
+       uint32_t latest_queued_sequence;
+};
+
+struct drm_fence_manager {
+       int initialized;
+       rwlock_t lock;
+       struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
+       uint32_t num_classes;
+       atomic_t count;
+};
+
+struct drm_fence_driver {
+       unsigned long *waiting_jiffies;
+       uint32_t num_classes;
+       uint32_t wrap_diff;
+       uint32_t flush_diff;
+       uint32_t sequence_mask;
+
+       /*
+        * Driver implemented functions:
+        * has_irq() : 1 if the hardware can update the indicated type_flags using an
+        * irq handler. 0 if polling is required.
+        *
+        * emit() : Emit a sequence number to the command stream.
+        * Return the sequence number.
+        *
+        * flush() : Make sure the flags indicated in fc->pending_flush will eventually
+        * signal for fc->highest_received_sequence and all preceding sequences.
+        * Acknowledge by clearing the flags fc->pending_flush.
+        *
+        * poll() : Call drm_fence_handler with any new information.
+        *
+        * needed_flush() : Given the current state of the fence->type flags and
+        * previously executed or queued flushes, return the type_flags that need
+        * flushing.
+        *
+        * wait(): Wait for the "mask" flags to signal on a given fence, performing
+        * whatever's necessary to make this happen.
+        */
+
+       int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
+                       uint32_t flags);
+       int (*emit) (struct drm_device *dev, uint32_t fence_class,
+                    uint32_t flags, uint32_t *breadcrumb,
+                    uint32_t *native_type);
+       void (*flush) (struct drm_device *dev, uint32_t fence_class);
+       void (*poll) (struct drm_device *dev, uint32_t fence_class,
+               uint32_t types);
+       uint32_t (*needed_flush) (struct drm_fence_object *fence);
+       int (*wait) (struct drm_fence_object *fence, int lazy,
+                    int interruptible, uint32_t mask);
+};
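+
+/*
+ * Sketch of a driver-side fence driver table (hypothetical "my_*"
+ * callbacks, shown for illustration, not a definitive implementation):
+ *
+ *     static struct drm_fence_driver my_fence_driver = {
+ *             .num_classes    = 1,
+ *             .wrap_diff      = (1U << 30),
+ *             .flush_diff     = (1U << 29),
+ *             .sequence_mask  = 0xffffffffU,
+ *             .has_irq        = my_fence_has_irq,
+ *             .emit           = my_fence_emit,
+ *             .poll           = my_fence_poll,
+ *     };
+ *
+ * Whether the remaining callbacks (flush, needed_flush, wait) may be
+ * left NULL depends on the checks in drm_fence.c.
+ */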
+
+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
+                                 int interruptible, uint32_t mask,
+                                 unsigned long end_jiffies);
+extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
+                             uint32_t sequence, uint32_t type,
+                             uint32_t error);
+extern void drm_fence_manager_init(struct drm_device *dev);
+extern void drm_fence_manager_takedown(struct drm_device *dev);
+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
+                               uint32_t sequence);
+extern int drm_fence_object_flush(struct drm_fence_object *fence,
+                                 uint32_t type);
+extern int drm_fence_object_signaled(struct drm_fence_object *fence,
+                                    uint32_t type);
+extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
+                                        struct drm_fence_object *src);
+extern int drm_fence_object_wait(struct drm_fence_object *fence,
+                                int lazy, int ignore_signals, uint32_t mask);
+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
+                                  uint32_t fence_flags, uint32_t fence_class,
+                                  struct drm_fence_object **c_fence);
+extern int drm_fence_object_emit(struct drm_fence_object *fence,
+                                uint32_t fence_flags, uint32_t class,
+                                uint32_t type);
+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
+                              struct drm_fence_arg *arg);
+
+extern int drm_fence_add_user_object(struct drm_file *priv,
+                                    struct drm_fence_object *fence,
+                                    int shareable);
+
+extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv);
+extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv);
+extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file_priv);
+extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv);
+extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv);
+/**************************************************
+ * TTMs
+ */
+
+/*
+ * The TTM backend GTT interface (in our case AGP). Any similar type of
+ * device (PCIe?) needs only to implement these functions to be usable
+ * with the TTM interface. The AGP backend implementation lives in
+ * drm_agpsupport.c and basically maps these calls to the available
+ * functions in agpgart. Each drm device driver gets an additional
+ * function pointer that creates these types, so that the device can
+ * choose the correct aperture (multiple AGP apertures, etc.). Most
+ * device drivers will let this point to the standard AGP implementation.
+ */
+
+#define DRM_BE_FLAG_NEEDS_FREE     0x00000001
+#define DRM_BE_FLAG_BOUND_CACHED   0x00000002
+
+struct drm_ttm_backend;
+struct drm_ttm_backend_func {
+       int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
+       int (*populate) (struct drm_ttm_backend *backend,
+                        unsigned long num_pages, struct page **pages);
+       void (*clear) (struct drm_ttm_backend *backend);
+       int (*bind) (struct drm_ttm_backend *backend,
+                    struct drm_bo_mem_reg *bo_mem);
+       int (*unbind) (struct drm_ttm_backend *backend);
+       void (*destroy) (struct drm_ttm_backend *backend);
+};
+
+
+struct drm_ttm_backend {
+       struct drm_device *dev;
+       uint32_t flags;
+       struct drm_ttm_backend_func *func;
+};
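+
+/*
+ * Sketch of a backend function table (hypothetical "my_*" callbacks;
+ * the AGP implementation in drm_agpsupport.c is the real reference):
+ *
+ *     static struct drm_ttm_backend_func my_ttm_backend_func = {
+ *             .needs_ub_cache_adjust  = my_needs_cache_adjust,
+ *             .populate               = my_populate,
+ *             .clear                  = my_clear,
+ *             .bind                   = my_bind,
+ *             .unbind                 = my_unbind,
+ *             .destroy                = my_destroy,
+ *     };
+ */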
+
+struct drm_ttm {
+       struct page *dummy_read_page;
+       struct page **pages;
+       uint32_t page_flags;
+       unsigned long num_pages;
+       atomic_t vma_count;
+       struct drm_device *dev;
+       int destroy;
+       uint32_t mapping_offset;
+       struct drm_ttm_backend *be;
+       enum {
+               ttm_bound,
+               ttm_evicted,
+               ttm_unbound,
+               ttm_unpopulated,
+       } state;
+
+};
+
+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
+extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
+extern void drm_ttm_unbind(struct drm_ttm *ttm);
+extern void drm_ttm_evict(struct drm_ttm *ttm);
+extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
+extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
+extern void drm_ttm_cache_flush(void);
+extern int drm_ttm_populate(struct drm_ttm *ttm);
+extern int drm_ttm_set_user(struct drm_ttm *ttm,
+                           struct task_struct *tsk,
+                           int write,
+                           unsigned long start,
+                           unsigned long num_pages,
+                           struct page *dummy_read_page);
+unsigned long drm_ttm_size(struct drm_device *dev,
+                          unsigned long num_pages,
+                          int user_bo);
+
+
+/*
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
+ * this, which calls this function iff there are no vmas referencing the ttm
+ * anymore. Otherwise it is called when the last vma exits.
+ */
+
+extern int drm_destroy_ttm(struct drm_ttm *ttm);
+
+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
+}
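+
+/*
+ * DRM_FLAG_MASKED copies only the bits selected by _mask from _new into
+ * _old, leaving the other bits of _old untouched. Worked example:
+ * _old = 0x5 (0101), _new = 0x3 (0011), _mask = 0x3 (0011) gives
+ * _old = 0x7 (0111): the low two bits come from _new, the high bits
+ * stay as they were.
+ */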
+
+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
+
+/*
+ * Page flags.
+ */
+
+#define DRM_TTM_PAGE_UNCACHED   (1 << 0)
+#define DRM_TTM_PAGE_USED       (1 << 1)
+#define DRM_TTM_PAGE_BOUND      (1 << 2)
+#define DRM_TTM_PAGE_PRESENT    (1 << 3)
+#define DRM_TTM_PAGE_VMALLOC    (1 << 4)
+#define DRM_TTM_PAGE_USER       (1 << 5)
+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
+#define DRM_TTM_PAGE_USER_DMA   (1 << 8)
+
+/***************************************************
+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
+ */
+
+struct drm_bo_mem_reg {
+       struct drm_mm_node *mm_node;
+       unsigned long size;
+       unsigned long num_pages;
+       uint32_t page_alignment;
+       uint32_t mem_type;
+       uint64_t flags;
+       uint64_t mask;
+       uint32_t desired_tile_stride;
+       uint32_t hw_tile_stride;
+};
+
+enum drm_bo_type {
+       drm_bo_type_dc,
+       drm_bo_type_user,
+       drm_bo_type_kernel, /* for initial kernel allocations */
+};
+
+struct drm_buffer_object {
+       struct drm_device *dev;
+       struct drm_user_object base;
+
+       /*
+        * If there is a possibility that the usage variable is zero,
+        * then dev->struct_mutex should be locked before incrementing it.
+        */
+
+       atomic_t usage;
+       unsigned long buffer_start;
+       enum drm_bo_type type;
+       unsigned long offset;
+       atomic_t mapped;
+       struct drm_bo_mem_reg mem;
+
+       struct list_head lru;
+       struct list_head ddestroy;
+
+       uint32_t fence_type;
+       uint32_t fence_class;
+       uint32_t new_fence_type;
+       uint32_t new_fence_class;
+       struct drm_fence_object *fence;
+       uint32_t priv_flags;
+       wait_queue_head_t event_queue;
+       struct mutex mutex;
+       unsigned long num_pages;
+       unsigned long reserved_size;
+
+       /* For pinned buffers */
+       struct drm_mm_node *pinned_node;
+       uint32_t pinned_mem_type;
+       struct list_head pinned_lru;
+
+       /* For vm */
+       struct drm_ttm *ttm;
+       struct drm_map_list map_list;
+       uint32_t memory_type;
+       unsigned long bus_offset;
+       uint32_t vm_flags;
+       void *iomap;
+
+#ifdef DRM_ODD_MM_COMPAT
+       /* dev->struct_mutex only protected. */
+       struct list_head vma_list;
+       struct list_head p_mm_list;
+#endif
+
+};
+
+#define _DRM_BO_FLAG_UNFENCED 0x00000001
+#define _DRM_BO_FLAG_EVICTED  0x00000002
+
+struct drm_mem_type_manager {
+       int has_type;
+       int use_type;
+       struct drm_mm manager;
+       struct list_head lru;
+       struct list_head pinned;
+       uint32_t flags;
+       uint32_t drm_bus_maptype;
+       unsigned long gpu_offset;
+       unsigned long io_offset;
+       unsigned long io_size;
+       void *io_addr;
+};
+
+struct drm_bo_lock {
+       struct drm_user_object base;
+       wait_queue_head_t queue;
+       atomic_t write_lock_pending;
+       atomic_t readers;
+};
+
+#define _DRM_FLAG_MEMTYPE_FIXED     0x00000001 /* Fixed (on-card) PCI memory */
+#define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002 /* Memory mappable */
+#define _DRM_FLAG_MEMTYPE_CACHED    0x00000004 /* Cached binding */
+#define _DRM_FLAG_NEEDS_IOREMAP     0x00000008 /* Fixed memory needs ioremap
+                                                  before kernel access. */
+#define _DRM_FLAG_MEMTYPE_CMA       0x00000010 /* Can't map aperture */
+#define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020 /* Select caching */
+
+struct drm_buffer_manager {
+       struct drm_bo_lock bm_lock;
+       struct mutex evict_mutex;
+       int nice_mode;
+       int initialized;
+       struct drm_file *last_to_validate;
+       struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
+       struct list_head unfenced;
+       struct list_head ddestroy;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+       struct work_struct wq;
+#else
+       struct delayed_work wq;
+#endif
+       uint32_t fence_type;
+       unsigned long cur_pages;
+       atomic_t count;
+       struct page *dummy_read_page;
+};
+
+struct drm_bo_driver {
+       const uint32_t *mem_type_prio;
+       const uint32_t *mem_busy_prio;
+       uint32_t num_mem_type_prio;
+       uint32_t num_mem_busy_prio;
+       struct drm_ttm_backend *(*create_ttm_backend_entry)
+        (struct drm_device *dev);
+       int (*backend_size) (struct drm_device *dev,
+                            unsigned long num_pages);
+       int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
+                          uint32_t *type);
+       int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
+       int (*init_mem_type) (struct drm_device *dev, uint32_t type,
+                             struct drm_mem_type_manager *man);
+       uint32_t (*evict_mask) (struct drm_buffer_object *bo);
+       int (*move) (struct drm_buffer_object *bo,
+                    int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
+       void (*ttm_cache_flush)(struct drm_ttm *ttm);
+
+       /*
+        * command_stream_barrier
+        *
+        * @dev: The drm device.
+        *
+        * @bo: The buffer object to validate.
+        *
+        * @new_fence_class: The new fence class for the buffer object.
+        *
+        * @new_fence_type: The new fence type for the buffer object.
+        *
+        * @no_wait: whether this should give up and return -EBUSY
+        * if this operation would require sleeping
+        *
+        * Insert a command stream barrier that makes sure that the
+        * buffer is idle once the commands associated with the
+        * current validation are starting to execute. If an error
+        * condition is returned, or the function pointer is NULL,
+        * the drm core will force buffer idle
+        * during validation.
+        */
+
+       int (*command_stream_barrier) (struct drm_buffer_object *bo,
+                                      uint32_t new_fence_class,
+                                      uint32_t new_fence_type,
+                                      int no_wait);
+};
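+
+/*
+ * Sketch of a command_stream_barrier implementation (hypothetical): a
+ * driver that cannot pipeline the barrier can simply wait for the
+ * buffer to idle, which is also what the core does when the pointer is
+ * NULL or an error is returned:
+ *
+ *     static int my_command_stream_barrier(struct drm_buffer_object *bo,
+ *                                          uint32_t new_fence_class,
+ *                                          uint32_t new_fence_type,
+ *                                          int no_wait)
+ *     {
+ *             return drm_bo_wait(bo, 0, 0, no_wait);
+ *     }
+ */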
+
+/*
+ * buffer objects (drm_bo.c)
+ */
+extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
+extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_driver_finish(struct drm_device *dev);
+extern int drm_bo_driver_init(struct drm_device *dev);
+extern int drm_bo_pci_offset(struct drm_device *dev,
+                            struct drm_bo_mem_reg *mem,
+                            unsigned long *bus_base,
+                            unsigned long *bus_offset,
+                            unsigned long *bus_size);
+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
+
+extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
+extern void drm_putback_buffer_objects(struct drm_device *dev);
+extern int drm_fence_buffer_objects(struct drm_device *dev,
+                                   struct list_head *list,
+                                   uint32_t fence_flags,
+                                   struct drm_fence_object *fence,
+                                   struct drm_fence_object **used_fence);
+extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
+                                   enum drm_bo_type type, uint64_t mask,
+                                   uint32_t hint, uint32_t page_alignment,
+                                   unsigned long buffer_start,
+                                   struct drm_buffer_object **bo);
+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
+                      int no_wait);
+extern int drm_bo_mem_space(struct drm_buffer_object *bo,
+                           struct drm_bo_mem_reg *mem, int no_wait);
+extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
+                             uint64_t new_mem_flags,
+                             int no_wait, int move_unfenced);
+extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
+                         unsigned long p_offset, unsigned long p_size);
+extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
+                                 uint32_t fence_class, uint64_t flags,
+                                 uint64_t mask, uint32_t hint,
+                                 int use_old_fence_class,
+                                 struct drm_bo_info_rep *rep,
+                                 struct drm_buffer_object **bo_rep);
+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
+                                                         uint32_t handle,
+                                                         int check_owner);
+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
+                             uint64_t flags, uint64_t mask, uint32_t hint,
+                             uint32_t fence_class,
+                             int no_wait,
+                             struct drm_bo_info_rep *rep);
+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
+                               struct drm_bo_info_rep *rep);
+/*
+ * Buffer object memory move- and map helpers.
+ * drm_bo_move.c
+ */
+
+extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
+                          int evict, int no_wait,
+                          struct drm_bo_mem_reg *new_mem);
+extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
+                             int evict,
+                             int no_wait, struct drm_bo_mem_reg *new_mem);
+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
+                                    int evict, int no_wait,
+                                    uint32_t fence_class, uint32_t fence_type,
+                                    uint32_t fence_flags,
+                                    struct drm_bo_mem_reg *new_mem);
+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
+extern unsigned long drm_bo_offset_end(unsigned long offset,
+                                      unsigned long end);
+
+struct drm_bo_kmap_obj {
+       void *virtual;
+       struct page *page;
+       enum {
+               bo_map_iomap,
+               bo_map_vmap,
+               bo_map_kmap,
+               bo_map_premapped,
+       } bo_kmap_type;
+};
+
+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
+{
+       *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
+                    map->bo_kmap_type == bo_map_premapped);
+       return map->virtual;
+}
+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+                      unsigned long num_pages, struct drm_bo_kmap_obj *map);
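+
+/*
+ * Kmap usage sketch (illustrative): map the first page of a buffer
+ * object, honour the iomem distinction, then unmap.
+ *
+ *     struct drm_bo_kmap_obj kmap;
+ *     int is_iomem;
+ *     void *virtual;
+ *     int ret;
+ *
+ *     ret = drm_bo_kmap(bo, 0, 1, &kmap);
+ *     if (ret)
+ *             return ret;
+ *     virtual = drm_bmo_virtual(&kmap, &is_iomem);
+ *     ... use memcpy_toio()/memcpy_fromio() if is_iomem is set ...
+ *     drm_bo_kunmap(&kmap);
+ */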
+
+
+/*
+ * drm_regman.c
+ */
+
+struct drm_reg {
+       struct list_head head;
+       struct drm_fence_object *fence;
+       uint32_t fence_type;
+       uint32_t new_fence_type;
+};
+
+struct drm_reg_manager {
+       struct list_head free;
+       struct list_head lru;
+       struct list_head unfenced;
+
+       int (*reg_reusable)(const struct drm_reg *reg, const void *data);
+       void (*reg_destroy)(struct drm_reg *reg);
+};
+
+extern int drm_regs_alloc(struct drm_reg_manager *manager,
+                         const void *data,
+                         uint32_t fence_class,
+                         uint32_t fence_type,
+                         int interruptible,
+                         int no_wait,
+                         struct drm_reg **reg);
+
+extern void drm_regs_fence(struct drm_reg_manager *regs,
+                          struct drm_fence_object *fence);
+
+extern void drm_regs_free(struct drm_reg_manager *manager);
+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
+extern void drm_regs_init(struct drm_reg_manager *manager,
+                         int (*reg_reusable)(const struct drm_reg *,
+                                             const void *),
+                         void (*reg_destroy)(struct drm_reg *));
+
+extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
+                              void **virtual);
+extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
+                               void *virtual);
+/*
+ * drm_bo_lock.c
+ * Simple replacement for the hardware lock on buffer manager init and clean.
+ */
+
+
+extern void drm_bo_init_lock(struct drm_bo_lock *lock);
+extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
+extern int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible);
+extern int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
+                            struct drm_file *file_priv);
+
+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
+                              struct drm_file *file_priv);
+
+#ifdef CONFIG_DEBUG_MUTEXES
+#define DRM_ASSERT_LOCKED(_mutex)                                      \
+       BUG_ON(!mutex_is_locked(_mutex) ||                              \
+              ((_mutex)->owner != current_thread_info()))
+#else
+#define DRM_ASSERT_LOCKED(_mutex)
+#endif
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_os_linux.h b/psb-kernel-source-4.41.1/drm_os_linux.h
new file mode 100644 (file)
index 0000000..8921944
--- /dev/null
@@ -0,0 +1,145 @@
+/**
+ * \file drm_os_linux.h
+ * OS abstraction macros.
+ */
+
+#include <linux/interrupt.h>   /* For task queue support */
+#include <linux/delay.h>
+
+/** Current process ID */
+#define DRM_CURRENTPID                 current->pid
+#define DRM_SUSER(p)                   capable(CAP_SYS_ADMIN)
+#define DRM_UDELAY(d)                  udelay(d)
+#if LINUX_VERSION_CODE <= 0x020608     /* KERNEL_VERSION(2,6,8) */
+#ifndef __iomem
+#define __iomem
+#endif
+/** Read a byte from a MMIO region */
+#define DRM_READ8(map, offset)         readb(((void __iomem *)(map)->handle) + (offset))
+/** Read a word from a MMIO region */
+#define DRM_READ16(map, offset)                readw(((void __iomem *)(map)->handle) + (offset))
+/** Read a dword from a MMIO region */
+#define DRM_READ32(map, offset)                readl(((void __iomem *)(map)->handle) + (offset))
+/** Write a byte into a MMIO region */
+#define DRM_WRITE8(map, offset, val)   writeb(val, ((void __iomem *)(map)->handle) + (offset))
+/** Write a word into a MMIO region */
+#define DRM_WRITE16(map, offset, val)  writew(val, ((void __iomem *)(map)->handle) + (offset))
+/** Write a dword into a MMIO region */
+#define DRM_WRITE32(map, offset, val)  writel(val, ((void __iomem *)(map)->handle) + (offset))
+#else
+/** Read a byte from a MMIO region */
+#define DRM_READ8(map, offset)         readb((map)->handle + (offset))
+/** Read a word from a MMIO region */
+#define DRM_READ16(map, offset)                readw((map)->handle + (offset))
+/** Read a dword from a MMIO region */
+#define DRM_READ32(map, offset)                readl((map)->handle + (offset))
+/** Write a byte into a MMIO region */
+#define DRM_WRITE8(map, offset, val)   writeb(val, (map)->handle + (offset))
+/** Write a word into a MMIO region */
+#define DRM_WRITE16(map, offset, val)  writew(val, (map)->handle + (offset))
+/** Write a dword into a MMIO region */
+#define DRM_WRITE32(map, offset, val)  writel(val, (map)->handle + (offset))
+#endif
+/** Read memory barrier */
+#define DRM_READMEMORYBARRIER()                rmb()
+/** Write memory barrier */
+#define DRM_WRITEMEMORYBARRIER()       wmb()
+/** Read/write memory barrier */
+#define DRM_MEMORYBARRIER()            mb()
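+
+/*
+ * MMIO sketch (hypothetical names): ring a doorbell register and order
+ * the write against later CPU stores. "dev_priv->mmio_map" and
+ * "MY_DOORBELL" are illustrative only.
+ *
+ *     DRM_WRITE32(dev_priv->mmio_map, MY_DOORBELL, 1);
+ *     DRM_WRITEMEMORYBARRIER();
+ */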
+
+/** IRQ handler arguments and return type and values */
+#define DRM_IRQ_ARGS           int irq, void *arg
+/** backwards compatibility with old irq return values */
+#ifndef IRQ_HANDLED
+typedef void irqreturn_t;
+#define IRQ_HANDLED            /* nothing */
+#define IRQ_NONE               /* nothing */
+#endif
+
+/** AGP types */
+#if __OS_HAS_AGP
+#define DRM_AGP_MEM            struct agp_memory
+#define DRM_AGP_KERN           struct agp_kern_info
+#else
+/* define some dummy types for non AGP supporting kernels */
+struct no_agp_kern {
+       unsigned long aper_base;
+       unsigned long aper_size;
+};
+#define DRM_AGP_MEM            int
+#define DRM_AGP_KERN           struct no_agp_kern
+#endif
+
+#if !(__OS_HAS_MTRR)
+static __inline__ int mtrr_add(unsigned long base, unsigned long size,
+                              unsigned int type, char increment)
+{
+       return -ENODEV;
+}
+
+static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
+{
+       return -ENODEV;
+}
+
+#define MTRR_TYPE_WRCOMB     1
+#endif
+
+/** Other copying of data to kernel space */
+#define DRM_COPY_FROM_USER(arg1, arg2, arg3)           \
+       copy_from_user(arg1, arg2, arg3)
+/** Other copying of data from kernel space */
+#define DRM_COPY_TO_USER(arg1, arg2, arg3)             \
+       copy_to_user(arg1, arg2, arg3)
+/* Macros for copy-from-user, but checking readability only once */
+#define DRM_VERIFYAREA_READ( uaddr, size )             \
+       (access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+       __copy_from_user(arg1, arg2, arg3)
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)   \
+       __copy_to_user(arg1, arg2, arg3)
+#define DRM_GET_USER_UNCHECKED(val, uaddr)             \
+       __get_user(val, uaddr)
+
+#define DRM_HZ HZ
+
+#define DRM_WAIT_ON( ret, queue, timeout, condition )          \
+do {                                                           \
+       DECLARE_WAITQUEUE(entry, current);                      \
+       unsigned long end = jiffies + (timeout);                \
+       add_wait_queue(&(queue), &entry);                       \
+                                                               \
+       for (;;) {                                              \
+               __set_current_state(TASK_INTERRUPTIBLE);        \
+               if (condition)                                  \
+                       break;                                  \
+               if (time_after_eq(jiffies, end)) {              \
+                       ret = -EBUSY;                           \
+                       break;                                  \
+               }                                               \
+               schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);    \
+               if (signal_pending(current)) {                  \
+                       ret = -EINTR;                           \
+                       break;                                  \
+               }                                               \
+       }                                                       \
+       __set_current_state(TASK_RUNNING);                      \
+       remove_wait_queue(&(queue), &entry);                    \
+} while (0)
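+
+/*
+ * DRM_WAIT_ON usage sketch (hypothetical driver code): wait up to three
+ * seconds for an irq-updated counter to reach a target value. Yields
+ * ret == -EBUSY on timeout and -EINTR when a signal is pending.
+ *
+ *     int ret = 0;
+ *
+ *     DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+ *                 dev_priv->irq_received >= target);
+ *     if (ret)
+ *             return ret;
+ */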
+
+#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
+#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
+
+/** Type for the OS's non-sleepable mutex lock */
+#define DRM_SPINTYPE           spinlock_t
+/**
+ * Initialize the lock for use. "name" is an optional string describing
+ * the lock.
+ */
+#define DRM_SPININIT(l,name)   spin_lock_init(l)
+#define DRM_SPINUNINIT(l)
+#define DRM_SPINLOCK(l)                spin_lock(l)
+#define DRM_SPINUNLOCK(l)      spin_unlock(l)
+#define DRM_SPINLOCK_IRQSAVE(l, _flags)        spin_lock_irqsave(l, _flags)
+#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags)
+#define DRM_SPINLOCK_ASSERT(l)         do {} while (0)
diff --git a/psb-kernel-source-4.41.1/drm_pci.c b/psb-kernel-source-4.41.1/drm_pci.c
new file mode 100644 (file)
index 0000000..7569286
--- /dev/null
@@ -0,0 +1,177 @@
+/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctl's for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "drmP.h"
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+
+/**
+ * \brief Allocate a PCI consistent memory block, for DMA.
+ */
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
+                               dma_addr_t maxaddr)
+{
+       drm_dma_handle_t *dmah;
+       unsigned long addr;
+       size_t sz;
+#ifdef DRM_DEBUG_MEMORY
+       int area = DRM_MEM_DMA;
+
+       spin_lock(&drm_mem_lock);
+       if ((drm_ram_used >> PAGE_SHIFT)
+           > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+               spin_unlock(&drm_mem_lock);
+               return NULL;
+       }
+       spin_unlock(&drm_mem_lock);
+#endif
+
+       /* pci_alloc_consistent only guarantees alignment to the smallest
+        * PAGE_SIZE order which is greater than or equal to the requested size.
+        * Return NULL here for now to make sure nobody tries for larger
+        * alignment.
+        */
+       if (align > size)
+               return NULL;
+
+       if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
+               DRM_ERROR("Setting pci dma mask failed\n");
+               return NULL;
+       }
+
+       dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+       if (!dmah)
+               return NULL;
+
+       dmah->size = size;
+       dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+
+#ifdef DRM_DEBUG_MEMORY
+       if (dmah->vaddr == NULL) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[area].fail_count;
+               spin_unlock(&drm_mem_lock);
+               kfree(dmah);
+               return NULL;
+       }
+
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_allocated += size;
+       drm_ram_used += size;
+       spin_unlock(&drm_mem_lock);
+#else
+       if (dmah->vaddr == NULL) {
+               kfree(dmah);
+               return NULL;
+       }
+#endif
+
+       memset(dmah->vaddr, 0, size);
+
+       /* XXX - Is virt_to_page() legal for consistent mem? */
+       /* Reserve */
+       for (addr = (unsigned long)dmah->vaddr, sz = size;
+            sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+               SetPageReserved(virt_to_page(addr));
+       }
+
+       return dmah;
+}
+EXPORT_SYMBOL(drm_pci_alloc);
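+
+/*
+ * Usage sketch: allocate a page-sized, page-aligned DMA buffer
+ * reachable by a 32-bit device, then release it again.
+ *
+ *     drm_dma_handle_t *dmah;
+ *
+ *     dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffffUL);
+ *     if (!dmah)
+ *             return -ENOMEM;
+ *     ... dmah->vaddr is the kernel mapping, dmah->busaddr the bus
+ *     address to program into the device ...
+ *     drm_pci_free(dev, dmah);
+ */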
+
+/**
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
+ */
+void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
+{
+       unsigned long addr;
+       size_t sz;
+#ifdef DRM_DEBUG_MEMORY
+       int area = DRM_MEM_DMA;
+       int alloc_count;
+       int free_count;
+#endif
+
+       if (!dmah->vaddr) {
+#ifdef DRM_DEBUG_MEMORY
+               DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+#endif
+       } else {
+               /* XXX - Is virt_to_page() legal for consistent mem? */
+               /* Unreserve */
+               for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+                    sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+                       ClearPageReserved(virt_to_page(addr));
+               }
+               dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+                                 dmah->busaddr);
+       }
+
+#ifdef DRM_DEBUG_MEMORY
+       spin_lock(&drm_mem_lock);
+       free_count = ++drm_mem_stats[area].free_count;
+       alloc_count = drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_freed += dmah->size;
+       drm_ram_used -= dmah->size;
+       spin_unlock(&drm_mem_lock);
+       if (free_count > alloc_count) {
+               DRM_MEM_ERROR(area,
+                             "Excess frees: %d frees, %d allocs\n",
+                             free_count, alloc_count);
+       }
+#endif
+
+}
+
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
+{
+       __drm_pci_free(dev, dmah);
+       kfree(dmah);
+}
+EXPORT_SYMBOL(drm_pci_free);
+
+/*@}*/
diff --git a/psb-kernel-source-4.41.1/drm_pciids.h b/psb-kernel-source-4.41.1/drm_pciids.h
new file mode 100644 (file)
index 0000000..c8c8318
--- /dev/null
@@ -0,0 +1,511 @@
+/*
+   This file is auto-generated from the drm_pciids.txt in the DRM CVS.
+   Please contact dri-devel@lists.sf.net to add new cards to this list.
+*/
+#define radeon_PCI_IDS \
+       {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
+       {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
+       {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
+       {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+       {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+       {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+       {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+       {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+       {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+       {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+       {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+       {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+       {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+       {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+       {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+       {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+       {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
+       {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
+       {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+       {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+       {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+       {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
+       {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+       {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+       {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+       {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+       {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+       {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+       {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+       {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0, 0, 0}
+
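+/*
+ * Each row above is laid out in struct pci_device_id field order from
+ * <linux/pci.h>: {vendor, device, subvendor, subdevice, class,
+ * class_mask, driver_data}.  Vendor 0x1002 is ATI; subvendor and
+ * subdevice are wildcarded with PCI_ANY_ID, class matching is unused,
+ * and the CHIP_* type plus RADEON_* feature flags are OR'd together
+ * into driver_data for the driver to unpack at probe time.  Every
+ * *_PCI_IDS macro in this header follows the same layout.
+ */
+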
+#define r128_PCI_IDS \
+       {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define mga_PCI_IDS \
+       {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+       {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+       {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+       {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
+       {0, 0, 0}
+
+#define mach64_PCI_IDS \
+       {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define sis_PCI_IDS \
+       {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+       {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+       {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+       {0, 0, 0}
+
+#define tdfx_PCI_IDS \
+       {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define viadrv_PCI_IDS \
+       {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
+       {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+       {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
+       {0, 0, 0}
+
+#define i810_PCI_IDS \
+       {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define i830_PCI_IDS \
+       {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define gamma_PCI_IDS \
+       {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define savage_PCI_IDS \
+       {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+       {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+       {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+       {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+       {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+       {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+       {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+       {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+       {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+       {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+       {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+       {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+       {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+       {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+       {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+       {0, 0, 0}
+
+#define ffb_PCI_IDS \
+       {0, 0, 0}
+
+#define i915_PCI_IDS \
+       {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
+       {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
+       {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
+       {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
+       {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0x8086, 0x27A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0x8086, 0x27AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
+       {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
+       {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
+       {0x8086, 0x29A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
+       {0x8086, 0x2A02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
+       {0x8086, 0x2A12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
+       {0x8086, 0x29C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0x8086, 0x29B2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0x8086, 0x29D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
+       {0, 0, 0}
+
+#define psb_PCI_IDS \
+       {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
+       {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
+       {0, 0, 0}
+
+#define imagine_PCI_IDS \
+       {0x105d, 0x2309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128}, \
+       {0x105d, 0x2339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128_2}, \
+       {0x105d, 0x493d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_T2R}, \
+       {0x105d, 0x5348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_REV4}, \
+       {0, 0, 0}
+
+#define nv_PCI_IDS \
+       {0x10DE, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
+       {0x10DE, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
+       {0x10DE, 0x002A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
+       {0x10DE, 0x002C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
+       {0x10DE, 0x0029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
+       {0x10DE, 0x002D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
+       {0x10DE, 0x00A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
+       {0x10DE, 0x0100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0113, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0171, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0172, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0173, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0174, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0175, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0176, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0178, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0179, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x017A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x017C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x017D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0189, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x018A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x018B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x018C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x018D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x01A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x01F0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
+       {0x10DE, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0203, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0251, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0252, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0258, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0259, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x025B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0282, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x028C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
+       {0x10DE, 0x0301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x031A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x031B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x031C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x031D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x031E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x031F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0323, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0327, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0329, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x032A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x032B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x032C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x032D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x032F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0331, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0332, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0333, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x033F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0334, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0338, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0342, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0345, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0349, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x034B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x034C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x034E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x034F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
+       {0x10DE, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x004E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x00CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10de, 0x00f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10de, 0x00f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x014B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x014C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x014D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x014E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x014F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0161, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0163, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0164, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0165, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0166, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0167, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x016B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x016C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x016D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x016E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0212, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0215, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0228, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0090, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0091, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0092, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0093, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0094, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0098, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x0099, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x009C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x009D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0x10DE, 0x009E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
+       {0, 0, 0}
+
+#define xgi_PCI_IDS \
+       {0x18ca, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x18ca, 0x0047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
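Taken together, these macros let each legacy DRM driver declare its PCI ID table in a single line. The sketch below shows the typical consumption pattern, assuming the usual struct pci_device_id layout from <linux/pci.h>; example_pciidlist and example_probe are hypothetical names, not part of this patch, and the CHIP_*/RADEON_* flags are the ones the radeon driver headers define.

    #include <linux/pci.h>

    /* Sketch only: expand the macro into a standard PCI ID table.
     * radeon_PCI_IDS supplies every row, including the {0, 0, 0}
     * sentinel that terminates the list. */
    static struct pci_device_id example_pciidlist[] = {
            radeon_PCI_IDS
    };

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
    {
            /* The chip type and feature flags ride in driver_data. */
            unsigned long flags = ent->driver_data;

            if (flags & RADEON_IS_MOBILITY) {
                    /* e.g. take the laptop power-handling path */
            }
            return 0;
    }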
diff --git a/psb-kernel-source-4.41.1/drm_pciids.txt b/psb-kernel-source-4.41.1/drm_pciids.txt
new file mode 100644
index 0000000..d90befa
--- /dev/null
@@ -0,0 +1,490 @@
+[radeon]
+0x1002 0x3150 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 M24"
+0x1002 0x3152 CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X300 M24"
+0x1002 0x3154 CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI FireGL M24 GL"
+0x1002 0x3E50 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV380 X600"
+0x1002 0x3E54 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireGL V3200 RV380"
+0x1002 0x4136 CHIP_RS100|RADEON_IS_IGP "ATI Radeon RS100 IGP 320"
+0x1002 0x4137 CHIP_RS200|RADEON_IS_IGP "ATI Radeon RS200 IGP 340"
+0x1002 0x4144 CHIP_R300 "ATI Radeon AD 9500"
+0x1002 0x4145 CHIP_R300 "ATI Radeon AE 9700 Pro"
+0x1002 0x4146 CHIP_R300 "ATI Radeon AF R300 9600TX"
+0x1002 0x4147 CHIP_R300 "ATI FireGL AG Z1"
+0x1002 0x4148 CHIP_R350 "ATI Radeon AH 9800 SE"
+0x1002 0x4149 CHIP_R350 "ATI Radeon AI 9800"
+0x1002 0x414A CHIP_R350 "ATI Radeon AJ 9800"
+0x1002 0x414B CHIP_R350 "ATI FireGL AK X2"
+0x1002 0x4150 CHIP_RV350 "ATI Radeon AP 9600"
+0x1002 0x4151 CHIP_RV350 "ATI Radeon AQ 9600 SE"
+0x1002 0x4152 CHIP_RV350 "ATI Radeon AR 9600 XT"
+0x1002 0x4153 CHIP_RV350 "ATI Radeon AS 9550"
+0x1002 0x4154 CHIP_RV350 "ATI FireGL AT T2"
+0x1002 0x4155 CHIP_RV350 "ATI Radeon 9650"
+0x1002 0x4156 CHIP_RV350 "ATI FireGL AV RV360 T2"
+0x1002 0x4237 CHIP_RS200|RADEON_IS_IGP "ATI Radeon RS250 IGP"
+0x1002 0x4242 CHIP_R200 "ATI Radeon BB R200 AIW 8500DV"
+0x1002 0x4243 CHIP_R200 "ATI Radeon BC R200"
+0x1002 0x4336 CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS100 Mobility U1"
+0x1002 0x4337 CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS200 Mobility IGP 340M"
+0x1002 0x4437 CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS250 Mobility IGP"
+0x1002 0x4966 CHIP_RV250 "ATI Radeon If RV250 9000"
+0x1002 0x4967 CHIP_RV250 "ATI Radeon Ig RV250 9000"
+0x1002 0x4A48 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JH R420 X800"
+0x1002 0x4A49 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JI R420 X800 Pro"
+0x1002 0x4A4A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JJ R420 X800 SE"
+0x1002 0x4A4B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JK R420 X800 XT"
+0x1002 0x4A4C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JL R420 X800"
+0x1002 0x4A4D CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL JM X3-256"
+0x1002 0x4A4E CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon JN R420 Mobility M18"
+0x1002 0x4A4F CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JO R420 X800 SE"
+0x1002 0x4A50 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JP R420 X800 XT PE"
+0x1002 0x4A54 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JT R420 AIW X800 VE"
+0x1002 0x4B49 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 XT"
+0x1002 0x4B4A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 SE"
+0x1002 0x4B4B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 Pro"
+0x1002 0x4B4C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 XT PE"
+0x1002 0x4C57 CHIP_RV200|RADEON_IS_MOBILITY "ATI Radeon LW RV200 Mobility 7500 M7"
+0x1002 0x4C58 CHIP_RV200|RADEON_IS_MOBILITY "ATI Radeon LX RV200 Mobility FireGL 7800 M7"
+0x1002 0x4C59 CHIP_RV100|RADEON_IS_MOBILITY "ATI Radeon LY RV100 Mobility M6"
+0x1002 0x4C5A CHIP_RV100|RADEON_IS_MOBILITY "ATI Radeon LZ RV100 Mobility M6"
+0x1002 0x4C64 CHIP_RV250|RADEON_IS_MOBILITY "ATI Radeon Ld RV250 Mobility 9000 M9"
+0x1002 0x4C66 CHIP_RV250 "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI"
+0x1002 0x4C67 CHIP_RV250|RADEON_IS_MOBILITY "ATI Radeon Lg RV250 Mobility 9000 M9"
+0x1002 0x4E44 CHIP_R300 "ATI Radeon ND R300 9700 Pro"
+0x1002 0x4E45 CHIP_R300 "ATI Radeon NE R300 9500 Pro / 9700"
+0x1002 0x4E46 CHIP_R300 "ATI Radeon NF R300 9600TX"
+0x1002 0x4E47 CHIP_R300 "ATI Radeon NG R300 FireGL X1"
+0x1002 0x4E48 CHIP_R350 "ATI Radeon NH R350 9800 Pro"
+0x1002 0x4E49 CHIP_R350 "ATI Radeon NI R350 9800"
+0x1002 0x4E4A CHIP_R350 "ATI Radeon NJ R360 9800 XT"
+0x1002 0x4E4B CHIP_R350 "ATI FireGL NK X2"
+0x1002 0x4E50 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NP"
+0x1002 0x4E51 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NQ"
+0x1002 0x4E52 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M11 NR"
+0x1002 0x4E53 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NS"
+0x1002 0x4E54 CHIP_RV350|RADEON_IS_MOBILITY "ATI FireGL T2/T2e"
+0x1002 0x4E56 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon Mobility 9550"
+0x1002 0x5144 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QD R100"
+0x1002 0x5145 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QE R100"
+0x1002 0x5146 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QF R100"
+0x1002 0x5147 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QG R100"
+0x1002 0x5148 CHIP_R200 "ATI Radeon QH R200 8500"
+0x1002 0x514C CHIP_R200 "ATI Radeon QL R200 8500 LE"
+0x1002 0x514D CHIP_R200 "ATI Radeon QM R200 9100"
+0x1002 0x5157 CHIP_RV200 "ATI Radeon QW RV200 7500"
+0x1002 0x5158 CHIP_RV200 "ATI Radeon QX RV200 7500"
+0x1002 0x5159 CHIP_RV100 "ATI Radeon QY RV100 7000/VE"
+0x1002 0x515A CHIP_RV100 "ATI Radeon QZ RV100 7000/VE"
+0x1002 0x515E CHIP_RV100 "ATI ES1000 RN50"
+0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22"
+0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C"
+0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464"
+0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800"
+0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
+0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
+0x1002 0x554B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 SE"
+0x1002 0x554C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XTP"
+0x1002 0x554D CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XL"
+0x1002 0x554E CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 SE"
+0x1002 0x554F CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800"
+0x1002 0x5550 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V7100 R423"
+0x1002 0x5551 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V5100 R423 UQ"
+0x1002 0x5552 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UR"
+0x1002 0x5554 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UT"
+0x1002 0x564A CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
+0x1002 0x564B CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
+0x1002 0x564F CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26"
+0x1002 0x5652 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
+0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
+0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP"
+0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP"
+0x1002 0x5954 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G"
+0x1002 0x5955 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5955"
+0x1002 0x5974 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS482 XPRESS 200"
+0x1002 0x5975 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS485 XPRESS 1100 IGP"
+0x1002 0x5960 CHIP_RV280 "ATI Radeon RV280 9250"
+0x1002 0x5961 CHIP_RV280 "ATI Radeon RV280 9200"
+0x1002 0x5962 CHIP_RV280 "ATI Radeon RV280 9200"
+0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE"
+0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI"
+0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50"
+0x1002 0x5a41 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200"
+0x1002 0x5a42 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200M"
+0x1002 0x5a61 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200"
+0x1002 0x5a62 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M"
+0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE"
+0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro"
+0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550"
+0x1002 0x5b64 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireGL V3100 (RV370) 5B64"
+0x1002 0x5b65 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireMV 2200 PCIE (RV370) 5B65"
+0x1002 0x5c61 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility"
+0x1002 0x5c63 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility"
+0x1002 0x5d48 CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 XT M28"
+0x1002 0x5d49 CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5100 M28"
+0x1002 0x5d4a CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 M28"
+0x1002 0x5d4c CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850"
+0x1002 0x5d4d CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT PE"
+0x1002 0x5d4e CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 SE"
+0x1002 0x5d4f CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 Pro"
+0x1002 0x5d50 CHIP_R420|RADEON_NEW_MEMMAP "ATI unknown Radeon / FireGL R480"
+0x1002 0x5d52 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT"
+0x1002 0x5d57 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT"
+0x1002 0x5e48 CHIP_RV410|RADEON_NEW_MEMMAP "ATI FireGL V5000 RV410"
+0x1002 0x5e4a CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 XT"
+0x1002 0x5e4b CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 Pro"
+0x1002 0x5e4c CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
+0x1002 0x5e4d CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700"
+0x1002 0x5e4f CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
+0x1002 0x7834 CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP"
+0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP"
+
+[r128]
+0x1002 0x4c45 0 "ATI Rage 128 Mobility LE (PCI)"
+0x1002 0x4c46 0 "ATI Rage 128 Mobility LF (AGP)"
+0x1002 0x4d46 0 "ATI Rage 128 Mobility MF (AGP)"
+0x1002 0x4d4c 0 "ATI Rage 128 Mobility ML (AGP)"
+0x1002 0x5041 0 "ATI Rage 128 Pro PA (PCI)"
+0x1002 0x5042 0 "ATI Rage 128 Pro PB (AGP)"
+0x1002 0x5043 0 "ATI Rage 128 Pro PC (AGP)"
+0x1002 0x5044 0 "ATI Rage 128 Pro PD (PCI)"
+0x1002 0x5045 0 "ATI Rage 128 Pro PE (AGP)"
+0x1002 0x5046 0 "ATI Rage 128 Pro PF (AGP)"
+0x1002 0x5047 0 "ATI Rage 128 Pro PG (PCI)"
+0x1002 0x5048 0 "ATI Rage 128 Pro PH (AGP)"
+0x1002 0x5049 0 "ATI Rage 128 Pro PI (AGP)"
+0x1002 0x504A 0 "ATI Rage 128 Pro PJ (PCI)"
+0x1002 0x504B 0 "ATI Rage 128 Pro PK (AGP)"
+0x1002 0x504C 0 "ATI Rage 128 Pro PL (AGP)"
+0x1002 0x504D 0 "ATI Rage 128 Pro PM (PCI)"
+0x1002 0x504E 0 "ATI Rage 128 Pro PN (AGP)"
+0x1002 0x504F 0 "ATI Rage 128 Pro PO (AGP)"
+0x1002 0x5050 0 "ATI Rage 128 Pro PP (PCI)"
+0x1002 0x5051 0 "ATI Rage 128 Pro PQ (AGP)"
+0x1002 0x5052 0 "ATI Rage 128 Pro PR (PCI)"
+0x1002 0x5053 0 "ATI Rage 128 Pro PS (PCI)"
+0x1002 0x5054 0 "ATI Rage 128 Pro PT (AGP)"
+0x1002 0x5055 0 "ATI Rage 128 Pro PU (AGP)"
+0x1002 0x5056 0 "ATI Rage 128 Pro PV (PCI)"
+0x1002 0x5057 0 "ATI Rage 128 Pro PW (AGP)"
+0x1002 0x5058 0 "ATI Rage 128 Pro PX (AGP)"
+0x1002 0x5245 0 "ATI Rage 128 RE (PCI)"
+0x1002 0x5246 0 "ATI Rage 128 RF (AGP)"
+0x1002 0x5247 0 "ATI Rage 128 RG (AGP)"
+0x1002 0x524b 0 "ATI Rage 128 RK (PCI)"
+0x1002 0x524c 0 "ATI Rage 128 RL (AGP)"
+0x1002 0x534d 0 "ATI Rage 128 SM (AGP)"
+0x1002 0x5446 0 "ATI Rage 128 Pro Ultra TF (AGP)"
+0x1002 0x544C 0 "ATI Rage 128 Pro Ultra TL (AGP)"
+0x1002 0x5452 0 "ATI Rage 128 Pro Ultra TR (AGP)"
+
+[mga]
+0x102b 0x0520 MGA_CARD_TYPE_G200 "Matrox G200 (PCI)"
+0x102b 0x0521 MGA_CARD_TYPE_G200 "Matrox G200 (AGP)"
+0x102b 0x0525 MGA_CARD_TYPE_G400 "Matrox G400/G450 (AGP)"
+0x102b 0x2527 MGA_CARD_TYPE_G550 "Matrox G550 (AGP)"
+
+[mach64]
+0x1002 0x4749 0 "3D Rage Pro"
+0x1002 0x4750 0 "3D Rage Pro 215GP"
+0x1002 0x4751 0 "3D Rage Pro 215GQ"
+0x1002 0x4742 0 "3D Rage Pro AGP 1X/2X"
+0x1002 0x4744 0 "3D Rage Pro AGP 1X"
+0x1002 0x4c49 0 "3D Rage LT Pro"
+0x1002 0x4c50 0 "3D Rage LT Pro"
+0x1002 0x4c51 0 "3D Rage LT Pro"
+0x1002 0x4c42 0 "3D Rage LT Pro AGP-133"
+0x1002 0x4c44 0 "3D Rage LT Pro AGP-66"
+0x1002 0x474c 0 "Rage XC"
+0x1002 0x474f 0 "Rage XL"
+0x1002 0x4752 0 "Rage XL"
+0x1002 0x4753 0 "Rage XC"
+0x1002 0x474d 0 "Rage XL AGP 2X"
+0x1002 0x474e 0 "Rage XC AGP"
+0x1002 0x4c52 0 "Rage Mobility P/M"
+0x1002 0x4c53 0 "Rage Mobility L"
+0x1002 0x4c4d 0 "Rage Mobility P/M AGP 2X"
+0x1002 0x4c4e 0 "Rage Mobility L AGP 2X"
+
+[sis]
+0x1039 0x0300 0 "SiS 300/305"
+0x1039 0x5300 0 "SiS 540"
+0x1039 0x6300 0 "SiS 630"
+0x1039 0x6330 SIS_CHIP_315 "SiS 661"
+0x1039 0x7300 0 "SiS 730"
+0x18CA 0x0040 SIS_CHIP_315 "Volari V3XT/V5/V8"
+0x18CA 0x0042 SIS_CHIP_315 "Volari Unknown"
+
+[tdfx]
+0x121a 0x0003 0 "3dfx Voodoo Banshee"
+0x121a 0x0004 0 "3dfx Voodoo3 2000"
+0x121a 0x0005 0 "3dfx Voodoo3 3000"
+0x121a 0x0007 0 "3dfx Voodoo4 4500"
+0x121a 0x0009 0 "3dfx Voodoo5 5500"
+0x121a 0x000b 0 "3dfx Voodoo4 4200"
+
+[viadrv]
+0x1106 0x3022 0 "VIA CLE266 3022"
+0x1106 0x3118 VIA_PRO_GROUP_A "VIA CN400 / PM8X0"
+0x1106 0x3122 0 "VIA CLE266"
+0x1106 0x7205 0 "VIA KM400"
+0x1106 0x3108 0 "VIA K8M800"
+0x1106 0x3344 0 "VIA CN700 / VM800 / P4M800Pro"
+0x1106 0x3343 0 "VIA P4M890"
+0x1106 0x3230 VIA_DX9_0 "VIA K8M890"
+0x1106 0x3157 VIA_PRO_GROUP_A "VIA CX700"
+
+[i810]
+0x8086 0x7121 0 "Intel i810 GMCH"
+0x8086 0x7123 0 "Intel i810-DC100 GMCH"
+0x8086 0x7125 0 "Intel i810E GMCH"
+0x8086 0x1132 0 "Intel i815 GMCH"
+
+[i830]
+0x8086 0x3577 0 "Intel i830M GMCH"
+0x8086 0x2562 0 "Intel i845G GMCH"
+0x8086 0x3582 0 "Intel i852GM/i855GM GMCH"
+0x8086 0x2572 0 "Intel i865G GMCH"
+
+[gamma]
+0x3d3d 0x0008 0 "3DLabs GLINT Gamma G1"
+
+[savage]
+0x5333 0x8a20 S3_SAVAGE3D "Savage 3D"
+0x5333 0x8a21 S3_SAVAGE3D "Savage 3D/MV"
+0x5333 0x8a22 S3_SAVAGE4 "Savage4"
+0x5333 0x8a23 S3_SAVAGE4 "Savage4"
+0x5333 0x8c10 S3_SAVAGE_MX "Savage/MX-MV"
+0x5333 0x8c11 S3_SAVAGE_MX "Savage/MX"
+0x5333 0x8c12 S3_SAVAGE_MX "Savage/IX-MV"
+0x5333 0x8c13 S3_SAVAGE_MX "Savage/IX"
+0x5333 0x8c22 S3_SUPERSAVAGE "SuperSavage MX/128"
+0x5333 0x8c24 S3_SUPERSAVAGE "SuperSavage MX/64"
+0x5333 0x8c26 S3_SUPERSAVAGE "SuperSavage MX/64C"
+0x5333 0x8c2a S3_SUPERSAVAGE "SuperSavage IX/128 SDR"
+0x5333 0x8c2b S3_SUPERSAVAGE "SuperSavage IX/128 DDR"
+0x5333 0x8c2c S3_SUPERSAVAGE "SuperSavage IX/64 SDR"
+0x5333 0x8c2d S3_SUPERSAVAGE "SuperSavage IX/64 DDR"
+0x5333 0x8c2e S3_SUPERSAVAGE "SuperSavage IX/C SDR"
+0x5333 0x8c2f S3_SUPERSAVAGE "SuperSavage IX/C DDR"
+0x5333 0x8a25 S3_PROSAVAGE "ProSavage PM133"
+0x5333 0x8a26 S3_PROSAVAGE "ProSavage KM133"
+0x5333 0x8d01 S3_TWISTER "ProSavage Twister PN133"
+0x5333 0x8d02 S3_TWISTER "ProSavage Twister KN133"
+0x5333 0x8d03 S3_PROSAVAGEDDR "ProSavage DDR"
+0x5333 0x8d04 S3_PROSAVAGEDDR "ProSavage DDR-K"
+
+[ffb]
+
+[i915]
+0x8086 0x3577 CHIP_I8XX "Intel i830M GMCH"
+0x8086 0x2562 CHIP_I8XX "Intel i845G GMCH"
+0x8086 0x3582 CHIP_I8XX "Intel i852GM/i855GM GMCH"
+0x8086 0x2572 CHIP_I8XX "Intel i865G GMCH"
+0x8086 0x2582 CHIP_I9XX|CHIP_I915 "Intel i915G"
+0x8086 0x2592 CHIP_I9XX|CHIP_I915 "Intel i915GM"
+0x8086 0x2772 CHIP_I9XX|CHIP_I915 "Intel i945G"
+0x8086 0x27A2 CHIP_I9XX|CHIP_I915 "Intel i945GM"
+0x8086 0x27AE CHIP_I9XX|CHIP_I915 "Intel i945GME"
+0x8086 0x2972 CHIP_I9XX|CHIP_I965 "Intel i946GZ"
+0x8086 0x2982 CHIP_I9XX|CHIP_I965 "Intel i965G"
+0x8086 0x2992 CHIP_I9XX|CHIP_I965 "Intel i965Q"
+0x8086 0x29A2 CHIP_I9XX|CHIP_I965 "Intel i965G"
+0x8086 0x2A02 CHIP_I9XX|CHIP_I965 "Intel i965GM"
+0x8086 0x2A12 CHIP_I9XX|CHIP_I965 "Intel i965GME/GLE"
+0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33"
+0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35"
+0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33"
+
+[psb]
+0x8086 0x8108 CHIP_PSB_8108 "Intel GMA500"
+0x8086 0x8109 CHIP_PSB_8109 "Intel GMA500"
+
+[imagine]
+0x105d 0x2309 IMAGINE_128 "Imagine 128"
+0x105d 0x2339 IMAGINE_128_2 "Imagine 128-II"
+0x105d 0x493d IMAGINE_T2R "Ticket to Ride"
+0x105d 0x5348 IMAGINE_REV4 "Revolution IV"
+
+[nv]
+0x10DE 0x0020 NV04 "NVidia RIVA TNT"
+0x10DE 0x0028 NV04 "NVidia RIVA TNT2"
+0x10DE 0x002A NV04 "NVidia Unknown TNT2"
+0x10DE 0x002C NV04 "NVidia Vanta"
+0x10DE 0x0029 NV04 "NVidia RIVA TNT2 Ultra"
+0x10DE 0x002D NV04 "NVidia RIVA TNT2 Model 64"
+0x10DE 0x00A0 NV04 "NVidia Aladdin TNT2"
+0x10DE 0x0100 NV10 "NVidia GeForce 256"
+0x10DE 0x0101 NV10 "NVidia GeForce DDR"
+0x10DE 0x0103 NV10 "NVidia Quadro"
+0x10DE 0x0110 NV10 "NVidia GeForce2 MX/MX 400"
+0x10DE 0x0111 NV10 "NVidia GeForce2 MX 100/200"
+0x10DE 0x0112 NV10 "NVidia GeForce2 Go"
+0x10DE 0x0113 NV10 "NVidia Quadro2 MXR/EX/Go"
+0x10DE 0x0150 NV10 "NVidia GeForce2 GTS"
+0x10DE 0x0151 NV10 "NVidia GeForce2 Ti"
+0x10DE 0x0152 NV10 "NVidia GeForce2 Ultra"
+0x10DE 0x0153 NV10 "NVidia Quadro2 Pro"
+0x10DE 0x0170 NV10 "NVidia GeForce4 MX 460"
+0x10DE 0x0171 NV10 "NVidia GeForce4 MX 440"
+0x10DE 0x0172 NV10 "NVidia GeForce4 MX 420"
+0x10DE 0x0173 NV10 "NVidia GeForce4 MX 440-SE"
+0x10DE 0x0174 NV10 "NVidia GeForce4 440 Go"
+0x10DE 0x0175 NV10 "NVidia GeForce4 420 Go"
+0x10DE 0x0176 NV10 "NVidia GeForce4 420 Go 32M"
+0x10DE 0x0177 NV10 "NVidia GeForce4 460 Go"
+0x10DE 0x0178 NV10 "NVidia Quadro4 550 XGL"
+0x10DE 0x0179 NV10 "NVidia GeForce4"
+0x10DE 0x017A NV10 "NVidia Quadro4 NVS"
+0x10DE 0x017C NV10 "NVidia Quadro4 500 GoGL"
+0x10DE 0x017D NV10 "NVidia GeForce4 410 Go 16M"
+0x10DE 0x0181 NV10 "NVidia GeForce4 MX 440 with AGP8X"
+0x10DE 0x0182 NV10 "NVidia GeForce4 MX 440SE with AGP8X"
+0x10DE 0x0183 NV10 "NVidia GeForce4 MX 420 with AGP8X"
+0x10DE 0x0185 NV10 "NVidia GeForce4 MX 4000"
+0x10DE 0x0186 NV10 "NVidia GeForce4 448 Go"
+0x10DE 0x0187 NV10 "NVidia GeForce4 488 Go"
+0x10DE 0x0188 NV10 "NVidia Quadro4 580 XGL"
+0x10DE 0x0189 NV10 "NVidia GeForce4 MX with AGP8X (Mac)"
+0x10DE 0x018A NV10 "NVidia Quadro4 280 NVS"
+0x10DE 0x018B NV10 "NVidia Quadro4 380 XGL"
+0x10DE 0x018C NV10 "NVidia Quadro NVS 50 PCI"
+0x10DE 0x018D NV10 "NVidia GeForce4 448 Go"
+0x10DE 0x01A0 NV10 "NVidia GeForce2 Integrated GPU"
+0x10DE 0x01F0 NV10 "NVidia GeForce4 MX Integrated GPU"
+0x10DE 0x0200 NV20 "NVidia GeForce3"
+0x10DE 0x0201 NV20 "NVidia GeForce3 Ti 200"
+0x10DE 0x0202 NV20 "NVidia GeForce3 Ti 500"
+0x10DE 0x0203 NV20 "NVidia Quadro DCC"
+0x10DE 0x0250 NV20 "NVidia GeForce4 Ti 4600"
+0x10DE 0x0251 NV20 "NVidia GeForce4 Ti 4400"
+0x10DE 0x0252 NV20 "NVidia 0x0252"
+0x10DE 0x0253 NV20 "NVidia GeForce4 Ti 4200"
+0x10DE 0x0258 NV20 "NVidia Quadro4 900 XGL"
+0x10DE 0x0259 NV20 "NVidia Quadro4 750 XGL"
+0x10DE 0x025B NV20 "NVidia Quadro4 700 XGL"
+0x10DE 0x0280 NV20 "NVidia GeForce4 Ti 4800"
+0x10DE 0x0281 NV20 "NVidia GeForce4 Ti 4200 with AGP8X"
+0x10DE 0x0282 NV20 "NVidia GeForce4 Ti 4800 SE"
+0x10DE 0x0286 NV20 "NVidia GeForce4 4200 Go"
+0x10DE 0x028C NV20 "NVidia Quadro4 700 GoGL"
+0x10DE 0x0288 NV20 "NVidia Quadro4 980 XGL"
+0x10DE 0x0289 NV20 "NVidia Quadro4 780 XGL"
+0x10DE 0x0301 NV30 "NVidia GeForce FX 5800 Ultra"
+0x10DE 0x0302 NV30 "NVidia GeForce FX 5800"
+0x10DE 0x0308 NV30 "NVidia Quadro FX 2000"
+0x10DE 0x0309 NV30 "NVidia Quadro FX 1000"
+0x10DE 0x0311 NV30 "NVidia GeForce FX 5600 Ultra"
+0x10DE 0x0312 NV30 "NVidia GeForce FX 5600"
+0x10DE 0x0313 NV30 "NVidia 0x0313"
+0x10DE 0x0314 NV30 "NVidia GeForce FX 5600SE"
+0x10DE 0x0316 NV30 "NVidia 0x0316"
+0x10DE 0x0317 NV30 "NVidia 0x0317"
+0x10DE 0x031A NV30 "NVidia GeForce FX Go5600"
+0x10DE 0x031B NV30 "NVidia GeForce FX Go5650"
+0x10DE 0x031C NV30 "NVidia Quadro FX Go700"
+0x10DE 0x031D NV30 "NVidia 0x031D"
+0x10DE 0x031E NV30 "NVidia 0x031E"
+0x10DE 0x031F NV30 "NVidia 0x031F"
+0x10DE 0x0320 NV30 "NVidia GeForce FX 5200"
+0x10DE 0x0321 NV30 "NVidia GeForce FX 5200 Ultra"
+0x10DE 0x0322 NV30 "NVidia GeForce FX 5200"
+0x10DE 0x0323 NV30 "NVidia GeForce FX 5200SE"
+0x10DE 0x0324 NV30 "NVidia GeForce FX Go5200"
+0x10DE 0x0325 NV30 "NVidia GeForce FX Go5250"
+0x10DE 0x0326 NV30 "NVidia GeForce FX 5500"
+0x10DE 0x0327 NV30 "NVidia GeForce FX 5100"
+0x10DE 0x0328 NV30 "NVidia GeForce FX Go5200 32M/64M"
+0x10DE 0x0329 NV30 "NVidia GeForce FX 5200 (Mac)"
+0x10DE 0x032A NV30 "NVidia Quadro NVS 280 PCI"
+0x10DE 0x032B NV30 "NVidia Quadro FX 500/600 PCI"
+0x10DE 0x032C NV30 "NVidia GeForce FX Go53xx Series"
+0x10DE 0x032D NV30 "NVidia GeForce FX Go5100"
+0x10DE 0x032F NV30 "NVidia 0x032F"
+0x10DE 0x0330 NV30 "NVidia GeForce FX 5900 Ultra"
+0x10DE 0x0331 NV30 "NVidia GeForce FX 5900"
+0x10DE 0x0332 NV30 "NVidia GeForce FX 5900XT"
+0x10DE 0x0333 NV30 "NVidia GeForce FX 5950 Ultra"
+0x10DE 0x033F NV30 "NVidia Quadro FX 700"
+0x10DE 0x0334 NV30 "NVidia GeForce FX 5900ZT"
+0x10DE 0x0338 NV30 "NVidia Quadro FX 3000"
+0x10DE 0x0341 NV30 "NVidia GeForce FX 5700 Ultra"
+0x10DE 0x0342 NV30 "NVidia GeForce FX 5700"
+0x10DE 0x0343 NV30 "NVidia GeForce FX 5700LE"
+0x10DE 0x0344 NV30 "NVidia GeForce FX 5700VE"
+0x10DE 0x0345 NV30 "NVidia 0x0345"
+0x10DE 0x0347 NV30 "NVidia GeForce FX Go5700"
+0x10DE 0x0348 NV30 "NVidia GeForce FX Go5700"
+0x10DE 0x0349 NV30 "NVidia 0x0349"
+0x10DE 0x034B NV30 "NVidia 0x034B"
+0x10DE 0x034C NV30 "NVidia Quadro FX Go1000"
+0x10DE 0x034E NV30 "NVidia Quadro FX 1100"
+0x10DE 0x034F NV30 "NVidia 0x034F"
+0x10DE 0x0040 NV40 "NVidia GeForce 6800 Ultra"
+0x10DE 0x0041 NV40 "NVidia GeForce 6800"
+0x10DE 0x0042 NV40 "NVidia GeForce 6800 LE"
+0x10DE 0x0043 NV40 "NVidia 0x0043"
+0x10DE 0x0045 NV40 "NVidia GeForce 6800 GT"
+0x10DE 0x0046 NV40 "NVidia GeForce 6800 GT"
+0x10DE 0x0049 NV40 "NVidia 0x0049"
+0x10DE 0x004E NV40 "NVidia Quadro FX 4000"
+0x10DE 0x00C0 NV40 "NVidia 0x00C0"
+0x10DE 0x00C1 NV40 "NVidia GeForce 6800"
+0x10DE 0x00C2 NV40 "NVidia GeForce 6800 LE"
+0x10DE 0x00C8 NV40 "NVidia GeForce Go 6800"
+0x10DE 0x00C9 NV40 "NVidia GeForce Go 6800 Ultra"
+0x10DE 0x00CC NV40 "NVidia Quadro FX Go1400"
+0x10DE 0x00CD NV40 "NVidia Quadro FX 3450/4000 SDI"
+0x10DE 0x00CE NV40 "NVidia Quadro FX 1400"
+0x10de 0x00f0 NV40 "Nvidia GeForce 6600 GT"
+0x10de 0x00f1 NV40 "Nvidia GeForce 6600 GT"
+0x10DE 0x0140 NV40 "NVidia GeForce 6600 GT"
+0x10DE 0x0141 NV40 "NVidia GeForce 6600"
+0x10DE 0x0142 NV40 "NVidia GeForce 6600 LE"
+0x10DE 0x0143 NV40 "NVidia 0x0143"
+0x10DE 0x0144 NV40 "NVidia GeForce Go 6600"
+0x10DE 0x0145 NV40 "NVidia GeForce 6610 XL"
+0x10DE 0x0146 NV40 "NVidia GeForce Go 6600 TE/6200 TE"
+0x10DE 0x0147 NV40 "NVidia GeForce 6700 XL"
+0x10DE 0x0148 NV40 "NVidia GeForce Go 6600"
+0x10DE 0x0149 NV40 "NVidia GeForce Go 6600 GT"
+0x10DE 0x014B NV40 "NVidia 0x014B"
+0x10DE 0x014C NV40 "NVidia 0x014C"
+0x10DE 0x014D NV40 "NVidia 0x014D"
+0x10DE 0x014E NV40 "NVidia Quadro FX 540"
+0x10DE 0x014F NV40 "NVidia GeForce 6200"
+0x10DE 0x0160 NV40 "NVidia 0x0160"
+0x10DE 0x0161 NV40 "NVidia GeForce 6200 TurboCache(TM)"
+0x10DE 0x0162 NV40 "NVidia GeForce 6200SE TurboCache(TM)"
+0x10DE 0x0163 NV40 "NVidia 0x0163"
+0x10DE 0x0164 NV40 "NVidia GeForce Go 6200"
+0x10DE 0x0165 NV40 "NVidia Quadro NVS 285"
+0x10DE 0x0166 NV40 "NVidia GeForce Go 6400"
+0x10DE 0x0167 NV40 "NVidia GeForce Go 6200"
+0x10DE 0x0168 NV40 "NVidia GeForce Go 6400"
+0x10DE 0x0169 NV40 "NVidia 0x0169"
+0x10DE 0x016B NV40 "NVidia 0x016B"
+0x10DE 0x016C NV40 "NVidia 0x016C"
+0x10DE 0x016D NV40 "NVidia 0x016D"
+0x10DE 0x016E NV40 "NVidia 0x016E"
+0x10DE 0x0210 NV40 "NVidia 0x0210"
+0x10DE 0x0211 NV40 "NVidia GeForce 6800"
+0x10DE 0x0212 NV40 "NVidia GeForce 6800 LE"
+0x10DE 0x0215 NV40 "NVidia GeForce 6800 GT"
+0x10DE 0x0220 NV40 "NVidia 0x0220"
+0x10DE 0x0221 NV40 "NVidia GeForce 6200"
+0x10DE 0x0222 NV40 "NVidia 0x0222"
+0x10DE 0x0228 NV40 "NVidia 0x0228"
+0x10DE 0x0090 NV40 "NVidia 0x0090"
+0x10DE 0x0091 NV40 "NVidia GeForce 7800 GTX"
+0x10DE 0x0092 NV40 "NVidia 0x0092"
+0x10DE 0x0093 NV40 "NVidia 0x0093"
+0x10DE 0x0094 NV40 "NVidia 0x0094"
+0x10DE 0x0098 NV40 "NVidia 0x0098"
+0x10DE 0x0099 NV40 "NVidia GeForce Go 7800 GTX"
+0x10DE 0x009C NV40 "NVidia 0x009C"
+0x10DE 0x009D NV40 "NVidia Quadro FX 4500"
+0x10DE 0x009E NV40 "NVidia 0x009E"
+
+[xgi]
+0x18ca 0x2200 0 "XP5"
+0x18ca 0x0047 0 "XP10 / XG47"
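This .txt file is the human-readable companion to drm_pciids.h above: each row carries the same vendor, device, and flags values, plus a quoted marketing name that the header drops. For example, the radeon row

    0x1002 0x4966 CHIP_RV250 "ATI Radeon If RV250 9000"

corresponds one-to-one to the header entry

    {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \

In upstream DRM trees a list like this conventionally serves as the master from which the header macros are regenerated by a script, so the two files are expected to stay in lockstep.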
diff --git a/psb-kernel-source-4.41.1/drm_proc.c b/psb-kernel-source-4.41.1/drm_proc.c
new file mode 100644
index 0000000..9f423c6
--- /dev/null
@@ -0,0 +1,646 @@
+/**
+ * \file drm_proc.c
+ * /proc support for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * \par Acknowledgements:
+ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
+ *    the problem with the proc files not outputting all their information.
+ */
+
+/*
+ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_name_info(char *buf, char **start, off_t offset,
+                        int request, int *eof, void *data);
+static int drm_vm_info(char *buf, char **start, off_t offset,
+                      int request, int *eof, void *data);
+static int drm_clients_info(char *buf, char **start, off_t offset,
+                           int request, int *eof, void *data);
+static int drm_queues_info(char *buf, char **start, off_t offset,
+                          int request, int *eof, void *data);
+static int drm_bufs_info(char *buf, char **start, off_t offset,
+                        int request, int *eof, void *data);
+static int drm_objects_info(char *buf, char **start, off_t offset,
+                        int request, int *eof, void *data);
+#if DRM_DEBUG_CODE
+static int drm_vma_info(char *buf, char **start, off_t offset,
+                       int request, int *eof, void *data);
+#endif
+
+/**
+ * Proc file list.
+ */
+static struct drm_proc_list {
+       const char *name;       /**< file name */
+       int (*f) (char *, char **, off_t, int, int *, void *);         /**< proc callback */
+} drm_proc_list[] = {
+       {"name", drm_name_info},
+       {"mem", drm_mem_info},
+       {"vm", drm_vm_info},
+       {"clients", drm_clients_info},
+       {"queues", drm_queues_info},
+       {"bufs", drm_bufs_info},
+       {"objects", drm_objects_info},
+#if DRM_DEBUG_CODE
+       {"vma", drm_vma_info},
+#endif
+};
+
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
+
+/**
+ * Initialize the DRI proc filesystem for a device.
+ *
+ * \param dev DRM device.
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root resulting DRI device proc dir entry.
+ * \return zero on success, or -1 on failure.
+ *
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
+ */
+int drm_proc_init(struct drm_device * dev, int minor,
+                 struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
+{
+       struct proc_dir_entry *ent;
+       int i, j;
+       char name[64];
+
+       sprintf(name, "%d", minor);
+       *dev_root = proc_mkdir(name, root);
+       if (!*dev_root) {
+               DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+               return -1;
+       }
+
+       for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+               ent = create_proc_entry(drm_proc_list[i].name,
+                                       S_IFREG | S_IRUGO, *dev_root);
+               if (!ent) {
+                       DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
+                                 name, drm_proc_list[i].name);
+                       for (j = 0; j < i; j++)
+                       remove_proc_entry(drm_proc_list[j].name,
+                                                 *dev_root);
+                       remove_proc_entry(name, root);
+                       return -1;
+               }
+               ent->read_proc = drm_proc_list[i].f;
+               ent->data = dev;
+       }
+       return 0;
+}
+
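+/*
+ * Usage sketch (illustrative, not part of the original file): the stub code
+ * is expected to hold the "/proc/dri" root in drm_proc_root and to keep
+ * dev_root around for the matching cleanup call.
+ *
+ *	struct proc_dir_entry *dev_root;
+ *
+ *	if (drm_proc_init(dev, minor, drm_proc_root, &dev_root))
+ *		return -ENOMEM;
+ *	...
+ *	drm_proc_cleanup(minor, drm_proc_root, dev_root);
+ */
+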
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root DRI device proc dir entry.
+ * \return always zero.
+ *
+ * Remove all proc entries created by drm_proc_init().
+ */
+int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
+                    struct proc_dir_entry *dev_root)
+{
+       int i;
+       char name[64];
+
+       if (!root || !dev_root)
+               return 0;
+
+       for (i = 0; i < DRM_PROC_ENTRIES; i++)
+               remove_proc_entry(drm_proc_list[i].name, dev_root);
+       sprintf(name, "%d", minor);
+       remove_proc_entry(name, root);
+
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+static int drm_name_info(char *buf, char **start, off_t offset, int request,
+                        int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       if (dev->unique) {
+               DRM_PROC_PRINT("%s %s %s\n",
+                              dev->driver->pci_driver.name,
+                              pci_name(dev->pdev), dev->unique);
+       } else {
+               DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
+                              pci_name(dev->pdev));
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
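+/*
+ * All *_info() handlers below follow the same read_proc contract documented
+ * above. A hypothetical minimal handler, for illustration only
+ * (DRM_PROC_PRINT appends to buf and updates len behind the scenes):
+ *
+ *	static int drm_foo_info(char *buf, char **start, off_t offset,
+ *				int request, int *eof, void *data)
+ *	{
+ *		struct drm_device *dev = (struct drm_device *) data;
+ *		int len = 0;
+ *
+ *		if (offset > DRM_PROC_LIMIT) {
+ *			*eof = 1;
+ *			return 0;
+ *		}
+ *		*start = &buf[offset];
+ *		*eof = 0;
+ *
+ *		DRM_PROC_PRINT("device 0x%04x\n", dev->pci_device);
+ *
+ *		if (len > request + offset)
+ *			return request;
+ *		*eof = 1;
+ *		return len - offset;
+ *	}
+ */
+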
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+static int drm__vm_info(char *buf, char **start, off_t offset, int request,
+                       int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int len = 0;
+       struct drm_map *map;
+       struct drm_map_list *r_list;
+
+       /* Hardcoded from _DRM_FRAME_BUFFER,
+          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
+          _DRM_SCATTER_GATHER, and _DRM_CONSISTENT. */
+       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+       const char *type;
+       int i;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       DRM_PROC_PRINT("slot     offset       size type flags    "
+                      "address mtrr\n\n");
+       i = 0;
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               map = r_list->map;
+               if (!map)
+                       continue;
+               if (map->type < 0 || map->type > 5)
+                       type = "??";
+               else
+                       type = types[map->type];
+               DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+                              i,
+                              map->offset,
+                              map->size, type, map->flags,
+                              (unsigned long) r_list->user_token);
+
+               if (map->mtrr < 0) {
+                       DRM_PROC_PRINT("none\n");
+               } else {
+                       DRM_PROC_PRINT("%4d\n", map->mtrr);
+               }
+               i++;
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+/**
+ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_vm_info(char *buf, char **start, off_t offset, int request,
+                      int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm__vm_info(buf, start, offset, request, eof, data);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__queues_info(char *buf, char **start, off_t offset,
+                           int request, int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int len = 0;
+       int i;
+       struct drm_queue *q;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       DRM_PROC_PRINT("  ctx/flags   use   fin"
+                      "   blk/rw/rwf  wait    flushed     queued"
+                      "      locks\n\n");
+       for (i = 0; i < dev->queue_count; i++) {
+               q = dev->queuelist[i];
+               atomic_inc(&q->use_count);
+               DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
+                                  "%5d/0x%03x %5d %5d"
+                                  " %5d/%c%c/%c%c%c %5Zd\n",
+                                  i,
+                                  q->flags,
+                                  atomic_read(&q->use_count),
+                                  atomic_read(&q->finalization),
+                                  atomic_read(&q->block_count),
+                                  atomic_read(&q->block_read) ? 'r' : '-',
+                                  atomic_read(&q->block_write) ? 'w' : '-',
+                                  waitqueue_active(&q->read_queue) ? 'r' : '-',
+                                  waitqueue_active(&q->
+                                                   write_queue) ? 'w' : '-',
+                                  waitqueue_active(&q->
+                                                   flush_queue) ? 'f' : '-',
+                                  DRM_BUFCOUNT(&q->waitlist));
+               atomic_dec(&q->use_count);
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+/**
+ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_queues_info(char *buf, char **start, off_t offset, int request,
+                          int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm__queues_info(buf, start, offset, request, eof, data);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
+                         int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int len = 0;
+       struct drm_device_dma *dma = dev->dma;
+       int i;
+
+       if (!dma || offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       DRM_PROC_PRINT(" o     size count  free  segs pages    kB\n\n");
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].buf_count)
+                       DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
+                                      i,
+                                      dma->bufs[i].buf_size,
+                                      dma->bufs[i].buf_count,
+                                      atomic_read(&dma->bufs[i]
+                                                  .freelist.count),
+                                      dma->bufs[i].seg_count,
+                                      dma->bufs[i].seg_count
+                                      * (1 << dma->bufs[i].page_order),
+                                      (dma->bufs[i].seg_count
+                                       * (1 << dma->bufs[i].page_order))
+                                      * PAGE_SIZE / 1024);
+       }
+       DRM_PROC_PRINT("\n");
+       for (i = 0; i < dma->buf_count; i++) {
+               if (i && !(i % 32))
+                       DRM_PROC_PRINT("\n");
+               DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
+       }
+       DRM_PROC_PRINT("\n");
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+/**
+ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
+                        int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm__bufs_info(buf, start, offset, request, eof, data);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Called when "/proc/dri/.../objects" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__objects_info(char *buf, char **start, off_t offset, int request,
+                         int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int len = 0;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_fence_manager *fm = &dev->fm;
+       uint64_t used_mem;
+       uint64_t used_emer;
+       uint64_t low_mem;
+       uint64_t high_mem;
+       uint64_t emer_mem;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       DRM_PROC_PRINT("Object accounting:\n\n");
+       if (fm->initialized) {
+               DRM_PROC_PRINT("Number of active fence objects: %d.\n",
+                              atomic_read(&fm->count));
+       } else {
+               DRM_PROC_PRINT("Fence objects are not supported by this driver\n");
+       }
+
+       if (bm->initialized) {
+               DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
+                              atomic_read(&bm->count));
+       }
+       DRM_PROC_PRINT("Memory accounting:\n\n");
+       if (bm->initialized) {
+               DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
+       } else {
+               DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
+       }
+
+       drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem);
+
+       if (used_mem > 16*PAGE_SIZE) {
+               DRM_PROC_PRINT("Used object memory is %lu pages.\n",
+                              (unsigned long) (used_mem >> PAGE_SHIFT));
+       } else {
+               DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
+                              (unsigned long) used_mem);
+       }
+       if (used_emer > 16*PAGE_SIZE) {
+               DRM_PROC_PRINT("Used emergency memory is %lu pages.\n",
+                              (unsigned long) (used_emer >> PAGE_SHIFT));
+       } else {
+               DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n",
+                              (unsigned long) used_emer);
+       }
+       DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
+                      (unsigned long) (low_mem >> PAGE_SHIFT));
+       DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
+                      (unsigned long) (high_mem >> PAGE_SHIFT));
+       DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n",
+                      (unsigned long) (emer_mem >> PAGE_SHIFT));
+
+       DRM_PROC_PRINT("\n");
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+/**
+ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_objects_info(char *buf, char **start, off_t offset, int request,
+                        int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm__objects_info(buf, start, offset, request, eof, data);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__clients_info(char *buf, char **start, off_t offset,
+                            int request, int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int len = 0;
+       struct drm_file *priv;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       DRM_PROC_PRINT("a dev   pid    uid      magic     ioctls\n\n");
+       list_for_each_entry(priv, &dev->filelist, lhead) {
+               DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
+                              priv->authenticated ? 'y' : 'n',
+                              priv->minor,
+                              priv->pid,
+                              priv->uid, priv->magic, priv->ioctl_count);
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+/**
+ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_clients_info(char *buf, char **start, off_t offset,
+                           int request, int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm__clients_info(buf, start, offset, request, eof, data);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+#if DRM_DEBUG_CODE
+
+static int drm__vma_info(char *buf, char **start, off_t offset, int request,
+                        int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int len = 0;
+       struct drm_vma_entry *pt;
+       struct vm_area_struct *vma;
+#if defined(__i386__)
+       unsigned int pgprot;
+#endif
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+                      atomic_read(&dev->vma_count),
+                      high_memory, virt_to_phys(high_memory));
+       list_for_each_entry(pt, &dev->vmalist, head) {
+               if (!(vma = pt->vma))
+                       continue;
+               DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+                              pt->pid,
+                              vma->vm_start,
+                              vma->vm_end,
+                              vma->vm_flags & VM_READ ? 'r' : '-',
+                              vma->vm_flags & VM_WRITE ? 'w' : '-',
+                              vma->vm_flags & VM_EXEC ? 'x' : '-',
+                              vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+                              vma->vm_flags & VM_LOCKED ? 'l' : '-',
+                              vma->vm_flags & VM_IO ? 'i' : '-',
+                              vma->vm_pgoff);
+
+#if defined(__i386__)
+               pgprot = pgprot_val(vma->vm_page_prot);
+               DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
+                              pgprot & _PAGE_PRESENT ? 'p' : '-',
+                              pgprot & _PAGE_RW ? 'w' : 'r',
+                              pgprot & _PAGE_USER ? 'u' : 's',
+                              pgprot & _PAGE_PWT ? 't' : 'b',
+                              pgprot & _PAGE_PCD ? 'u' : 'c',
+                              pgprot & _PAGE_ACCESSED ? 'a' : '-',
+                              pgprot & _PAGE_DIRTY ? 'd' : '-',
+                              pgprot & _PAGE_PSE ? 'm' : 'k',
+                              pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+               DRM_PROC_PRINT("\n");
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+static int drm_vma_info(char *buf, char **start, off_t offset, int request,
+                       int *eof, void *data)
+{
+       struct drm_device *dev = (struct drm_device *) data;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm__vma_info(buf, start, offset, request, eof, data);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_regman.c b/psb-kernel-source-4.41.1/drm_regman.c
new file mode 100644
index 0000000..aa11732
--- /dev/null
@@ -0,0 +1,200 @@
+/**************************************************************************
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * An allocate-fence manager implementation intended for sets of base-registers
+ * or tiling-registers.
+ */
+
+#include "drmP.h"
+
+/*
+ * Allocate a compatible register and put it on the unfenced list.
+ */
+
+int drm_regs_alloc(struct drm_reg_manager *manager,
+                  const void *data,
+                  uint32_t fence_class,
+                  uint32_t fence_type,
+                  int interruptible, int no_wait, struct drm_reg **reg)
+{
+       struct drm_reg *entry, *next_entry;
+       int ret;
+
+       *reg = NULL;
+
+       /*
+        * Search the unfenced list.
+        */
+
+       list_for_each_entry(entry, &manager->unfenced, head) {
+               if (manager->reg_reusable(entry, data)) {
+                       entry->new_fence_type |= fence_type;
+                       goto out;
+               }
+       }
+
+       /*
+        * Search the lru list.
+        */
+
+       list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
+               struct drm_fence_object *fence = entry->fence;
+               if (fence->fence_class == fence_class &&
+                   (entry->fence_type & fence_type) == entry->fence_type &&
+                   manager->reg_reusable(entry, data)) {
+                       list_del(&entry->head);
+                       entry->new_fence_type = fence_type;
+                       list_add_tail(&entry->head, &manager->unfenced);
+                       goto out;
+               }
+       }
+
+       /*
+        * Search the free list.
+        */
+
+       list_for_each_entry(entry, &manager->free, head) {
+               list_del(&entry->head);
+               entry->new_fence_type = fence_type;
+               list_add_tail(&entry->head, &manager->unfenced);
+               goto out;
+       }
+
+       if (no_wait)
+               return -EBUSY;
+
+       /*
+        * Go back to the lru list and try to expire fences.
+        */
+
+       list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
+               BUG_ON(!entry->fence);
+               ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
+                                           entry->fence_type);
+               if (ret)
+                       return ret;
+
+               drm_fence_usage_deref_unlocked(&entry->fence);
+               list_del(&entry->head);
+               entry->new_fence_type = fence_type;
+               list_add_tail(&entry->head, &manager->unfenced);
+               goto out;
+       }
+
+       /*
+        * Oops. All registers are used up :(.
+        */
+
+       return -EBUSY;
+out:
+       *reg = entry;
+       return 0;
+}
+EXPORT_SYMBOL(drm_regs_alloc);
+
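+/*
+ * Caller pattern sketch (illustrative; dev_priv->reg_manager and the fence
+ * variables are assumptions): registers are allocated against unfenced
+ * command submission and fenced once the commands are queued.
+ *
+ *	struct drm_reg *reg;
+ *	int ret;
+ *
+ *	ret = drm_regs_alloc(&dev_priv->reg_manager, data, fence_class,
+ *			     fence_type, 1, 0, &reg);
+ *	if (ret)
+ *		return ret;
+ *	... emit commands that use the register ...
+ *	drm_regs_fence(&dev_priv->reg_manager, fence);
+ */
+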
+void drm_regs_fence(struct drm_reg_manager *manager,
+                   struct drm_fence_object *fence)
+{
+       struct drm_reg *entry;
+       struct drm_reg *next_entry;
+
+       if (!fence) {
+
+               /*
+                * Old fence (if any) is still valid.
+                * Put back on free and lru lists.
+                */
+
+               list_for_each_entry_safe_reverse(entry, next_entry,
+                                                &manager->unfenced, head) {
+                       list_del(&entry->head);
+                       list_add(&entry->head, (entry->fence) ?
+                                &manager->lru : &manager->free);
+               }
+       } else {
+
+               /*
+                * Fence the entries with the new fence and put them
+                * on the lru list.
+                */
+
+               list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
+                                        head) {
+                       list_del(&entry->head);
+                       if (entry->fence)
+                               drm_fence_usage_deref_unlocked(&entry->fence);
+                       drm_fence_reference_unlocked(&entry->fence, fence);
+
+                       entry->fence_type = entry->new_fence_type;
+                       BUG_ON((entry->fence_type & fence->type) !=
+                              entry->fence_type);
+
+                       list_add_tail(&entry->head, &manager->lru);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_regs_fence);
+
+void drm_regs_free(struct drm_reg_manager *manager)
+{
+       struct drm_reg *entry;
+       struct drm_reg *next_entry;
+
+       drm_regs_fence(manager, NULL);
+
+       list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
+               list_del(&entry->head);
+               manager->reg_destroy(entry);
+       }
+
+       list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
+
+               (void)drm_fence_object_wait(entry->fence, 1, 1,
+                                           entry->fence_type);
+               list_del(&entry->head);
+               drm_fence_usage_deref_unlocked(&entry->fence);
+               manager->reg_destroy(entry);
+       }
+}
+EXPORT_SYMBOL(drm_regs_free);
+
+void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
+{
+       reg->fence = NULL;
+       list_add_tail(&reg->head, &manager->free);
+}
+EXPORT_SYMBOL(drm_regs_add);
+
+void drm_regs_init(struct drm_reg_manager *manager,
+                  int (*reg_reusable) (const struct drm_reg *, const void *),
+                  void (*reg_destroy) (struct drm_reg *))
+{
+       INIT_LIST_HEAD(&manager->free);
+       INIT_LIST_HEAD(&manager->lru);
+       INIT_LIST_HEAD(&manager->unfenced);
+       manager->reg_reusable = reg_reusable;
+       manager->reg_destroy = reg_destroy;
+}
+EXPORT_SYMBOL(drm_regs_init);
diff --git a/psb-kernel-source-4.41.1/drm_sarea.h b/psb-kernel-source-4.41.1/drm_sarea.h
new file mode 100644
index 0000000..8b67752
--- /dev/null
@@ -0,0 +1,84 @@
+/**
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_SAREA_H_
+#define _DRM_SAREA_H_
+
+#include "drm.h"
+
+/* The SAREA needs to be at least a page */
+#if defined(__alpha__)
+#define SAREA_MAX                       0x2000
+#elif defined(__ia64__)
+#define SAREA_MAX                       0x10000        /* 64kB */
+#else
+/* Intel 830M driver needs at least 8k SAREA */
+#define SAREA_MAX                       0x2000UL
+#endif
+
+/** Maximum number of drawables in the SAREA */
+#define SAREA_MAX_DRAWABLES            256
+
+#define SAREA_DRAWABLE_CLAIMED_ENTRY    0x80000000
+
+/** SAREA drawable */
+struct drm_sarea_drawable {
+       unsigned int stamp;
+       unsigned int flags;
+};
+
+/** SAREA frame */
+struct drm_sarea_frame {
+       unsigned int x;
+       unsigned int y;
+       unsigned int width;
+       unsigned int height;
+       unsigned int fullscreen;
+};
+
+/** SAREA */
+struct drm_sarea {
+    /** first thing is always the DRM locking structure */
+       struct drm_hw_lock lock;
+    /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
+       struct drm_hw_lock drawable_lock;
+       struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES];   /**< drawables */
+       struct drm_sarea_frame frame;   /**< frame */
+       drm_context_t dummy_context;
+};
+
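+/*
+ * A driver typically lays its private shared-area state after this header
+ * within the same mapping; a hypothetical layout, for illustration only:
+ *
+ *	struct foo_sarea {
+ *		struct drm_sarea base;		(must stay first)
+ *		unsigned int last_dispatch;
+ *	};
+ */
+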
+#ifndef __KERNEL__
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+#endif
+
+#endif                         /* _DRM_SAREA_H_ */
diff --git a/psb-kernel-source-4.41.1/drm_scatter.c b/psb-kernel-source-4.41.1/drm_scatter.c
new file mode 100644
index 0000000..920b11c
--- /dev/null
@@ -0,0 +1,219 @@
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+#define DEBUG_SCATTER 0
+
+void drm_sg_cleanup(struct drm_sg_mem *entry)
+{
+       struct page *page;
+       int i;
+
+       for (i = 0; i < entry->pages; i++) {
+               page = entry->pagelist[i];
+               if (page)
+                       ClearPageReserved(page);
+       }
+
+       vfree(entry->virtual);
+
+       drm_free(entry->busaddr,
+                entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
+       drm_free(entry->pagelist,
+                entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
+       drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+}
+EXPORT_SYMBOL(drm_sg_cleanup);
+
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
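+/*
+ * On LP64 kernels ScatterHandle() folds the 64-bit vmalloc address into a
+ * 32-bit handle by summing its halves; a worked example with an
+ * illustrative address:
+ *
+ *	x                = 0xffffc90012345000
+ *	x >> 32          = 0xffffc900
+ *	x & 0xffffffff   = 0x12345000
+ *	ScatterHandle(x) = 0xffffc900 + 0x12345000 = 0x12341900 (mod 2^32)
+ */
+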
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+{
+       struct drm_sg_mem *entry;
+       unsigned long pages, i, j;
+
+       DRM_DEBUG("%s\n", __FUNCTION__);
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       if (dev->sg)
+               return -EINVAL;
+
+       entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
+       if (!entry)
+               return -ENOMEM;
+
+       memset(entry, 0, sizeof(*entry));
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
+
+       entry->pages = pages;
+       entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
+                                   DRM_MEM_PAGES);
+       if (!entry->pagelist) {
+               drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+               return -ENOMEM;
+       }
+
+       memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
+
+       entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
+                                  DRM_MEM_PAGES);
+       if (!entry->busaddr) {
+               drm_free(entry->pagelist,
+                        entry->pages * sizeof(*entry->pagelist),
+                        DRM_MEM_PAGES);
+               drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+               return -ENOMEM;
+       }
+       memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
+
+       entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
+       if (!entry->virtual) {
+               drm_free(entry->busaddr,
+                        entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
+               drm_free(entry->pagelist,
+                        entry->pages * sizeof(*entry->pagelist),
+                        DRM_MEM_PAGES);
+               drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+               return -ENOMEM;
+       }
+
+       /* This also forces the mapping of COW pages, so our page list
+        * will be valid.  Please don't remove it...
+        */
+       memset(entry->virtual, 0, pages << PAGE_SHIFT);
+
+       entry->handle = ScatterHandle((unsigned long)entry->virtual);
+
+       DRM_DEBUG("sg alloc handle  = %08lx\n", entry->handle);
+       DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
+
+       for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+            i += PAGE_SIZE, j++) {
+               entry->pagelist[j] = vmalloc_to_page((void *)i);
+               if (!entry->pagelist[j])
+                       goto failed;
+               SetPageReserved(entry->pagelist[j]);
+       }
+
+       request->handle = entry->handle;
+
+       dev->sg = entry;
+
+#if DEBUG_SCATTER
+       /* Verify that each page points to its virtual address, and vice
+        * versa.
+        */
+       {
+               int error = 0;
+
+               for (i = 0; i < pages; i++) {
+                       unsigned long *tmp;
+
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0xcafebabe;
+                       }
+                       tmp = (unsigned long *)((u8 *) entry->virtual +
+                                               (PAGE_SIZE * i));
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               if (*tmp != 0xcafebabe && error == 0) {
+                                       error = 1;
+                                       DRM_ERROR("Scatter allocation error, "
+                                                 "pagelist does not match "
+                                                 "virtual mapping\n");
+                               }
+                       }
+                       tmp = page_address(entry->pagelist[i]);
+                       for (j = 0;
+                            j < PAGE_SIZE / sizeof(unsigned long);
+                            j++, tmp++) {
+                               *tmp = 0;
+                       }
+               }
+               if (error == 0)
+                       DRM_ERROR("Scatter allocation matches pagelist\n");
+       }
+#endif
+
+       return 0;
+
+      failed:
+       drm_sg_cleanup(entry);
+       return -ENOMEM;
+
+}
+EXPORT_SYMBOL(drm_sg_alloc);
+
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+
+       return drm_sg_alloc(dev, request);
+
+}
+
+int drm_sg_free(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
+{
+       struct drm_scatter_gather *request = data;
+       struct drm_sg_mem *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_SG))
+               return -EINVAL;
+
+       entry = dev->sg;
+       dev->sg = NULL;
+
+       if (!entry || entry->handle != request->handle)
+               return -EINVAL;
+
+       DRM_DEBUG("sg free virtual  = %p\n", entry->virtual);
+
+       drm_sg_cleanup(entry);
+
+       return 0;
+}
diff --git a/psb-kernel-source-4.41.1/drm_sman.c b/psb-kernel-source-4.41.1/drm_sman.c
new file mode 100644
index 0000000..8421a93
--- /dev/null
@@ -0,0 +1,353 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple memory manager interface that keeps track of allocated regions on a
+ * per-"owner" basis. All regions associated with an "owner" can be released
+ * with a single call, typically when the "owner" ceases to exist. The owner
+ * is any "unsigned long" identifier, typically a pointer to a file-private
+ * struct or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drm_sman.h"
+
+struct drm_owner_item {
+       struct drm_hash_item owner_hash;
+       struct list_head sman_list;
+       struct list_head mem_blocks;
+};
+
+void drm_sman_takedown(struct drm_sman * sman)
+{
+       drm_ht_remove(&sman->user_hash_tab);
+       drm_ht_remove(&sman->owner_hash_tab);
+       if (sman->mm)
+               drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
+                        DRM_MEM_MM);
+}
+
+EXPORT_SYMBOL(drm_sman_takedown);
+
+int
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+             unsigned int user_order, unsigned int owner_order)
+{
+       int ret = 0;
+
+       sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
+                                               DRM_MEM_MM);
+       if (!sman->mm) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       sman->num_managers = num_managers;
+       INIT_LIST_HEAD(&sman->owner_items);
+       ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
+       if (ret)
+               goto out1;
+       ret = drm_ht_create(&sman->user_hash_tab, user_order);
+       if (!ret)
+               goto out;
+
+       drm_ht_remove(&sman->owner_hash_tab);
+out1:
+       drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
+out:
+       return ret;
+}
+
+EXPORT_SYMBOL(drm_sman_init);
+
+static void *drm_sman_mm_allocate(void *private, unsigned long size,
+                                 unsigned alignment)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       struct drm_mm_node *tmp;
+
+       tmp = drm_mm_search_free(mm, size, alignment, 1);
+       if (!tmp) {
+               return NULL;
+       }
+       tmp = drm_mm_get_block(tmp, size, alignment);
+       return tmp;
+}
+
+static void drm_sman_mm_free(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+
+       drm_mm_put_block(node);
+}
+
+static void drm_sman_mm_destroy(void *private)
+{
+       struct drm_mm *mm = (struct drm_mm *) private;
+       drm_mm_takedown(mm);
+       drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+}
+
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
+{
+       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+       return node->start;
+}
+
+int
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+                  unsigned long start, unsigned long size)
+{
+       struct drm_sman_mm *sman_mm;
+       struct drm_mm *mm;
+       int ret;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM);
+       if (!mm) {
+               return -ENOMEM;
+       }
+       sman_mm->private = mm;
+       ret = drm_mm_init(mm, start, size);
+
+       if (ret) {
+               drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+               return ret;
+       }
+
+       sman_mm->allocate = drm_sman_mm_allocate;
+       sman_mm->free = drm_sman_mm_free;
+       sman_mm->destroy = drm_sman_mm_destroy;
+       sman_mm->offset = drm_sman_mm_offset;
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_set_range);
+
+int
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+                    struct drm_sman_mm * allocator)
+{
+       BUG_ON(manager >= sman->num_managers);
+       sman->mm[manager] = *allocator;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_sman_set_manager);
+
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
+                                                unsigned long owner)
+{
+       int ret;
+       struct drm_hash_item *owner_hash_item;
+       struct drm_owner_item *owner_item;
+
+       ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
+       if (!ret) {
+               return drm_hash_entry(owner_hash_item, struct drm_owner_item,
+                                     owner_hash);
+       }
+
+       owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM);
+       if (!owner_item)
+               goto out;
+
+       INIT_LIST_HEAD(&owner_item->mem_blocks);
+       owner_item->owner_hash.key = owner;
+       if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
+               goto out1;
+
+       list_add_tail(&owner_item->sman_list, &sman->owner_items);
+       return owner_item;
+
+out1:
+       drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+out:
+       return NULL;
+}
+
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
+                                   unsigned long size, unsigned alignment,
+                                   unsigned long owner)
+{
+       void *tmp;
+       struct drm_sman_mm *sman_mm;
+       struct drm_owner_item *owner_item;
+       struct drm_memblock_item *memblock;
+
+       BUG_ON(manager >= sman->num_managers);
+
+       sman_mm = &sman->mm[manager];
+       tmp = sman_mm->allocate(sman_mm->private, size, alignment);
+
+       if (!tmp) {
+               return NULL;
+       }
+
+       memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM);
+
+       if (!memblock)
+               goto out;
+
+       memblock->mm_info = tmp;
+       memblock->mm = sman_mm;
+       memblock->sman = sman;
+
+       if (drm_ht_just_insert_please
+           (&sman->user_hash_tab, &memblock->user_hash,
+            (unsigned long)memblock, 32, 0, 0))
+               goto out1;
+
+       owner_item = drm_sman_get_owner_item(sman, owner);
+       if (!owner_item)
+               goto out2;
+
+       list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
+
+       return memblock;
+
+out2:
+       drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
+out1:
+       drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
+out:
+       sman_mm->free(sman_mm->private, tmp);
+
+       return NULL;
+}
+
+EXPORT_SYMBOL(drm_sman_alloc);
+
+static void drm_sman_free(struct drm_memblock_item *item)
+{
+       struct drm_sman *sman = item->sman;
+
+       list_del(&item->owner_list);
+       drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
+       item->mm->free(item->mm->private, item->mm_info);
+       drm_free(item, sizeof(*item), DRM_MEM_MM);
+}
+
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
+{
+       struct drm_hash_item *hash_item;
+       struct drm_memblock_item *memblock_item;
+
+       if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
+               return -EINVAL;
+
+       memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+                                      user_hash);
+       drm_sman_free(memblock_item);
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_free_key);
+
+static void drm_sman_remove_owner(struct drm_sman *sman,
+                                 struct drm_owner_item *owner_item)
+{
+       list_del(&owner_item->sman_list);
+       drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
+       drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+}
+
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+               return -1;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       if (list_empty(&owner_item->mem_blocks)) {
+               drm_sman_remove_owner(sman, owner_item);
+               return -1;
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_owner_clean);
+
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+                                     struct drm_owner_item *owner_item)
+{
+       struct drm_memblock_item *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
+                                owner_list) {
+               drm_sman_free(entry);
+       }
+       drm_sman_remove_owner(sman, owner_item);
+}
+
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
+{
+
+       struct drm_hash_item *hash_item;
+       struct drm_owner_item *owner_item;
+
+       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+
+               return;
+       }
+
+       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+       drm_sman_do_owner_cleanup(sman, owner_item);
+}
+
+EXPORT_SYMBOL(drm_sman_owner_cleanup);
+
+void drm_sman_cleanup(struct drm_sman *sman)
+{
+       struct drm_owner_item *entry, *next;
+       unsigned int i;
+       struct drm_sman_mm *sman_mm;
+
+       list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
+               drm_sman_do_owner_cleanup(sman, entry);
+       }
+       if (sman->mm) {
+               for (i = 0; i < sman->num_managers; ++i) {
+                       sman_mm = &sman->mm[i];
+                       if (sman_mm->private) {
+                               sman_mm->destroy(sman_mm->private);
+                               sman_mm->private = NULL;
+                       }
+               }
+       }
+}
+
+EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/psb-kernel-source-4.41.1/drm_sman.h b/psb-kernel-source-4.41.1/drm_sman.h
new file mode 100644
index 0000000..39a39fe
--- /dev/null
@@ -0,0 +1,176 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple memory MANager interface that keeps track of allocated regions on a
+ * per-"owner" basis. All regions associated with an "owner" can be released
+ * with a single call, typically when the "owner" ceases to exist. The owner
+ * is any "unsigned long" identifier, typically a pointer to a file-private
+ * struct or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef DRM_SMAN_H
+#define DRM_SMAN_H
+
+#include "drmP.h"
+#include "drm_hashtab.h"
+
+/*
+ * A class that is an abstraction of a simple memory allocator.
+ * The sman implementation provides a default such allocator
+ * using the drm_mm.c implementation. But the user can replace it.
+ * See the SiS implementation, which may use the SiS FB kernel module
+ * for memory management.
+ */
+
+struct drm_sman_mm {
+       /* private info. If allocated, needs to be destroyed by the destroy
+          function */
+       void *private;
+
+       /* Allocate a memory block with given size and alignment.
+          Return an opaque reference to the memory block */
+
+       void *(*allocate) (void *private, unsigned long size,
+                          unsigned alignment);
+
+       /* Free a memory block. "ref" is the opaque reference that we got from
+          the "alloc" function */
+
+       void (*free) (void *private, void *ref);
+
+       /* Free all resources associated with this allocator */
+
+       void (*destroy) (void *private);
+
+       /* Return a memory offset from the opaque reference returned from the
+          "alloc" function */
+
+       unsigned long (*offset) (void *private, void *ref);
+};
+
+struct drm_memblock_item {
+       struct list_head owner_list;
+       struct drm_hash_item user_hash;
+       void *mm_info;
+       struct drm_sman_mm *mm;
+       struct drm_sman *sman;
+};
+
+struct drm_sman {
+       struct drm_sman_mm *mm;
+       int num_managers;
+       struct drm_open_hash owner_hash_tab;
+       struct drm_open_hash user_hash_tab;
+       struct list_head owner_items;
+};
+
+/*
+ * Take down a memory manager. This function should only be called after a
+ * successful init and after a call to drm_sman_cleanup.
+ */
+
+extern void drm_sman_takedown(struct drm_sman * sman);
+
+/*
+ * Allocate structures for a manager.
+ * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
+ * user_order is the log2 of the number of buckets in the user hash table.
+ *         set this to approximately log2 of the max number of memory regions
+ *         that will be allocated for _all_ pools together.
+ * owner_order is the log2 of the number of buckets in the owner hash table.
+ *         set this to approximately log2 of the number of client file
+ *         connections that will be using the manager.
+ *
+ */
+
+extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+                        unsigned int user_order, unsigned int owner_order);
+
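+/*
+ * Initialization sketch in the spirit of the SiS driver (values
+ * illustrative; vram_pages is an assumption): two pools, about 2^12 memory
+ * regions expected in total, and about 2^8 client connections.
+ *
+ *	struct drm_sman sman;
+ *
+ *	if (drm_sman_init(&sman, 2, 12, 8))
+ *		goto fail;
+ *	if (drm_sman_set_range(&sman, 0, 0, vram_pages))
+ *		goto fail;
+ */
+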
+/*
+ * Initialize a drm_mm.c allocator. Should be called only once for each
+ * manager unless a customized allocator is used.
+ */
+
+extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+                             unsigned long start, unsigned long size);
+
+/*
+ * Initialize a customized allocator for one of the managers.
+ * (See the SiS module). The object pointed to by "allocator" is copied,
+ * so it can be destroyed after this call.
+ */
+
+extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+                               struct drm_sman_mm * allocator);
+
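+/*
+ * A hypothetical customized allocator, for illustration only; the foo_*()
+ * callbacks are assumed to match the signatures in struct drm_sman_mm
+ * above.
+ *
+ *	static struct drm_sman_mm foo_sman_mm = {
+ *		.private = NULL,
+ *		.allocate = foo_alloc,
+ *		.free = foo_free,
+ *		.destroy = foo_destroy,
+ *		.offset = foo_offset,
+ *	};
+ *
+ *	drm_sman_set_manager(&sman, 1, &foo_sman_mm);
+ */
+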
+/*
+ * Allocate a memory block. Alignment is not implemented yet.
+ */
+
+extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
+                                               unsigned int manager,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long owner);
+/*
+ * Free a memory block identified by its user hash key.
+ */
+
+extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
+
+/*
+ * Returns nonzero iff there are no stale memory blocks associated with this
+ * owner. Typically called to determine if we need to idle the hardware and
+ * call drm_sman_owner_cleanup. If there are no stale memory blocks, it also
+ * removes all resources associated with owner.
+ */
+
+extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
+
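+/*
+ * Teardown protocol sketch (foo_idle_hardware() is hypothetical): a zero
+ * return means stale blocks remain, so the engine is idled before cleanup.
+ *
+ *	if (!drm_sman_owner_clean(&sman, owner)) {
+ *		foo_idle_hardware(dev);
+ *		drm_sman_owner_cleanup(&sman, owner);
+ *	}
+ */
+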
+/*
+ * Frees all stale memory blocks associated with this owner. Note that this
+ * requires that the hardware is finished with all blocks, so the graphics engine
+ * should be idled before this call is made. This function also frees
+ * any resources associated with "owner" and should be called when owner
+ * is not going to be referenced anymore.
+ */
+
+extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
+
+/*
+ * Frees all stale memory blocks associated with the memory manager.
+ * See idling above.
+ */
+
+extern void drm_sman_cleanup(struct drm_sman * sman);
+
+#endif
diff --git a/psb-kernel-source-4.41.1/drm_stub.c b/psb-kernel-source-4.41.1/drm_stub.c
new file mode 100644
index 0000000..cc759d5
--- /dev/null
@@ -0,0 +1,325 @@
+/**
+ * \file drm_stub.c
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "drmP.h"
+#include "drm_core.h"
+
+unsigned int drm_cards_limit = 16;     /* Enough for one machine */
+unsigned int drm_debug = 0;            /* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+module_param_named(cards_limit, drm_cards_limit, int, 0444);
+module_param_named(debug, drm_debug, int, 0600);
+
+struct drm_head **drm_heads;
+struct class *drm_class;
+struct proc_dir_entry *drm_proc_root;
+
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+                          const struct pci_device_id *ent,
+                          struct drm_driver *driver)
+{
+       int retcode;
+
+       INIT_LIST_HEAD(&dev->filelist);
+       INIT_LIST_HEAD(&dev->ctxlist);
+       INIT_LIST_HEAD(&dev->vmalist);
+       INIT_LIST_HEAD(&dev->maplist);
+
+       spin_lock_init(&dev->count_lock);
+       spin_lock_init(&dev->drw_lock);
+       spin_lock_init(&dev->tasklet_lock);
+       spin_lock_init(&dev->lock.spinlock);
+       init_timer(&dev->timer);
+       mutex_init(&dev->struct_mutex);
+       mutex_init(&dev->ctxlist_mutex);
+       mutex_init(&dev->bm.evict_mutex);
+
+       idr_init(&dev->drw_idr);
+
+       dev->pdev = pdev;
+       dev->pci_device = pdev->device;
+       dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+       dev->hose = pdev->sysdata;
+#endif
+       dev->irq = pdev->irq;
+       dev->irq_enabled = 0;
+
+       if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
+               return -ENOMEM;
+
+       if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+                       DRM_FILE_PAGE_OFFSET_SIZE)) {
+               drm_ht_remove(&dev->map_hash);
+               return -ENOMEM;
+       }
+
+       if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
+               drm_ht_remove(&dev->map_hash);
+               drm_mm_takedown(&dev->offset_manager);
+               return -ENOMEM;
+       }
+
+       /* the DRM has 6 counters */
+       dev->counters = 6;
+       dev->types[0] = _DRM_STAT_LOCK;
+       dev->types[1] = _DRM_STAT_OPENS;
+       dev->types[2] = _DRM_STAT_CLOSES;
+       dev->types[3] = _DRM_STAT_IOCTLS;
+       dev->types[4] = _DRM_STAT_LOCKS;
+       dev->types[5] = _DRM_STAT_UNLOCKS;
+
+       dev->driver = driver;
+
+       if (drm_core_has_AGP(dev)) {
+               if (drm_device_is_agp(dev))
+                       dev->agp = drm_agp_init(dev);
+               if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+                   && (dev->agp == NULL)) {
+                       DRM_ERROR("Cannot initialize the agpgart module.\n");
+                       retcode = -EINVAL;
+                       goto error_out_unreg;
+               }
+
+               if (drm_core_has_MTRR(dev)) {
+                       if (dev->agp)
+                               dev->agp->agp_mtrr =
+                                   mtrr_add(dev->agp->agp_info.aper_base,
+                                            dev->agp->agp_info.aper_size *
+                                            1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+               }
+       }
+
+       if (dev->driver->load)
+               if ((retcode = dev->driver->load(dev, ent->driver_data)))
+                       goto error_out_unreg;
+
+       retcode = drm_ctxbitmap_init(dev);
+       if (retcode) {
+               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+               goto error_out_unreg;
+       }
+
+       drm_fence_manager_init(dev);
+       return 0;
+
+error_out_unreg:
+       drm_lastclose(dev);
+       return retcode;
+}
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param head structure to hold the assigned minor
+ * \return negative number on failure.
+ *
+ * Searches for an empty entry, initializes it to the given parameters, and
+ * creates the proc entry via drm_proc_init(). This routine assigns
+ * minor numbers to secondary heads of multi-headed cards.
+ */
+static int drm_get_head(struct drm_device * dev, struct drm_head * head)
+{
+       struct drm_head **heads = drm_heads;
+       int ret;
+       int minor;
+
+       DRM_DEBUG("\n");
+
+       for (minor = 0; minor < drm_cards_limit; minor++, heads++) {
+               if (!*heads) {
+
+                       *head = (struct drm_head) {
+                               .dev = dev,
+                               .device = MKDEV(DRM_MAJOR, minor),
+                               .minor = minor,
+                       };
+                       if ((ret =
+                            drm_proc_init(dev, minor, drm_proc_root,
+                                          &head->dev_root))) {
+                               printk(KERN_ERR
+                                      "DRM: Failed to initialize /proc/dri.\n");
+                               goto err_g1;
+                       }
+
+                       ret = drm_sysfs_device_add(dev, head);
+                       if (ret) {
+                               printk(KERN_ERR
+                                      "DRM: Error sysfs_device_add.\n");
+                               goto err_g2;
+                       }
+                       *heads = head;
+
+                       DRM_DEBUG("new minor assigned %d\n", minor);
+                       return 0;
+               }
+       }
+       DRM_ERROR("out of minors\n");
+       return -ENOMEM;
+err_g2:
+       drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
+err_g1:
+       *head = (struct drm_head) {
+               .dev = NULL};
+       return ret;
+}
+
+/**
+ * Register.
+ *
+ * \param pdev PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempts to get inter-module "drm" information. If we are first,
+ * registers the character device and inter-module information.
+ * Tries to register; if registration fails, backs out the previous work.
+ */
+int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+               struct drm_driver *driver)
+{
+       struct drm_device *dev;
+       int ret;
+
+       DRM_DEBUG("\n");
+
+       dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
+       if (!dev)
+               return -ENOMEM;
+
+       if (!drm_fb_loaded) {
+               pci_set_drvdata(pdev, dev);
+               ret = pci_request_regions(pdev, driver->pci_driver.name);
+               if (ret)
+                       goto err_g1;
+       }
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_g2;
+       pci_set_master(pdev);
+
+       if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
+               printk(KERN_ERR "DRM: fill_in_dev failed\n");
+               goto err_g3;
+       }
+       if ((ret = drm_get_head(dev, &dev->primary)))
+               goto err_g3;
+
+       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+                driver->name, driver->major, driver->minor, driver->patchlevel,
+                driver->date, dev->primary.minor);
+
+       return 0;
+
+ err_g3:
+       if (!drm_fb_loaded)
+               pci_disable_device(pdev);
+ err_g2:
+       if (!drm_fb_loaded)
+               pci_release_regions(pdev);
+ err_g1:
+       if (!drm_fb_loaded)
+               pci_set_drvdata(pdev, NULL);
+
+       drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+       printk(KERN_ERR "DRM: drm_get_dev failed.\n");
+       return ret;
+}
+EXPORT_SYMBOL(drm_get_dev);
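+
+/*
+ * Hedged usage sketch: a driver's PCI probe callback of this era typically
+ * just forwards here ("foo_driver" is a hypothetical struct drm_driver):
+ *
+ *     static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ *     {
+ *             return drm_get_dev(pdev, ent, &foo_driver);
+ *     }
+ */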
+
+
+/**
+ * Put a device minor number.
+ *
+ * \param dev device data structure
+ * \return always zero
+ *
+ * Cleans up the proc resources. If this is the last minor, releases the
+ * foreign "drm" data; otherwise unregisters the "drm" data, frees the dev
+ * list, and unregisters the character device.
+ */
+int drm_put_dev(struct drm_device * dev)
+{
+       DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
+
+       if (dev->unique) {
+               drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
+               dev->unique = NULL;
+               dev->unique_len = 0;
+       }
+       if (dev->devname) {
+               drm_free(dev->devname, strlen(dev->devname) + 1,
+                        DRM_MEM_DRIVER);
+               dev->devname = NULL;
+       }
+       drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+       return 0;
+}
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param head structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_head(struct drm_head * head)
+{
+       int minor = head->minor;
+
+       DRM_DEBUG("release secondary minor %d\n", minor);
+
+       drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
+       drm_sysfs_device_remove(head->dev);
+
+       *head = (struct drm_head) {.dev = NULL};
+
+       drm_heads[minor] = NULL;
+       return 0;
+}
diff --git a/psb-kernel-source-4.41.1/drm_sysfs.c b/psb-kernel-source-4.41.1/drm_sysfs.c
new file mode 100644 (file)
index 0000000..acd1d04
--- /dev/null
@@ -0,0 +1,212 @@
+
+/*
+ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
+ *               extra sysfs attributes from DRM. The normal drm_sysfs_class
+ *               does not allow adding attributes.
+ *
+ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2003-2004 IBM Corp.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include "drm_core.h"
+#include "drmP.h"
+
+#define to_drm_device(d) container_of(d, struct drm_device, dev)
+
+/**
+ * drm_sysfs_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
+{
+       struct drm_device *drm_dev = to_drm_device(dev);
+
+       printk(KERN_ERR "%s\n", __FUNCTION__);
+
+       if (drm_dev->driver->suspend)
+               return drm_dev->driver->suspend(drm_dev);
+
+       return 0;
+}
+
+/**
+ * drm_sysfs_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_sysfs_resume(struct device *dev)
+{
+       struct drm_device *drm_dev = to_drm_device(dev);
+
+       if (drm_dev->driver->resume)
+               return drm_dev->driver->resume(drm_dev);
+
+       return 0;
+}
+
+/* Display the version of drm_core. This doesn't work right in the current design. */
+static ssize_t version_show(struct class *dev, char *buf)
+{
+       return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
+                      CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+}
+
+static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
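+
+/*
+ * With the class registered, userspace can read this attribute; the output
+ * below is an illustrative guess at the CORE_* values, not a guarantee:
+ *
+ *     $ cat /sys/class/drm/version
+ *     drm 1.1.0 20060810
+ */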
+
+/**
+ * drm_sysfs_create - create a struct drm_sysfs_class structure
+ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create a DRM class pointer that can then be used
+ * in calls to drm_sysfs_device_add().
+ *
+ * Note, the pointer created here is to be destroyed when finished by making a
+ * call to drm_sysfs_destroy().
+ */
+struct class *drm_sysfs_create(struct module *owner, char *name)
+{
+       struct class *class;
+       int err;
+
+       class = class_create(owner, name);
+       if (IS_ERR(class)) {
+               err = PTR_ERR(class);
+               goto err_out;
+       }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+       class->suspend = drm_sysfs_suspend;
+       class->resume = drm_sysfs_resume;
+#endif
+
+       err = class_create_file(class, &class_attr_version);
+       if (err)
+               goto err_out_class;
+
+       return class;
+
+err_out_class:
+       class_destroy(class);
+err_out:
+       return ERR_PTR(err);
+}
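+
+/*
+ * Hedged sketch of the expected call site (the DRM core sets up the class
+ * at module load; error handling abbreviated):
+ *
+ *     drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+ *     if (IS_ERR(drm_class))
+ *             return PTR_ERR(drm_class);
+ */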
+
+/**
+ * drm_sysfs_destroy - destroys DRM class
+ *
+ * Destroy the DRM device class.
+ */
+void drm_sysfs_destroy(void)
+{
+       if ((drm_class == NULL) || (IS_ERR(drm_class)))
+               return;
+       class_remove_file(drm_class, &class_attr_version);
+       class_destroy(drm_class);
+}
+
+static ssize_t show_dri(struct device *device, struct device_attribute *attr,
+                       char *buf)
+{
+       struct drm_device *dev = to_drm_device(device);
+       if (dev->driver->dri_library_name)
+               return dev->driver->dri_library_name(dev, buf);
+       return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
+}
+
+static struct device_attribute device_attrs[] = {
+       __ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
+};
+
+/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff.  But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+       return;
+}
+
+/**
+ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+ * @dev: DRM device to be added
+ * @head: DRM head in question
+ *
+ * Add a DRM device to the DRM's device model class.  We use @dev's PCI device
+ * as the parent for the Linux device, and make sure it has a file containing
+ * the driver we're using (for userspace compatibility).
+ */
+int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
+{
+       int err;
+       int i, j;
+
+       dev->dev.parent = &dev->pdev->dev;
+       dev->dev.class = drm_class;
+       dev->dev.release = drm_sysfs_device_release;
+       /*
+        * This will actually add the major:minor file so that udev
+        * will create the device node.  We don't want to do that just
+        * yet...
+        */
+       dev->dev.devt = head->device;
+       snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
+
+       err = device_register(&dev->dev);
+       if (err) {
+               DRM_ERROR("device add failed: %d\n", err);
+               goto err_out;
+       }
+
+/*     for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+               err = device_create_file(&dev->dev, &device_attrs[i]);
+               if (err)
+                       goto err_out_files;
+       }
+
+       return 0;
+
+err_out_files:
+       if (i > 0)
+               for (j = 0; j < i; j++)
+                       device_remove_file(&dev->dev, &device_attrs[j]);
+       device_unregister(&dev->dev);
+*/
+err_out:
+
+       return err;
+}
+
+/**
+ * drm_sysfs_device_remove - remove DRM device
+ * @dev: DRM device to remove
+ *
+ * This call unregisters and cleans up a class device that was created with a
+ * call to drm_sysfs_device_add()
+ */
+void drm_sysfs_device_remove(struct drm_device *dev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+               device_remove_file(&dev->dev, &device_attrs[i]);
+       device_unregister(&dev->dev);
+}
diff --git a/psb-kernel-source-4.41.1/drm_ttm.c b/psb-kernel-source-4.41.1/drm_ttm.c
new file mode 100644 (file)
index 0000000..e0afb04
--- /dev/null
@@ -0,0 +1,430 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+static void drm_ttm_ipi_handler(void *null)
+{
+       flush_agp_cache();
+}
+
+void drm_ttm_cache_flush(void)
+{
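+       /* on_each_cpu() dropped its "retry" argument in kernel 2.6.27, hence the split. */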
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+       if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
+#else
+       if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
+#endif
+               DRM_ERROR("Timed out waiting for drm cache flush.\n");
+}
+EXPORT_SYMBOL(drm_ttm_cache_flush);
+
+/*
+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
+ */
+
+static void ttm_alloc_pages(struct drm_ttm *ttm)
+{
+       unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
+       ttm->pages = NULL;
+
+       if (size <= PAGE_SIZE)
+               ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
+
+       if (!ttm->pages) {
+               ttm->pages = vmalloc_user(size);
+               if (ttm->pages)
+                       ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
+       }
+}
+
+static void ttm_free_pages(struct drm_ttm *ttm)
+{
+       unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
+
+       if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
+               vfree(ttm->pages);
+               ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
+       } else {
+               drm_free(ttm->pages, size, DRM_MEM_TTM);
+       }
+       ttm->pages = NULL;
+}
+
+static struct page *drm_ttm_alloc_page(void)
+{
+       struct page *page;
+
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+       if (!page)
+               return NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+       SetPageReserved(page);
+#endif
+       return page;
+}
+
+/*
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
+ */
+
+static int drm_set_caching(struct drm_ttm *ttm, int noncached)
+{
+       int i;
+       struct page **cur_page;
+       int do_tlbflush = 0;
+
+       if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
+               return 0;
+
+       if (noncached)
+               drm_ttm_cache_flush();
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               cur_page = ttm->pages + i;
+               if (*cur_page) {
+                       if (!PageHighMem(*cur_page)) {
+                               if (noncached) {
+                                       map_page_into_agp(*cur_page);
+                               } else {
+                                       unmap_page_from_agp(*cur_page);
+                               }
+                               do_tlbflush = 1;
+                       }
+               }
+       }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
+       if (do_tlbflush)
+               flush_agp_mappings();
+#endif
+
+       DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
+
+       return 0;
+}
+
+
+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
+{
+       int write;
+       int dirty;
+       struct page *page;
+       int i;
+
+       BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
+       write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
+       dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               page = ttm->pages[i];
+               if (page == NULL)
+                       continue;
+
+               if (page == ttm->dummy_read_page) {
+                       BUG_ON(write);
+                       continue;
+               }
+
+               if (write && dirty && !PageReserved(page))
+                       set_page_dirty_lock(page);
+
+               ttm->pages[i] = NULL;
+               put_page(page);
+       }
+}
+
+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
+{
+       int i;
+       struct drm_buffer_manager *bm = &ttm->dev->bm;
+       struct page **cur_page;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               cur_page = ttm->pages + i;
+               if (*cur_page) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+                       ClearPageReserved(*cur_page);
+#endif
+                       if (page_count(*cur_page) != 1)
+                               DRM_ERROR("Erroneous page count. Leaking pages.\n");
+                       if (page_mapped(*cur_page))
+                               DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
+                       __free_page(*cur_page);
+                       --bm->cur_pages;
+               }
+       }
+}
+
+/*
+ * Free all resources associated with a ttm.
+ */
+
+int drm_destroy_ttm(struct drm_ttm *ttm)
+{
+       struct drm_ttm_backend *be;
+
+       if (!ttm)
+               return 0;
+
+       be = ttm->be;
+       if (be) {
+               be->func->destroy(be);
+               ttm->be = NULL;
+       }
+
+       if (ttm->pages) {
+               if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
+                       drm_set_caching(ttm, 0);
+
+               if (ttm->page_flags & DRM_TTM_PAGE_USER)
+                       drm_ttm_free_user_pages(ttm);
+               else
+                       drm_ttm_free_alloced_pages(ttm);
+
+               ttm_free_pages(ttm);
+       }
+
+       return 0;
+}
+
+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
+{
+       struct page *p;
+       struct drm_buffer_manager *bm = &ttm->dev->bm;
+
+       p = ttm->pages[index];
+       if (!p) {
+               p = drm_ttm_alloc_page();
+               if (!p)
+                       return NULL;
+               ttm->pages[index] = p;
+               ++bm->cur_pages;
+       }
+       return p;
+}
+EXPORT_SYMBOL(drm_ttm_get_page);
+
+int drm_ttm_set_user(struct drm_ttm *ttm,
+                    struct task_struct *tsk,
+                    int write,
+                    unsigned long start,
+                    unsigned long num_pages,
+                    struct page *dummy_read_page)
+{
+       struct mm_struct *mm = tsk->mm;
+       int ret;
+       int i;
+
+       BUG_ON(num_pages != ttm->num_pages);
+
+       ttm->dummy_read_page = dummy_read_page;
+       ttm->page_flags |= DRM_TTM_PAGE_USER |
+               ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
+
+
+       down_read(&mm->mmap_sem);
+       ret = get_user_pages(tsk, mm, start, num_pages,
+                            write, 0, ttm->pages, NULL);
+       up_read(&mm->mmap_sem);
+
+       if (ret != num_pages && write) {
+               drm_ttm_free_user_pages(ttm);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_pages; ++i) {
+               if (ttm->pages[i] == NULL)
+                       ttm->pages[i] = ttm->dummy_read_page;
+       }
+
+       return 0;
+}
+
+int drm_ttm_populate(struct drm_ttm *ttm)
+{
+       struct page *page;
+       unsigned long i;
+       struct drm_ttm_backend *be;
+
+       if (ttm->state != ttm_unpopulated)
+               return 0;
+
+       be = ttm->be;
+       for (i = 0; i < ttm->num_pages; ++i) {
+               page = drm_ttm_get_page(ttm, i);
+               if (!page)
+                       return -ENOMEM;
+       }
+       be->func->populate(be, ttm->num_pages, ttm->pages);
+       ttm->state = ttm_unbound;
+       return 0;
+}
+
+/*
+ * Calculate the estimated pinned memory usage of a ttm.
+ */
+
+unsigned long drm_ttm_size(struct drm_device *dev,
+                          unsigned long num_pages,
+                          int user_bo)
+{
+       struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
+       unsigned long tmp;
+
+       tmp = drm_size_align(sizeof(struct drm_ttm)) +
+               drm_size_align(num_pages * sizeof(struct page *)) +
+               ((user_bo) ? 0 : drm_size_align(num_pages * PAGE_SIZE));
+
+       if (bo_driver->backend_size)
+               tmp += bo_driver->backend_size(dev, num_pages);
+       else
+               tmp += drm_size_align(num_pages * sizeof(struct page *)) +
+                       3*drm_size_align(sizeof(struct drm_ttm_backend));
+       return tmp;
+}
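+
+/*
+ * Worked example (assuming 4 KiB pages and no backend_size hook): for a
+ * 1 MiB non-user bo, num_pages = 256, so the estimate is roughly
+ * sizeof(struct drm_ttm) + 2 * 256 * sizeof(struct page *) +
+ * 256 * PAGE_SIZE + 3 * sizeof(struct drm_ttm_backend), each size-aligned.
+ */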
+
+
+/*
+ * Initialize a ttm.
+ */
+
+struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
+{
+       struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
+       struct drm_ttm *ttm;
+
+       if (!bo_driver)
+               return NULL;
+
+       ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
+       if (!ttm)
+               return NULL;
+
+       ttm->dev = dev;
+       atomic_set(&ttm->vma_count, 0);
+
+       ttm->destroy = 0;
+       ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       ttm->page_flags = 0;
+
+       /*
+        * Account also for AGP module memory usage.
+        */
+
+       ttm_alloc_pages(ttm);
+       if (!ttm->pages) {
+               drm_destroy_ttm(ttm);
+               DRM_ERROR("Failed allocating page table\n");
+               return NULL;
+       }
+       ttm->be = bo_driver->create_ttm_backend_entry(dev);
+       if (!ttm->be) {
+               drm_destroy_ttm(ttm);
+               DRM_ERROR("Failed creating ttm backend entry\n");
+               return NULL;
+       }
+       ttm->state = ttm_unpopulated;
+       return ttm;
+}
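+
+/*
+ * Lifecycle sketch (illustrative only; error handling elided; bo_mem
+ * describes the target memory region):
+ *
+ *     ttm = drm_ttm_init(dev, size);
+ *     drm_bind_ttm(ttm, &bo_mem);     (populates pages, then binds the backend)
+ *     ...
+ *     drm_ttm_unbind(ttm);
+ *     drm_destroy_ttm(ttm);
+ */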
+
+/*
+ * Unbind a ttm region from the aperture.
+ */
+
+void drm_ttm_evict(struct drm_ttm *ttm)
+{
+       struct drm_ttm_backend *be = ttm->be;
+       int ret;
+
+       if (ttm->state == ttm_bound) {
+               ret = be->func->unbind(be);
+               BUG_ON(ret);
+       }
+
+       ttm->state = ttm_evicted;
+}
+
+void drm_ttm_fixup_caching(struct drm_ttm *ttm)
+{
+
+       if (ttm->state == ttm_evicted) {
+               struct drm_ttm_backend *be = ttm->be;
+               if (be->func->needs_ub_cache_adjust(be))
+                       drm_set_caching(ttm, 0);
+               ttm->state = ttm_unbound;
+       }
+}
+
+void drm_ttm_unbind(struct drm_ttm *ttm)
+{
+       if (ttm->state == ttm_bound)
+               drm_ttm_evict(ttm);
+
+       drm_ttm_fixup_caching(ttm);
+}
+
+int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
+{
+       struct drm_bo_driver *bo_driver;
+       int ret = 0;
+       struct drm_ttm_backend *be;
+
+       if (!ttm)
+               return -EINVAL;
+
+       bo_driver = ttm->dev->driver->bo_driver;
+       if (ttm->state == ttm_bound)
+               return 0;
+
+       be = ttm->be;
+
+       ret = drm_ttm_populate(ttm);
+       if (ret)
+               return ret;
+
+       if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
+               drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
+       else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
+                  bo_driver->ttm_cache_flush)
+               bo_driver->ttm_cache_flush(ttm);
+
+       ret = be->func->bind(be, bo_mem);
+       if (ret) {
+               ttm->state = ttm_evicted;
+               DRM_ERROR("Couldn't bind backend.\n");
+               return ret;
+       }
+
+       ttm->state = ttm_bound;
+       if (ttm->page_flags & DRM_TTM_PAGE_USER)
+               ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
+       return 0;
+}
+EXPORT_SYMBOL(drm_bind_ttm);
diff --git a/psb-kernel-source-4.41.1/drm_vm.c b/psb-kernel-source-4.41.1/drm_vm.c
new file mode 100644 (file)
index 0000000..5baefee
--- /dev/null
@@ -0,0 +1,1143 @@
+/*
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#if defined(__ia64__)
+#include <linux/efi.h>
+#endif
+
+static void drm_vm_open(struct vm_area_struct *vma);
+static void drm_vm_close(struct vm_area_struct *vma);
+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
+                             struct file *filp,
+                             drm_local_map_t *map);
+
+pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+       pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+#ifdef USE_PAT_WC
+#warning using pat
+       if (drm_use_pat() && map_type == _DRM_TTM) {
+               pgprot_val(tmp) |= _PAGE_PAT;
+               return tmp;
+       }
+#endif
+       if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+               pgprot_val(tmp) |= _PAGE_PCD;
+               pgprot_val(tmp) &= ~_PAGE_PWT;
+       }
+#elif defined(__powerpc__)
+       pgprot_val(tmp) |= _PAGE_NO_CACHE;
+       if (map_type == _DRM_REGISTERS)
+               pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+       if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+                                   vma->vm_start))
+               tmp = pgprot_writecombine(tmp);
+       else
+               tmp = pgprot_noncached(tmp);
+#endif
+       return tmp;
+}
+
+#ifndef DRM_VM_NOPAGE
+/**
+ * \c fault method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information; the faulted page is returned in vmf->page.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and return it.
+ */
+#if __OS_HAS_AGP
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_map *map = NULL;
+       struct drm_map_list *r_list;
+       struct drm_hash_item *hash;
+
+       /*
+        * Find the right map
+        */
+       if (!drm_core_has_AGP(dev))
+               goto vm_fault_error;
+
+       if (!dev->agp || !dev->agp->cant_use_aperture)
+               goto vm_fault_error;
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+               goto vm_fault_error;
+
+       r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+       map = r_list->map;
+
+       if (map && map->type == _DRM_AGP) {
+               /*
+                * Using vm_pgoff as a selector forces us to use this unusual
+                * addressing scheme.
+                */
+               unsigned long offset = (unsigned long)vmf->virtual_address -
+                                                               vma->vm_start;
+               unsigned long baddr = map->offset + offset;
+               struct drm_agp_mem *agpmem;
+               struct page *page;
+
+#ifdef __alpha__
+               /*
+                * Adjust to a bus-relative address
+                */
+               baddr -= dev->hose->mem_space->start;
+#endif
+
+               /*
+                * It's AGP memory - find the real physical page to map
+                */
+               list_for_each_entry(agpmem, &dev->agp->memory, head) {
+                       if (agpmem->bound <= baddr &&
+                           agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+                               break;
+               }
+
+               if (!agpmem)
+                       goto vm_fault_error;
+
+               /*
+                * Get the page, inc the use count, and return it
+                */
+               offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+               page = virt_to_page(__va(agpmem->memory->memory[offset]));
+               get_page(page);
+               vmf->page = page;
+
+               DRM_DEBUG
+                   ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+                    baddr, __va(agpmem->memory->memory[offset]), offset,
+                    page_count(page));
+               return 0;
+       }
+vm_fault_error:
+       return VM_FAULT_SIGBUS; /* Disallow mremap */
+}
+#else                          /* __OS_HAS_AGP */
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+#endif                         /* __OS_HAS_AGP */
+
+/**
+ * \c fault method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information; the faulted page is returned in vmf->page.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+       unsigned long offset;
+       unsigned long i;
+       struct page *page;
+
+       if (!map)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       i = (unsigned long)map->handle + offset;
+       page = vmalloc_to_page((void *)i);
+       if (!page)
+               return VM_FAULT_SIGBUS;
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("shm_fault 0x%lx\n", offset);
+       return 0;
+}
+#endif
+
+/**
+ * \c close method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Deletes map information if we are the last
+ * person to close a mapping and it's not in the global maplist.
+ */
+static void drm_vm_shm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_vma_entry *pt, *temp;
+       struct drm_map *map;
+       struct drm_map_list *r_list;
+       int found_maps = 0;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_dec(&dev->vma_count);
+
+       map = vma->vm_private_data;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma->vm_private_data == map)
+                       found_maps++;
+               if (pt->vma == vma) {
+                       list_del(&pt->head);
+                       drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+               }
+       }
+       /* We were the only map that was found */
+       if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
+               /* Check to see if we are in the maplist; if we are not, then
+                * we delete this mapping's information.
+                */
+               found_maps = 0;
+               list_for_each_entry(r_list, &dev->maplist, head) {
+                       if (r_list->map == map)
+                               found_maps++;
+               }
+
+               if (!found_maps) {
+                       drm_dma_handle_t dmah;
+
+                       switch (map->type) {
+                       case _DRM_REGISTERS:
+                       case _DRM_FRAME_BUFFER:
+                               if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                                       int retcode;
+                                       retcode = mtrr_del(map->mtrr,
+                                                          map->offset,
+                                                          map->size);
+                                       DRM_DEBUG("mtrr_del = %d\n", retcode);
+                               }
+                               iounmap(map->handle);
+                               break;
+                       case _DRM_SHM:
+                               vfree(map->handle);
+                               break;
+                       case _DRM_AGP:
+                       case _DRM_SCATTER_GATHER:
+                               break;
+                       case _DRM_CONSISTENT:
+                               dmah.vaddr = map->handle;
+                               dmah.busaddr = map->offset;
+                               dmah.size = map->size;
+                               __drm_pci_free(dev, &dmah);
+                               break;
+                       case _DRM_TTM:
+                               BUG_ON(1);
+                               break;
+                       }
+                       drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+#ifndef DRM_VM_NOPAGE
+/**
+ * \c fault method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information; the faulted page is returned in vmf->page.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_device_dma *dma = dev->dma;
+       unsigned long offset;
+       unsigned long page_nr;
+       struct page *page;
+
+       if (!dma)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!dma->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
+       page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
+       page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+       get_page(page);
+       vmf->page = page;
+
+       DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+       return 0;
+}
+
+/**
+ * \c fault method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information; the faulted page is returned in vmf->page.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long offset;
+       unsigned long map_offset;
+       unsigned long page_offset;
+       struct page *page;
+
+       if (!entry)
+               return VM_FAULT_SIGBUS; /* Error */
+       if (!entry->pagelist)
+               return VM_FAULT_SIGBUS; /* Nothing allocated */
+
+       offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+       map_offset = map->offset - (unsigned long)dev->sg->virtual;
+       page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+       page = entry->pagelist[page_offset];
+       get_page(page);
+       vmf->page = page;
+
+       return 0;
+}
+#endif
+
+/** AGP virtual memory operations */
+static struct vm_operations_struct drm_vm_ops = {
+#ifdef DRM_VM_NOPAGE
+       .nopage = drm_vm_nopage,
+#else
+       .fault = drm_do_vm_fault,
+#endif
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Shared virtual memory operations */
+static struct vm_operations_struct drm_vm_shm_ops = {
+#ifdef DRM_VM_NOPAGE
+       .nopage = drm_vm_shm_nopage,
+#else
+       .fault = drm_do_vm_shm_fault,
+#endif
+       .open = drm_vm_open,
+       .close = drm_vm_shm_close,
+};
+
+/** DMA virtual memory operations */
+static struct vm_operations_struct drm_vm_dma_ops = {
+#ifdef DRM_VM_NOPAGE
+       .nopage = drm_vm_dma_nopage,
+#else
+       .fault = drm_do_vm_dma_fault,
+#endif
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/** Scatter-gather virtual memory operations */
+static struct vm_operations_struct drm_vm_sg_ops = {
+#ifdef DRM_VM_NOPAGE
+       .nopage = drm_vm_sg_nopage,
+#else
+       .fault = drm_do_vm_sg_fault,
+#endif
+       .open = drm_vm_open,
+       .close = drm_vm_close,
+};
+
+/**
+ * \c open method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Create a new drm_vma_entry structure as the \p vma private data entry and
+ * add it to drm_device::vmalist.
+ */
+static void drm_vm_open_locked(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_vma_entry *vma_entry;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_inc(&dev->vma_count);
+
+       vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
+       if (vma_entry) {
+               vma_entry->vma = vma;
+               vma_entry->pid = current->pid;
+               list_add(&vma_entry->head, &dev->vmalist);
+       }
+}
+
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->head->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_vma_entry *pt, *temp;
+
+       DRM_DEBUG("0x%08lx,0x%08lx\n",
+                 vma->vm_start, vma->vm_end - vma->vm_start);
+       atomic_dec(&dev->vma_count);
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+               if (pt->vma == vma) {
+                       list_del(&pt->head);
+                       drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+                       break;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
+
+/**
+ * mmap DMA memory.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
+ * the file pointer, and calls drm_vm_open_locked().
+ */
+static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev;
+       struct drm_device_dma *dma;
+       unsigned long length = vma->vm_end - vma->vm_start;
+
+       dev = priv->head->dev;
+       dma = dev->dma;
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       /* Length must match exact page count */
+       if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
+               return -EINVAL;
+       }
+
+       if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       vma->vm_ops = &drm_vm_dma_ops;
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+unsigned long drm_core_get_map_ofs(struct drm_map * map)
+{
+       return map->offset;
+}
+EXPORT_SYMBOL(drm_core_get_map_ofs);
+
+unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
+{
+#ifdef __alpha__
+       return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+#else
+       return 0;
+#endif
+}
+EXPORT_SYMBOL(drm_core_get_reg_ofs);
+
+/**
+ * mmap DRM memory.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the virtual memory area has no offset associated with it, it is a DMA
+ * area, so drm_mmap_dma() is called. Otherwise this searches for the map in
+ * drm_device::maplist, checks that the restricted flag is not set, sets the
+ * virtual memory operations according to the map type, and remaps the pages.
+ * Finally it sets the file pointer and calls drm_vm_open_locked().
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->head->dev;
+       struct drm_map *map = NULL;
+       unsigned long offset = 0;
+       struct drm_hash_item *hash;
+
+       DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+                 vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+       if (!priv->authenticated)
+               return -EACCES;
+
+       /* We check for "dma". On Apple's UniNorth, it's valid to have
+        * the AGP mapped at physical address 0
+        * --BenH.
+        */
+
+       if (!vma->vm_pgoff
+#if __OS_HAS_AGP
+           && (!dev->agp
+               || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+           )
+               return drm_mmap_dma(filp, vma);
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
+               DRM_ERROR("Could not find map\n");
+               return -EINVAL;
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+               return -EPERM;
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
+               vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+               pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+               /* Ye gads this is ugly.  With more thought
+                  we could move this up higher and use
+                  `protection_map' instead.  */
+               vma->vm_page_prot =
+                   __pgprot(pte_val
+                            (pte_wrprotect
+                             (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+       }
+
+       switch (map->type) {
+       case _DRM_AGP:
+               if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+                       /*
+                        * On some platforms we can't access the bus DMA address from the
+                        * CPU, so for memory of type _DRM_AGP we sort out the real physical
+                        * pages and mappings in the fault handler.
+                        */
+#if defined(__powerpc__)
+                       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
+                       vma->vm_ops = &drm_vm_ops;
+                       break;
+               }
+               /* fall through to _DRM_FRAME_BUFFER... */
+       case _DRM_FRAME_BUFFER:
+       case _DRM_REGISTERS:
+               offset = dev->driver->get_reg_ofs(dev);
+               vma->vm_flags |= VM_IO; /* not in core dump */
+               vma->vm_page_prot = drm_io_prot(map->type, vma);
+#ifdef __sparc__
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+               if (io_remap_pfn_range(vma, vma->vm_start,
+                                      (map->offset + offset) >> PAGE_SHIFT,
+                                      vma->vm_end - vma->vm_start,
+                                      vma->vm_page_prot))
+                       return -EAGAIN;
+               DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
+                         " offset = 0x%lx\n",
+                         map->type,
+                         vma->vm_start, vma->vm_end, map->offset + offset);
+               vma->vm_ops = &drm_vm_ops;
+               break;
+       case _DRM_CONSISTENT:
+               /* Consistent memory is really like shared memory. But
+                * it's allocated in a different way, so avoid nopage */
+               if (remap_pfn_range(vma, vma->vm_start,
+                   page_to_pfn(virt_to_page(map->handle)),
+                   vma->vm_end - vma->vm_start, vma->vm_page_prot))
+                       return -EAGAIN;
+       /* fall through to _DRM_SHM */
+       case _DRM_SHM:
+               vma->vm_ops = &drm_vm_shm_ops;
+               vma->vm_private_data = (void *)map;
+               /* Don't let this area swap.  Change when
+                  DRM_KERNEL advisory is supported. */
+               vma->vm_flags |= VM_RESERVED;
+               break;
+       case _DRM_SCATTER_GATHER:
+               vma->vm_ops = &drm_vm_sg_ops;
+               vma->vm_private_data = (void *)map;
+               vma->vm_flags |= VM_RESERVED;
+               break;
+       case _DRM_TTM:
+               return drm_bo_mmap_locked(vma, filp, map);
+       default:
+               return -EINVAL; /* This should never happen. */
+       }
+       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+       return 0;
+}
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->head->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_mmap_locked(filp, vma);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mmap);
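+
+/*
+ * Hedged sketch: drivers expose this through their file_operations
+ * ("foo_fops" is a hypothetical name):
+ *
+ *     static const struct file_operations foo_fops = {
+ *             .owner = THIS_MODULE,
+ *             .open = drm_open,
+ *             .mmap = drm_mmap,
+ *     };
+ */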
+
+/**
+ * buffer object vm functions.
+ */
+
+/**
+ * \c Pagefault method for buffer objects.
+ *
+ * \param vma Virtual memory area.
+ * \param vmf Fault information (or the faulting address on older kernels).
+ * \return VM_FAULT_NOPAGE on success or refault; VM_FAULT_SIGBUS or
+ * VM_FAULT_OOM on error. The pfn is inserted manually.
+ *
+ * It's important that pfns are inserted while holding the bo->mutex lock;
+ * otherwise we might race with unmap_mapping_range(), which is always
+ * called with the bo->mutex lock held.
+ *
+ * We're modifying the page attribute bits of the vma->vm_page_prot field
+ * without holding the mmap_sem in write mode, only in read mode. These
+ * bits are not used by the mm subsystem code, and we consider them
+ * protected by the bo->mutex lock.
+ */
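+
+/*
+ * Hedged sketch of how the fault handler is typically wired up (the real
+ * vm_operations_struct for buffer objects lives elsewhere in this tree;
+ * the .open/.close names here are assumptions):
+ *
+ *     static struct vm_operations_struct drm_bo_vm_ops = {
+ *             .fault = drm_bo_vm_fault,
+ *             .open = drm_bo_vm_open,
+ *             .close = drm_bo_vm_close,
+ *     };
+ */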
+#ifdef DRM_FULL_MM_COMPAT
+#define DRM_NOPFN_EXTRA 15 /* Fault in 16 pages at a time */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+int drm_bo_vm_fault(struct vm_area_struct *vma,
+                             struct vm_fault *vmf)
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       unsigned long page_offset;
+       struct page *page = NULL;
+       struct drm_ttm *ttm = NULL;
+       struct drm_device *dev;
+       unsigned long pfn;
+       int err;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       int i;
+       unsigned long ret = VM_FAULT_NOPAGE;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+
+       if (address > vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
+       dev = bo->dev;
+       err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+       if (err)
+               return VM_FAULT_NOPAGE;
+
+       err = mutex_lock_interruptible(&bo->mutex);
+       if (err) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return VM_FAULT_NOPAGE;
+       }
+
+       err = drm_bo_wait(bo, 0, 0, 0);
+       if (err) {
+               ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+               goto out_unlock;
+       }
+
+       /*
+        * If buffer happens to be in a non-mappable location,
+        * move it to a mappable.
+        */
+
+       if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+               uint32_t new_mask = bo->mem.mask |
+                       DRM_BO_FLAG_MAPPABLE |
+                       DRM_BO_FLAG_FORCE_MAPPABLE;
+               err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+               if (err) {
+                       ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+                       goto out_unlock;
+               }
+       }
+
+       err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+                               &bus_size);
+
+       if (err) {
+               ret = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+       if (bus_size) {
+               struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
+
+               pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+               vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+       } else {
+               ttm = bo->ttm;
+
+               drm_ttm_fixup_caching(ttm);
+               page = drm_ttm_get_page(ttm, page_offset);
+               if (!page) {
+                       ret = VM_FAULT_OOM;
+                       goto out_unlock;
+               }
+               pfn = page_to_pfn(page);
+               vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
+                       vm_get_page_prot(vma->vm_flags) :
+                       drm_io_prot(_DRM_TTM, vma);
+       }
+
+       err = vm_insert_pfn(vma, address, pfn);
+       if (err) {
+               ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
+               goto out_unlock;
+       }
+
+       for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
+
+               if (++page_offset == bo->mem.num_pages)
+                       break;
+               address = vma->vm_start + (page_offset << PAGE_SHIFT);
+               if (address >= vma->vm_end)
+                       break;
+               if (bus_size) {
+                       pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) 
+                               + page_offset;
+               } else {
+                       page = drm_ttm_get_page(ttm, page_offset);
+                       if (!page)
+                               break;
+                       pfn = page_to_pfn(page);
+               }
+               if (vm_insert_pfn(vma, address, pfn))
+                       break;
+       }
+out_unlock:
+       mutex_unlock(&bo->mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return ret;
+}
+
+int drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                             struct vm_fault *vmf )
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       unsigned long page_offset;
+       struct page *page = NULL;
+       struct drm_ttm *ttm = NULL;
+       struct drm_device *dev;
+       unsigned long pfn;
+       int err;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       int i;
+       unsigned long ret = VM_FAULT_NOPAGE;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+
+       if (address > vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
+       dev = bo->dev;
+       err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+       if (err)
+               return VM_FAULT_NOPAGE;
+
+       err = mutex_lock_interruptible(&bo->mutex);
+       if (err) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return VM_FAULT_NOPAGE;
+       }
+
+       err = drm_bo_wait(bo, 0, 0, 0);
+       if (err) {
+               ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+               goto out_unlock;
+       }
+
+       /*
+        * If buffer happens to be in a non-mappable location,
+        * move it to a mappable.
+        */
+
+       if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+               uint32_t new_mask = bo->mem.mask |
+                       DRM_BO_FLAG_MAPPABLE |
+                       DRM_BO_FLAG_FORCE_MAPPABLE;
+               err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+               if (err) {
+                       ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+                       goto out_unlock;
+               }
+       }
+
+       err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+                               &bus_size);
+
+       if (err) {
+               ret = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+       if (bus_size) {
+               struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
+
+               pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+               vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+       } else {
+               ttm = bo->ttm;
+
+               drm_ttm_fixup_caching(ttm);
+               page = drm_ttm_get_page(ttm, page_offset);
+               if (!page) {
+                       ret = VM_FAULT_OOM;
+                       goto out_unlock;
+               }
+               pfn = page_to_pfn(page);
+               vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
+                       vm_get_page_prot(vma->vm_flags) :
+                       drm_io_prot(_DRM_TTM, vma);
+       }
+
+       err = vm_insert_pfn(vma, address, pfn);
+       if (err) {
+               ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
+               goto out_unlock;
+       }
+
+       for (i = 0; i < DRM_NOPFN_EXTRA; ++i) {
+               if (++page_offset == bo->mem.num_pages)
+                       break;
+               address = vma->vm_start + (page_offset << PAGE_SHIFT);
+               if (address >= vma->vm_end)
+                       break;
+               if (bus_size) {
+                       pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
+                               page_offset;
+               } else {
+                       page = drm_ttm_get_page(ttm, page_offset);
+                       if (!page)
+                               break;
+                       pfn = page_to_pfn(page);
+               }
+               if (vm_insert_pfn(vma, address, pfn))
+                       break;
+       }
+out_unlock:
+       mutex_unlock(&bo->mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return ret;
+}
+
+#else
+unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                             unsigned long address)
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       unsigned long page_offset;
+       struct page *page = NULL;
+       struct drm_ttm *ttm = NULL;
+       struct drm_device *dev;
+       unsigned long pfn;
+       int err;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       int i;
+       unsigned long ret = NOPFN_REFAULT;
+
+       if (address > vma->vm_end)
+               return NOPFN_SIGBUS;
+
+       dev = bo->dev;
+       err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+       if (err)
+               return NOPFN_REFAULT;
+
+       err = mutex_lock_interruptible(&bo->mutex);
+       if (err) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return NOPFN_REFAULT;
+       }
+
+       err = drm_bo_wait(bo, 0, 0, 0);
+       if (err) {
+               ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+               goto out_unlock;
+       }
+
+       /*
+        * If buffer happens to be in a non-mappable location,
+        * move it to a mappable.
+        */
+
+       if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+               uint32_t new_mask = bo->mem.mask |
+                       DRM_BO_FLAG_MAPPABLE |
+                       DRM_BO_FLAG_FORCE_MAPPABLE;
+               err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+               if (err) {
+                       ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+                       goto out_unlock;
+               }
+       }
+
+       err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+                               &bus_size);
+
+       if (err) {
+               ret = NOPFN_SIGBUS;
+               goto out_unlock;
+       }
+
+       page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+       if (bus_size) {
+               struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
+
+               pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+               vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+       } else {
+               ttm = bo->ttm;
+
+               drm_ttm_fixup_caching(ttm);
+               page = drm_ttm_get_page(ttm, page_offset);
+               if (!page) {
+                       ret = NOPFN_OOM;
+                       goto out_unlock;
+               }
+               pfn = page_to_pfn(page);
+               vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
+                       vm_get_page_prot(vma->vm_flags) :
+                       drm_io_prot(_DRM_TTM, vma);
+       }
+
+       err = vm_insert_pfn(vma, address, pfn);
+       if (err) {
+               ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
+               goto out_unlock;
+       }
+
+       for (i = 0; i < DRM_NOPFN_EXTRA; ++i) {
+               if (++page_offset == bo->mem.num_pages)
+                       break;
+               address = vma->vm_start + (page_offset << PAGE_SHIFT);
+               if (address >= vma->vm_end)
+                       break;
+               if (bus_size) {
+                       pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
+                               page_offset;
+               } else {
+                       page = drm_ttm_get_page(ttm, page_offset);
+                       if (!page)
+                               break;
+                       pfn = page_to_pfn(page);
+               }
+               if (vm_insert_pfn(vma, address, pfn))
+                       break;
+       }
+out_unlock:
+       mutex_unlock(&bo->mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return ret;
+}
+#endif
+
+EXPORT_SYMBOL(drm_bo_vm_nopfn);
+#endif
+
+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+
+       drm_vm_open_locked(vma);
+       atomic_inc(&bo->usage);
+#ifdef DRM_ODD_MM_COMPAT
+       drm_bo_add_vma(bo, vma);
+#endif
+}
+
+/**
+ * \c vma open method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_open(struct vm_area_struct *vma)
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       struct drm_device *dev = bo->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_bo_vm_open_locked(vma);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c vma close method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+       struct drm_device *dev = bo->dev;
+
+       drm_vm_close(vma);
+       if (bo) {
+               mutex_lock(&dev->struct_mutex);
+#ifdef DRM_ODD_MM_COMPAT
+               drm_bo_delete_vma(bo, vma);
+#endif
+               drm_bo_usage_deref_locked((struct drm_buffer_object **)
+                                         &vma->vm_private_data);
+               mutex_unlock(&dev->struct_mutex);
+       }
+}
+
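+/* Pick the fault entry point that matches the running kernel: .fault on
+ * 2.6.27 and later, .nopfn on 2.6.19+ (or with DRM_FULL_MM_COMPAT), and
+ * the legacy .nopage handler otherwise.
+ */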
+static struct vm_operations_struct drm_bo_vm_ops = {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+       .fault = drm_bo_vm_fault,
+#else
+#ifdef DRM_FULL_MM_COMPAT
+       .nopfn = drm_bo_vm_nopfn,
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+       .nopfn = drm_bo_vm_nopfn,
+#else
+       .nopage = drm_bo_vm_nopage,
+#endif
+#endif
+#endif
+       .open = drm_bo_vm_open,
+       .close = drm_bo_vm_close,
+};
+
+/**
+ * mmap buffer object memory.
+ *
+ * \param vma virtual memory area.
+ * \param filp DRM file pointer.
+ * \param map The buffer object drm map.
+ * \return zero on success or a negative number on failure.
+ */
+
+int drm_bo_mmap_locked(struct vm_area_struct *vma,
+                      struct file *filp,
+                      drm_local_map_t *map)
+{
+       vma->vm_ops = &drm_bo_vm_ops;
+       vma->vm_private_data = map->handle;
+       vma->vm_file = filp;
+       vma->vm_flags |= VM_RESERVED | VM_IO;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+       vma->vm_flags |= VM_PFNMAP;
+#endif
+       drm_bo_vm_open_locked(vma);
+#ifdef DRM_ODD_MM_COMPAT
+       drm_bo_map_bound(vma);
+#endif
+       return 0;
+}
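
A minimal usage sketch (not part of this import): how a driver mmap path
would reach drm_bo_mmap_locked once the drm_local_map_t has been looked up
from the mmap offset. The example_bo_mmap name and the omitted lookup are
illustrative assumptions.

static int example_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                           drm_local_map_t *map)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev; /* priv->head->dev before 2.6.26 */
        int ret;

        /* The _locked suffix means the caller is expected to hold
         * dev->struct_mutex across the call.
         */
        mutex_lock(&dev->struct_mutex);
        ret = drm_bo_mmap_locked(vma, filp, map);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}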
diff --git a/psb-kernel-source-4.41.1/drm_vm_nopage_compat.c b/psb-kernel-source-4.41.1/drm_vm_nopage_compat.c
new file mode 100644 (file)
index 0000000..d0c7c5a
--- /dev/null
@@ -0,0 +1,279 @@
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#ifdef DRM_VM_NOPAGE
+/**
+ * \c nopage method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and return it.
+ */
+#if __OS_HAS_AGP
+static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
+                                               unsigned long address)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+       struct drm_device *dev = priv->minor->dev;
+#else
+       struct drm_device *dev = priv->head->dev;
+#endif
+       struct drm_map *map = NULL;
+       struct drm_map_list *r_list;
+       struct drm_hash_item *hash;
+
+       /*
+        * Find the right map
+        */
+       if (!drm_core_has_AGP(dev))
+               goto vm_nopage_error;
+
+       if (!dev->agp || !dev->agp->cant_use_aperture)
+               goto vm_nopage_error;
+
+       if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+               goto vm_nopage_error;
+
+       r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+       map = r_list->map;
+
+       if (map && map->type == _DRM_AGP) {
+               unsigned long offset = address - vma->vm_start;
+               unsigned long baddr = map->offset + offset;
+               struct drm_agp_mem *agpmem;
+               struct page *page;
+
+#ifdef __alpha__
+               /*
+                * Adjust to a bus-relative address
+                */
+               baddr -= dev->hose->mem_space->start;
+#endif
+
+               /*
+                * It's AGP memory - find the real physical page to map
+                */
+               list_for_each_entry(agpmem, &dev->agp->memory, head) {
+                       if (agpmem->bound <= baddr &&
+                           agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+                               break;
+               }
+
+               if (!agpmem)
+                       goto vm_nopage_error;
+
+               /*
+                * Get the page, inc the use count, and return it
+                */
+               offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+               page = virt_to_page(__va(agpmem->memory->memory[offset]));
+               get_page(page);
+
+#if 0
+               /* page_count() not defined everywhere */
+               DRM_DEBUG
+                   ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+                    baddr, __va(agpmem->memory->memory[offset]), offset,
+                    page_count(page));
+#endif
+
+               return page;
+       }
+      vm_nopage_error:
+       return NOPAGE_SIGBUS;   /* Disallow mremap */
+}
+#else                          /* __OS_HAS_AGP */
+static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
+                                               unsigned long address)
+{
+       return NOPAGE_SIGBUS;
+}
+#endif                         /* __OS_HAS_AGP */
+
+/**
+ * \c nopage method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
+                                                   unsigned long address)
+{
+       struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+       unsigned long offset;
+       unsigned long i;
+       struct page *page;
+
+       if (address > vma->vm_end)
+               return NOPAGE_SIGBUS;   /* Disallow mremap */
+       if (!map)
+               return NOPAGE_SIGBUS;   /* Nothing allocated */
+
+       offset = address - vma->vm_start;
+       i = (unsigned long)map->handle + offset;
+       page = vmalloc_to_page((void *)i);
+       if (!page)
+               return NOPAGE_SIGBUS;
+       get_page(page);
+
+       DRM_DEBUG("0x%lx\n", address);
+       return page;
+}
+
+/**
+ * \c nopage method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
+                                                   unsigned long address)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+       struct drm_device *dev = priv->minor->dev;
+#else
+       struct drm_device *dev = priv->head->dev;
+#endif
+       struct drm_device_dma *dma = dev->dma;
+       unsigned long offset;
+       unsigned long page_nr;
+       struct page *page;
+
+       if (!dma)
+               return NOPAGE_SIGBUS;   /* Error */
+       if (address > vma->vm_end)
+               return NOPAGE_SIGBUS;   /* Disallow mremap */
+       if (!dma->pagelist)
+               return NOPAGE_SIGBUS;   /* Nothing allocated */
+
+       offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
+       page_nr = offset >> PAGE_SHIFT;
+       page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+       get_page(page);
+
+       DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
+       return page;
+}
+
+/**
+ * \c nopage method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
+                                                  unsigned long address)
+{
+       struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+       struct drm_file *priv = vma->vm_file->private_data;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+       struct drm_device *dev = priv->minor->dev;
+#else
+       struct drm_device *dev = priv->head->dev;
+#endif
+       struct drm_sg_mem *entry = dev->sg;
+       unsigned long offset;
+       unsigned long map_offset;
+       unsigned long page_offset;
+       struct page *page;
+
+       DRM_DEBUG("\n");
+       if (!entry)
+               return NOPAGE_SIGBUS;   /* Error */
+       if (address > vma->vm_end)
+               return NOPAGE_SIGBUS;   /* Disallow mremap */
+       if (!entry->pagelist)
+               return NOPAGE_SIGBUS;   /* Nothing allocated */
+
+       offset = address - vma->vm_start;
+       map_offset = map->offset - (unsigned long)dev->sg->virtual;
+       page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+       page = entry->pagelist[page_offset];
+       get_page(page);
+
+       return page;
+}
+
+
+struct page *drm_vm_nopage(struct vm_area_struct *vma,
+                          unsigned long address, int *type)
+{
+       if (type)
+               *type = VM_FAULT_MINOR;
+       return drm_do_vm_nopage(vma, address);
+}
+
+struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
+                              unsigned long address, int *type)
+{
+       if (type)
+               *type = VM_FAULT_MINOR;
+       return drm_do_vm_shm_nopage(vma, address);
+}
+
+struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
+                              unsigned long address, int *type)
+{
+       if (type)
+               *type = VM_FAULT_MINOR;
+       return drm_do_vm_dma_nopage(vma, address);
+}
+
+struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
+                             unsigned long address, int *type)
+{
+       if (type)
+               *type = VM_FAULT_MINOR;
+       return drm_do_vm_sg_nopage(vma, address);
+}
+#endif
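
A usage sketch (assumed wiring, not part of this import): on kernels older
than 2.6.19 these wrappers slot straight into a vm_operations_struct;
drm_vm_open and drm_vm_close are assumed to be the matching open/close
helpers from drm_vm.c.

static struct vm_operations_struct drm_vm_compat_ops = {
        .nopage = drm_vm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};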
diff --git a/psb-kernel-source-4.41.1/i915_buffer.c b/psb-kernel-source-4.41.1/i915_buffer.c
new file mode 100644 (file)
index 0000000..4577a94
--- /dev/null
@@ -0,0 +1,286 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
+{
+       return drm_agp_init_ttm(dev);
+}
+
+int i915_fence_types(struct drm_buffer_object *bo,
+                    uint32_t *fclass,
+                    uint32_t *type)
+{
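+       /* Buffers mapped for read or write need the i915 RW fence stage
+        * (bit 1) in addition to EXE (bit 0), hence type 3.
+        */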
+       if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+               *type = 3;
+       else
+               *type = 1;
+       return 0;
+}
+
+int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
+{
+       /*
+        * FIXME: Only emit once per batchbuffer submission.
+        */
+
+       uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
+
+       if (flags & DRM_BO_FLAG_READ)
+               flush_cmd |= MI_READ_FLUSH;
+       if (flags & DRM_BO_FLAG_EXE)
+               flush_cmd |= MI_EXE_FLUSH;
+
+       return i915_emit_mi_flush(dev, flush_cmd);
+}
+
+int i915_init_mem_type(struct drm_device *dev, uint32_t type,
+                      struct drm_mem_type_manager *man)
+{
+       switch (type) {
+       case DRM_BO_MEM_LOCAL:
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CACHED;
+               man->drm_bus_maptype = 0;
+               man->gpu_offset = 0;
+               break;
+       case DRM_BO_MEM_TT:
+               if (!(drm_core_has_AGP(dev) && dev->agp)) {
+                       DRM_ERROR("AGP is not enabled for memory type %u\n",
+                                 (unsigned)type);
+                       return -EINVAL;
+               }
+               man->io_offset = dev->agp->agp_info.aper_base;
+               man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+               man->io_addr = NULL;
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
+               man->drm_bus_maptype = _DRM_AGP;
+               man->gpu_offset = 0;
+               break;
+       case DRM_BO_MEM_VRAM:
+               if (!(drm_core_has_AGP(dev) && dev->agp)) {
+                       DRM_ERROR("AGP is not enabled for memory type %u\n",
+                                 (unsigned)type);
+                       return -EINVAL;
+               }
+               man->io_offset = dev->agp->agp_info.aper_base;
+               man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+               man->io_addr = NULL;
+               man->flags =  _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
+               man->drm_bus_maptype = _DRM_AGP;
+               man->gpu_offset = 0;
+               break;
+       case DRM_BO_MEM_PRIV0: /* for OS preallocated space */
+               DRM_ERROR("PRIV0 not used yet.\n");
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
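+/* Fallback placement when a buffer must be evicted: local and TT buffers
+ * drop to local memory; anything else spills to cached TT pages.
+ */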
+uint32_t i915_evict_mask(struct drm_buffer_object *bo)
+{
+       switch (bo->mem.mem_type) {
+       case DRM_BO_MEM_LOCAL:
+       case DRM_BO_MEM_TT:
+               return DRM_BO_FLAG_MEM_LOCAL;
+       default:
+               return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+       }
+}
+
+#if 0 /* See comment below */
+
+static void i915_emit_copy_blit(struct drm_device * dev,
+                               uint32_t src_offset,
+                               uint32_t dst_offset,
+                               uint32_t pages, int direction)
+{
+       uint32_t cur_pages;
+       uint32_t stride = PAGE_SIZE;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       RING_LOCALS;
+
+       if (!dev_priv)
+               return;
+
+       i915_kernel_lost_context(dev);
+       while (pages > 0) {
+               cur_pages = pages;
+               if (cur_pages > 2048)
+                       cur_pages = 2048;
+               pages -= cur_pages;
+
+               BEGIN_LP_RING(6);
+               OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+                        XY_SRC_COPY_BLT_WRITE_RGB);
+               OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
+                        (1 << 25) | (direction ? (1 << 30) : 0));
+               OUT_RING((cur_pages << 16) | PAGE_SIZE);
+               OUT_RING(dst_offset);
+               OUT_RING(stride & 0xffff);
+               OUT_RING(src_offset);
+               ADVANCE_LP_RING();
+       }
+       return;
+}
+
+static int i915_move_blit(struct drm_buffer_object * bo,
+                         int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+{
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+       int dir = 0;
+
+       if ((old_mem->mem_type == new_mem->mem_type) &&
+           (new_mem->mm_node->start <
+            old_mem->mm_node->start + old_mem->mm_node->size)) {
+               dir = 1;
+       }
+
+       i915_emit_copy_blit(bo->dev,
+                           old_mem->mm_node->start << PAGE_SHIFT,
+                           new_mem->mm_node->start << PAGE_SHIFT,
+                           new_mem->num_pages, dir);
+
+       i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
+
+       return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
+                                        DRM_FENCE_TYPE_EXE |
+                                        DRM_I915_FENCE_TYPE_RW,
+                                        DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
+}
+
+/*
+ * Flip destination ttm into cached-coherent AGP,
+ * then blit and subsequently move out again.
+ */
+
+static int i915_move_flip(struct drm_buffer_object * bo,
+                         int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_bo_mem_reg tmp_mem;
+       int ret;
+
+       tmp_mem = *new_mem;
+       tmp_mem.mm_node = NULL;
+       tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
+           DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
+
+       ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+       if (ret)
+               return ret;
+
+       ret = drm_bind_ttm(bo->ttm, &tmp_mem);
+       if (ret)
+               goto out_cleanup;
+
+       ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
+       if (ret)
+               goto out_cleanup;
+
+       ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out_cleanup:
+       if (tmp_mem.mm_node) {
+               mutex_lock(&dev->struct_mutex);
+               if (tmp_mem.mm_node != bo->pinned_node)
+                       drm_mm_put_block(tmp_mem.mm_node);
+               tmp_mem.mm_node = NULL;
+               mutex_unlock(&dev->struct_mutex);
+       }
+       return ret;
+}
+
+#endif
+
+/*
+ * Disable i915_move_flip for now, since we can't guarantee that the hardware
+ * lock is held here. To re-enable we need to make sure either
+ * a) The X server is using DRM to submit commands to the ring, or
+ * b) DRM can use the HP ring for these blits. This means i915 needs to
+ *    implement a new ring submission mechanism and fence class.
+ */
+int i915_move(struct drm_buffer_object *bo,
+             int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+{
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+
+       if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+               return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+               if (0) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
+                       return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       } else {
+               if (0) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
+                       return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       }
+       return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+static inline void clflush(volatile void *__p)
+{
+       asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+#endif
+
+static inline void drm_cache_flush_addr(void *virt)
+{
+       int i;
+
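+       /* clflush invalidates one cache line per instruction, so walk the
+        * page in strides of the CPU's reported cache-line size.
+        */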
+       for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+               clflush(virt+i);
+}
+
+static inline void drm_cache_flush_page(struct page *p)
+{
+       drm_cache_flush_addr(page_address(p));
+}
+
+void i915_flush_ttm(struct drm_ttm *ttm)
+{
+       int i;
+
+       if (!ttm)
+               return;
+
+       DRM_MEMORYBARRIER();
+       for (i = ttm->num_pages-1; i >= 0; i--)
+               drm_cache_flush_page(drm_ttm_get_page(ttm, i));
+       DRM_MEMORYBARRIER();
+}
diff --git a/psb-kernel-source-4.41.1/i915_compat.c b/psb-kernel-source-4.41.1/i915_compat.c
new file mode 100644 (file)
index 0000000..e119a99
--- /dev/null
@@ -0,0 +1,204 @@
+#include "drmP.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
+#define PCI_DEVICE_ID_INTEL_82965G_1_HB     0x2980
+#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
+#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
+#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
+#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
+#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
+#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
+#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
+
+#define I915_IFPADDR    0x60
+#define I965_IFPADDR    0x70
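+
+/* PCI config offsets of the GMCH Isochronous Flush Page pointer; bit 0
+ * of the register marks the programmed address as valid (tested as
+ * temp & 0x1 below).
+ */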
+
+static struct _i9xx_private_compat {
+       void __iomem *flush_page;
+       struct resource ifp_resource;
+} i9xx_private;
+
+static struct _i8xx_private_compat {
+       void *flush_page;
+       struct page *page;
+} i8xx_private;
+
+static void
+intel_compat_align_resource(void *data, struct resource *res,
+                        resource_size_t size, resource_size_t align)
+{
+       return;
+}
+
+
+static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev)
+{
+       int ret;
+       ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE,
+                                    PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+                                    intel_compat_align_resource, pdev);
+       if (ret != 0)
+               return ret;
+
+       return 0;
+}
+
+static void intel_i915_setup_chipset_flush(struct pci_dev *pdev)
+{
+       int ret;
+       u32 temp;
+
+       pci_read_config_dword(pdev, I915_IFPADDR, &temp);
+       if (!(temp & 0x1)) {
+               intel_alloc_chipset_flush_resource(pdev);
+
+               pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
+       } else {
+               temp &= ~1;
+
+               i9xx_private.ifp_resource.start = temp;
+               i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
+               ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
+               if (ret) {
+                       i9xx_private.ifp_resource.start = 0;
+                       printk("Failed inserting resource into tree\n");
+               }
+       }
+}
+
+static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
+{
+       u32 temp_hi, temp_lo;
+       int ret;
+
+       pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi);
+       pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo);
+
+       if (!(temp_lo & 0x1)) {
+
+               intel_alloc_chipset_flush_resource(pdev);
+
+               pci_write_config_dword(pdev, I965_IFPADDR + 4, (i9xx_private.ifp_resource.start >> 32));
+               pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
+       } else {
+               u64 l64;
+
+               temp_lo &= ~0x1;
+               l64 = ((u64)temp_hi << 32) | temp_lo;
+
+               i9xx_private.ifp_resource.start = l64;
+               i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
+               ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
+               if (ret) {
+                       i9xx_private.ifp_resource.start = 0;
+                       printk("Failed inserting resource into tree\n");
+               }
+       }
+}
+
+static void intel_i8xx_fini_flush(struct drm_device *dev)
+{
+       kunmap(i8xx_private.page);
+       i8xx_private.flush_page = NULL;
+       unmap_page_from_agp(i8xx_private.page);
+       flush_agp_mappings();
+
+       __free_page(i8xx_private.page);
+}
+
+static void intel_i8xx_setup_flush(struct drm_device *dev)
+{
+
+       i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+       if (!i8xx_private.page)
+               return;
+
+       /* make page uncached */
+       map_page_into_agp(i8xx_private.page);
+       flush_agp_mappings();
+
+       i8xx_private.flush_page = kmap(i8xx_private.page);
+       if (!i8xx_private.flush_page)
+               intel_i8xx_fini_flush(dev);
+}
+
+
+static void intel_i8xx_flush_page(struct drm_device *dev)
+{
+       unsigned int *pg = i8xx_private.flush_page;
+       int i;
+
+       /* Hammer the uncached flush page with enough writes that the
+        * chipset is forced to flush its pending write buffers.
+        */
+       for (i = 0; i < 256; i++)
+               *(pg + i) = i;
+
+       DRM_MEMORYBARRIER();
+}
+
+static void intel_i9xx_setup_flush(struct drm_device *dev)
+{
+       struct pci_dev *agp_dev = dev->agp->agp_info.device;
+
+       i9xx_private.ifp_resource.name = "GMCH IFPBAR";
+       i9xx_private.ifp_resource.flags = IORESOURCE_MEM;
+
+       /* 965/G33 program a 64-bit IFP pointer; 915-class parts use a
+        * 32-bit one.
+        */
+       if (IS_I965G(dev) || IS_G33(dev)) {
+               intel_i965_g33_setup_chipset_flush(agp_dev);
+       } else {
+               intel_i915_setup_chipset_flush(agp_dev);
+       }
+
+       if (i9xx_private.ifp_resource.start) {
+               i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE);
+               if (!i9xx_private.flush_page)
+                       printk("unable to ioremap flush page - no chipset flushing\n");
+       }
+}
+
+static void intel_i9xx_fini_flush(struct drm_device *dev)
+{
+       iounmap(i9xx_private.flush_page);
+       release_resource(&i9xx_private.ifp_resource);
+}
+
+static void intel_i9xx_flush_page(struct drm_device *dev)
+{
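+       /* Any write to the mapped flush page makes the chipset flush its
+        * pending writes.
+        */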
+       if (i9xx_private.flush_page)
+               writel(1, i9xx_private.flush_page);
+}
+
+void intel_init_chipset_flush_compat(struct drm_device *dev)
+{
+       /* i9xx parts flush through the IFP page; i8xx falls back to an
+        * uncached page.
+        */
+       if (IS_I9XX(dev))
+               intel_i9xx_setup_flush(dev);
+       else
+               intel_i8xx_setup_flush(dev);
+}
+
+void intel_fini_chipset_flush_compat(struct drm_device *dev)
+{
+       /* Tear down whichever flush mechanism init set up. */
+       if (IS_I9XX(dev))
+               intel_i9xx_fini_flush(dev);
+       else
+               intel_i8xx_fini_flush(dev);
+}
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+       if (IS_I9XX(dev))
+               intel_i9xx_flush_page(dev);
+       else
+               intel_i8xx_flush_page(dev);
+}
+#endif
diff --git a/psb-kernel-source-4.41.1/i915_dma.c b/psb-kernel-source-4.41.1/i915_dma.c
new file mode 100644 (file)
index 0000000..5d9533b
--- /dev/null
@@ -0,0 +1,1324 @@
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* Really want an OS-independent resettable timer.  Would like to have
+ * this loop run for (eg) 3 sec, but have the timer reset every time
+ * the head pointer changes, so that EBUSY only happens if the ring
+ * actually stalls for (eg) 3 seconds.
+ */
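+/* ring->space below is the free byte count between tail and head; the
+ * extra 8 keeps a small guard gap so the tail never quite catches the
+ * head.
+ */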
+int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
+       u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+       int i;
+
+       for (i = 0; i < 10000; i++) {
+               ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->Size;
+               if (ring->space >= n)
+                       return 0;
+
+               dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+               if (ring->head != last_head)
+                       i = 0;
+
+               last_head = ring->head;
+               DRM_UDELAY(1);
+       }
+
+       return -EBUSY;
+}
+
+void i915_kernel_lost_context(struct drm_device * dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
+
+       ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+       ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
+       ring->space = ring->head - (ring->tail + 8);
+       if (ring->space < 0)
+               ring->space += ring->Size;
+
+       if (ring->head == ring->tail)
+               dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+}
+
+int i915_dma_cleanup(struct drm_device * dev)
+{
+       /* Make sure interrupts are disabled here because the uninstall ioctl
+        * may not have been called from userspace and after dev_private
+        * is freed, it's too late.
+        */
+       if (dev->irq)
+               drm_irq_uninstall(dev);
+
+       return 0;
+}
+
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->sarea = drm_getsarea(dev);
+       if (!dev_priv->sarea) {
+               DRM_ERROR("can not find sarea!\n");
+               i915_dma_cleanup(dev);
+               return -EINVAL;
+       }
+
+       dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+       if (!dev_priv->mmio_map) {
+               i915_dma_cleanup(dev);
+               DRM_ERROR("can not find mmio map!\n");
+               return -EINVAL;
+       }
+
+#ifdef I915_HAVE_BUFFER
+       dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
+#endif
+
+       dev_priv->sarea_priv = (drm_i915_sarea_t *)
+           ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+
+       dev_priv->ring.Start = init->ring_start;
+       dev_priv->ring.End = init->ring_end;
+       dev_priv->ring.Size = init->ring_size;
+       dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+       dev_priv->ring.map.offset = init->ring_start;
+       dev_priv->ring.map.size = init->ring_size;
+       dev_priv->ring.map.type = 0;
+       dev_priv->ring.map.flags = 0;
+       dev_priv->ring.map.mtrr = 0;
+
+       drm_core_ioremap(&dev_priv->ring.map, dev);
+
+       if (dev_priv->ring.map.handle == NULL) {
+               i915_dma_cleanup(dev);
+               DRM_ERROR("can not ioremap virtual address for"
+                         " ring buffer\n");
+               return -ENOMEM;
+       }
+
+       dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+       dev_priv->cpp = init->cpp;
+       dev_priv->sarea_priv->pf_current_page = 0;
+
+       /* We are using separate values as placeholders for mechanisms for
+        * private backbuffer/depthbuffer usage.
+        */
+       dev_priv->use_mi_batchbuffer_start = 0;
+
+       /* Allow hardware batchbuffers unless told otherwise.
+        */
+       dev_priv->allow_batchbuffer = 1;
+
+       /* Enable vblank on pipe A for older X servers
+        */
+       dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+
+       /* Program Hardware Status Page */
+       if (!IS_G33(dev)) {
+               dev_priv->status_page_dmah =
+                       drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+               if (!dev_priv->status_page_dmah) {
+                       i915_dma_cleanup(dev);
+                       DRM_ERROR("Can not allocate hardware status page\n");
+                       return -ENOMEM;
+               }
+               dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+               dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+               memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
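+               /* 0x02080 is the hardware status page address register
+                * (HWS_PGA); point it at the newly allocated page.
+                */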
+               I915_WRITE(0x02080, dev_priv->dma_status_page);
+       }
+       DRM_DEBUG("Enabled hardware status page\n");
+#ifdef I915_HAVE_BUFFER
+       mutex_init(&dev_priv->cmdbuf_mutex);
+#endif
+       return 0;
+}
+
+static int i915_dma_resume(struct drm_device * dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
+       DRM_DEBUG("%s\n", __FUNCTION__);
+
+       if (!dev_priv->sarea) {
+               DRM_ERROR("can not find sarea!\n");
+               return -EINVAL;
+       }
+
+       if (!dev_priv->mmio_map) {
+               DRM_ERROR("can not find mmio map!\n");
+               return -EINVAL;
+       }
+
+       if (dev_priv->ring.map.handle == NULL) {
+               DRM_ERROR("can not ioremap virtual address for"
+                         " ring buffer\n");
+               return -ENOMEM;
+       }
+
+       /* Program Hardware Status Page */
+       if (!dev_priv->hw_status_page) {
+               DRM_ERROR("Can not find hardware status page\n");
+               return -EINVAL;
+       }
+       DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+       if (dev_priv->status_gfx_addr != 0)
+               I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+       else
+               I915_WRITE(0x02080, dev_priv->dma_status_page);
+       DRM_DEBUG("Enabled hardware status page\n");
+
+       return 0;
+}
+
+static int i915_dma_init(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_init *init = data;
+       int retcode = 0;
+
+       switch (init->func) {
+       case I915_INIT_DMA:
+               retcode = i915_initialize(dev, init);
+               break;
+       case I915_CLEANUP_DMA:
+               retcode = i915_dma_cleanup(dev);
+               break;
+       case I915_RESUME_DMA:
+               retcode = i915_dma_resume(dev);
+               break;
+       default:
+               retcode = -EINVAL;
+               break;
+       }
+
+       return retcode;
+}
+
+/* Implement basically the same security restrictions as hardware does
+ * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
+ *
+ * Most of the calculations below involve calculating the size of a
+ * particular instruction.  It's important to get the size right as
+ * that tells us where the next instruction to check is.  Any illegal
+ * instruction detected will be given a size of zero, which is a
+ * signal to abort the rest of the buffer.
+ */
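+/* Worked example: cmd = 0x40000003 has client field 0x2 (a 2D command),
+ * so its length is (cmd & 0xff) + 2 = 5 dwords and the scanner advances
+ * by 5.
+ */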
+static int do_validate_cmd(int cmd)
+{
+       switch (((cmd >> 29) & 0x7)) {
+       case 0x0:
+               switch ((cmd >> 23) & 0x3f) {
+               case 0x0:
+                       return 1;       /* MI_NOOP */
+               case 0x4:
+                       return 1;       /* MI_FLUSH */
+               default:
+                       return 0;       /* disallow everything else */
+               }
+               break;
+       case 0x1:
+               return 0;       /* reserved */
+       case 0x2:
+               return (cmd & 0xff) + 2;        /* 2d commands */
+       case 0x3:
+               if (((cmd >> 24) & 0x1f) <= 0x18)
+                       return 1;
+
+               switch ((cmd >> 24) & 0x1f) {
+               case 0x1c:
+                       return 1;
+               case 0x1d:
+                       switch ((cmd >> 16) & 0xff) {
+                       case 0x3:
+                               return (cmd & 0x1f) + 2;
+                       case 0x4:
+                               return (cmd & 0xf) + 2;
+                       default:
+                               return (cmd & 0xffff) + 2;
+                       }
+               case 0x1e:
+                       if (cmd & (1 << 23))
+                               return (cmd & 0xffff) + 1;
+                       else
+                               return 1;
+               case 0x1f:
+                       if ((cmd & (1 << 23)) == 0)     /* inline vertices */
+                               return (cmd & 0x1ffff) + 2;
+                       else if (cmd & (1 << 17))       /* indirect random */
+                               if ((cmd & 0xffff) == 0)
+                                       return 0;       /* unknown length, too hard */
+                               else
+                                       return (((cmd & 0xffff) + 1) / 2) + 1;
+                       else
+                               return 2;       /* indirect sequential */
+               default:
+                       return 0;
+               }
+       default:
+               return 0;
+       }
+
+       return 0;
+}
+
+static int validate_cmd(int cmd)
+{
+       int ret = do_validate_cmd(cmd);
+
+/*     printk("validate_cmd( %x ): %d\n", cmd, ret); */
+
+       return ret;
+}
+
+static int i915_emit_cmds(struct drm_device * dev, int __user * buffer,
+                         int dwords)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+       RING_LOCALS;
+
+       if ((dwords + 1) * sizeof(int) >= dev_priv->ring.Size - 8)
+               return -EINVAL;
+
+       BEGIN_LP_RING((dwords + 1) & ~1);
+
+       for (i = 0; i < dwords;) {
+               int cmd, sz;
+
+               if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+                       return -EINVAL;
+
+               if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+                       return -EINVAL;
+
+               OUT_RING(cmd);
+
+               while (++i, --sz) {
+                       if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
+                                                        sizeof(cmd))) {
+                               return -EINVAL;
+                       }
+                       OUT_RING(cmd);
+               }
+       }
+
+       if (dwords & 1)
+               OUT_RING(0);
+
+       ADVANCE_LP_RING();
+
+       return 0;
+}
+
+static int i915_emit_box(struct drm_device * dev,
+                        struct drm_clip_rect __user * boxes,
+                        int i, int DR1, int DR4)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_clip_rect box;
+       RING_LOCALS;
+
+       if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+               return -EFAULT;
+       }
+
+       if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+               DRM_ERROR("Bad box %d,%d..%d,%d\n",
+                         box.x1, box.y1, box.x2, box.y2);
+               return -EINVAL;
+       }
+
+       if (IS_I965G(dev)) {
+               BEGIN_LP_RING(4);
+               OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+               OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+               OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+               OUT_RING(DR4);
+               ADVANCE_LP_RING();
+       } else {
+               BEGIN_LP_RING(6);
+               OUT_RING(GFX_OP_DRAWRECT_INFO);
+               OUT_RING(DR1);
+               OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+               OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+               OUT_RING(DR4);
+               OUT_RING(0);
+               ADVANCE_LP_RING();
+       }
+
+       return 0;
+}
+
+/* XXX: Emitting the counter should really be moved to part of the IRQ
+ * emit. For now, do it in both places:
+ */
+
+void i915_emit_breadcrumb(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       RING_LOCALS;
+
+       if (++dev_priv->counter > BREADCRUMB_MASK) {
+#ifdef I915_HAVE_FENCE
+               i915_invalidate_reported_sequence(dev);
+#endif
+               dev_priv->counter = 1;
+               DRM_DEBUG("Breadcrumb counter wrapped around\n");
+       }
+
+       dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
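+       /* Store the counter at byte offset 20 (dword index 5) of the
+        * hardware status page, where READ_BREADCRUMB looks for it.
+        */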
+       BEGIN_LP_RING(4);
+       OUT_RING(CMD_STORE_DWORD_IDX);
+       OUT_RING(20);
+       OUT_RING(dev_priv->counter);
+       OUT_RING(0);
+       ADVANCE_LP_RING();
+}
+
+
+int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t flush_cmd = CMD_MI_FLUSH;
+       RING_LOCALS;
+
+       flush_cmd |= flush;
+
+       i915_kernel_lost_context(dev);
+
+       BEGIN_LP_RING(4);
+       OUT_RING(flush_cmd);
+       OUT_RING(0);
+       OUT_RING(0);
+       OUT_RING(0);
+       ADVANCE_LP_RING();
+
+       return 0;
+}
+
+
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+                                  struct drm_i915_cmdbuffer * cmd)
+{
+#ifdef I915_HAVE_FENCE
+       struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
+       int nbox = cmd->num_cliprects;
+       int i = 0, count, ret;
+
+       if (cmd->sz & 0x3) {
+               DRM_ERROR("alignment\n");
+               return -EINVAL;
+       }
+
+       i915_kernel_lost_context(dev);
+
+       count = nbox ? nbox : 1;
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+                       ret = i915_emit_box(dev, cmd->cliprects, i,
+                                           cmd->DR1, cmd->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+               if (ret)
+                       return ret;
+       }
+
+       i915_emit_breadcrumb(dev);
+#ifdef I915_HAVE_FENCE
+       if (unlikely((dev_priv->counter & 0xFF) == 0))
+               drm_fence_flush_old(dev, 0, dev_priv->counter);
+#endif
+       return 0;
+}
+
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
+                                    drm_i915_batchbuffer_t * batch)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_clip_rect __user *boxes = batch->cliprects;
+       int nbox = batch->num_cliprects;
+       int i = 0, count;
+       RING_LOCALS;
+
+       if ((batch->start | batch->used) & 0x7) {
+               DRM_ERROR("alignment\n");
+               return -EINVAL;
+       }
+
+       i915_kernel_lost_context(dev);
+
+       count = nbox ? nbox : 1;
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+                       int ret = i915_emit_box(dev, boxes, i,
+                                               batch->DR1, batch->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+               if (dev_priv->use_mi_batchbuffer_start) {
+                       BEGIN_LP_RING(2);
+                       if (IS_I965G(dev)) {
+                               OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+                               OUT_RING(batch->start);
+                       } else {
+                               OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+                               OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+                       }
+                       ADVANCE_LP_RING();
+
+               } else {
+                       BEGIN_LP_RING(4);
+                       OUT_RING(MI_BATCH_BUFFER);
+                       OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+                       OUT_RING(batch->start + batch->used - 4);
+                       OUT_RING(0);
+                       ADVANCE_LP_RING();
+               }
+       }
+
+       i915_emit_breadcrumb(dev);
+#ifdef I915_HAVE_FENCE
+       if (unlikely((dev_priv->counter & 0xFF) == 0))
+               drm_fence_flush_old(dev, 0, dev_priv->counter);
+#endif
+       return 0;
+}
+
+static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 num_pages, current_page, next_page, dspbase;
+       int shift = 2 * plane, x, y;
+       RING_LOCALS;
+
+       /* Calculate display base offset */
+       num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
+       current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
+       next_page = (current_page + 1) % num_pages;
+
+       switch (next_page) {
+       default:
+       case 0:
+               dspbase = dev_priv->sarea_priv->front_offset;
+               break;
+       case 1:
+               dspbase = dev_priv->sarea_priv->back_offset;
+               break;
+       case 2:
+               dspbase = dev_priv->sarea_priv->third_offset;
+               break;
+       }
+
+       if (plane == 0) {
+               x = dev_priv->sarea_priv->planeA_x;
+               y = dev_priv->sarea_priv->planeA_y;
+       } else {
+               x = dev_priv->sarea_priv->planeB_x;
+               y = dev_priv->sarea_priv->planeB_y;
+       }
+
+       dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
+
+       DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
+                 dspbase);
+
+       BEGIN_LP_RING(4);
+       OUT_RING(sync ? 0 :
+                (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
+                                      MI_WAIT_FOR_PLANE_A_FLIP)));
+       OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
+                (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
+       OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
+       OUT_RING(dspbase);
+       ADVANCE_LP_RING();
+
+       dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
+       dev_priv->sarea_priv->pf_current_page |= next_page << shift;
+}
+
+void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       DRM_DEBUG("%s: planes=0x%x pfCurrentPage=%d\n",
+                 __FUNCTION__,
+                 planes, dev_priv->sarea_priv->pf_current_page);
+
+       i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
+
+       for (i = 0; i < 2; i++)
+               if (planes & (1 << i))
+                       i915_do_dispatch_flip(dev, i, sync);
+
+       i915_emit_breadcrumb(dev);
+#ifdef I915_HAVE_FENCE
+       if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
+               drm_fence_flush_old(dev, 0, dev_priv->counter);
+#endif
+}
+
+static int i915_quiescent(struct drm_device * dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       i915_kernel_lost_context(dev);
+       return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+}
+
+static int i915_flush_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv)
+{
+
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+       return i915_quiescent(dev);
+}
+
+static int i915_batchbuffer(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+           dev_priv->sarea_priv;
+       drm_i915_batchbuffer_t *batch = data;
+       int ret;
+
+       if (!dev_priv->allow_batchbuffer) {
+               DRM_ERROR("Batchbuffer ioctl disabled\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
+                 batch->start, batch->used, batch->num_cliprects);
+
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+       if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+                                                       batch->num_cliprects *
+                                                       sizeof(struct drm_clip_rect)))
+               return -EFAULT;
+
+       ret = i915_dispatch_batchbuffer(dev, batch);
+
+       sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+       return ret;
+}
+
+static int i915_cmdbuffer(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
+           dev_priv->sarea_priv;
+       struct drm_i915_cmdbuffer *cmdbuf = data;
+       int ret;
+
+       DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+                 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+       if (cmdbuf->num_cliprects &&
+           DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+                               cmdbuf->num_cliprects *
+                               sizeof(struct drm_clip_rect))) {
+               DRM_ERROR("Fault accessing cliprects\n");
+               return -EFAULT;
+       }
+
+       ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+       if (ret) {
+               DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+               return ret;
+       }
+
+       sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+       return 0;
+}
+
+#ifdef I915_HAVE_BUFFER
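+/*
+ * State carried across individual relocation writes: the buffer object
+ * being patched plus the page of it that is currently kmapped, so that
+ * consecutive relocations landing on the same page reuse one mapping.
+ */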
+struct i915_relocatee_info {
+       struct drm_buffer_object *buf;
+       unsigned long offset;
+       u32 *data_page;
+       unsigned page_offset;
+       struct drm_bo_kmap_obj kmap;
+       int is_iomem;
+};
+
+static void i915_dereference_buffers_locked(struct drm_buffer_object **buffers,
+                                           unsigned num_buffers)
+{
+       while (num_buffers--)
+               drm_bo_usage_deref_locked(&buffers[num_buffers]);
+}
+
+int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
+                    struct drm_buffer_object **buffers,
+                    struct i915_relocatee_info *relocatee,
+                    uint32_t *reloc)
+{
+       unsigned index;
+       unsigned long new_cmd_offset;
+       u32 val;
+       int ret;
+
+       if (reloc[2] >= num_buffers) {
+               DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
+               return -EINVAL;
+       }
+
+       new_cmd_offset = reloc[0];
+       if (!relocatee->data_page ||
+           !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
+               drm_bo_kunmap(&relocatee->kmap);
+               relocatee->offset = new_cmd_offset;
+               ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
+                                 1, &relocatee->kmap);
+               if (ret) {
+                       DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset);
+                       return ret;
+               }
+
+               relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
+                                                      &relocatee->is_iomem);
+               relocatee->page_offset = (relocatee->offset & PAGE_MASK);
+       }
+
+       val = buffers[reloc[2]]->offset;
+       index = (reloc[0] - relocatee->page_offset) >> 2;
+
+       /* add in validate */
+       val = val + reloc[1];
+
+       relocatee->data_page[index] = val;
+       return 0;
+}
+
+int i915_process_relocs(struct drm_file *file_priv,
+                       uint32_t buf_handle,
+                       uint32_t *reloc_buf_handle,
+                       struct i915_relocatee_info *relocatee,
+                       struct drm_buffer_object **buffers,
+                       uint32_t num_buffers)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *reloc_list_object;
+       uint32_t cur_handle = *reloc_buf_handle;
+       uint32_t *reloc_page;
+       int ret, reloc_is_iomem, reloc_stride;
+       uint32_t num_relocs, reloc_offset, reloc_end, reloc_page_offset, next_offset, cur_offset;
+       struct drm_bo_kmap_obj reloc_kmap;
+
+       memset(&reloc_kmap, 0, sizeof(reloc_kmap));
+
+       mutex_lock(&dev->struct_mutex);
+       reloc_list_object = drm_lookup_buffer_object(file_priv, cur_handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+       if (!reloc_list_object)
+               return -EINVAL;
+
+       ret = drm_bo_kmap(reloc_list_object, 0, 1, &reloc_kmap);
+       if (ret) {
+               DRM_ERROR("Could not map relocation buffer.\n");
+               goto out;
+       }
+
+       reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
+       num_relocs = reloc_page[0] & 0xffff;
+
+       if ((reloc_page[0] >> 16) & 0xffff) {
+               DRM_ERROR("Unsupported relocation type requested\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* get next relocate buffer handle */
+       *reloc_buf_handle = reloc_page[1];
+       reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
+
+       DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs, reloc_page[1]);
+
+       reloc_page_offset = 0;
+       reloc_offset = I915_RELOC_HEADER * sizeof(uint32_t);
+       reloc_end = reloc_offset + (num_relocs * reloc_stride);
+
+       do {
+               next_offset = drm_bo_offset_end(reloc_offset, reloc_end);
+
+               do {
+                       cur_offset = ((reloc_offset + reloc_page_offset) & ~PAGE_MASK) / sizeof(uint32_t);
+                       ret = i915_apply_reloc(file_priv, num_buffers,
+                                        buffers, relocatee, &reloc_page[cur_offset]);
+                       if (ret)
+                               goto out;
+
+                       reloc_offset += reloc_stride;
+               } while (reloc_offset < next_offset);
+
+               drm_bo_kunmap(&reloc_kmap);
+
+               reloc_offset = next_offset;
+               if (reloc_offset != reloc_end) {
+                       ret = drm_bo_kmap(reloc_list_object, reloc_offset >> PAGE_SHIFT, 1, &reloc_kmap);
+                       if (ret) {
+                               DRM_ERROR("Could not map relocation buffer.\n");
+                               goto out;
+                       }
+
+                       reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
+                       reloc_page_offset = reloc_offset & ~PAGE_MASK;
+               }
+
+       } while (reloc_offset != reloc_end);
+out:
+       drm_bo_kunmap(&relocatee->kmap);
+       relocatee->data_page = NULL;
+
+       drm_bo_kunmap(&reloc_kmap);
+
+       mutex_lock(&dev->struct_mutex);
+       drm_bo_usage_deref_locked(&reloc_list_object);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
+                          drm_handle_t buf_reloc_handle,
+                          struct drm_buffer_object **buffers,
+                          uint32_t buf_count)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct i915_relocatee_info relocatee;
+       int ret = 0;
+
+       memset(&relocatee, 0, sizeof(relocatee));
+
+       mutex_lock(&dev->struct_mutex);
+       relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+       if (!relocatee.buf) {
+               DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       while (buf_reloc_handle) {
+               ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count);
+               if (ret) {
+                       DRM_ERROR("process relocs failed\n");
+                       break;
+               }
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       drm_bo_usage_deref_locked(&relocatee.buf);
+       mutex_unlock(&dev->struct_mutex);
+
+out_err:
+       return ret;
+}
+
+/*
+ * Validate, add fence and relocate a block of bos from a userspace list
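+ *
+ * Userspace hands us a singly linked list of struct drm_i915_op_arg,
+ * chained through the 64-bit 'next' field; each node names one buffer
+ * to validate and, optionally, a relocation list to apply first.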
+ */
+int i915_validate_buffer_list(struct drm_file *file_priv,
+                             unsigned int fence_class, uint64_t data,
+                             struct drm_buffer_object **buffers,
+                             uint32_t *num_buffers)
+{
+       struct drm_i915_op_arg arg;
+       struct drm_bo_op_req *req = &arg.d.req;
+       struct drm_bo_arg_rep rep;
+       unsigned long next = 0;
+       int ret = 0;
+       unsigned buf_count = 0;
+       struct drm_device *dev = file_priv->head->dev;
+       uint32_t buf_reloc_handle, buf_handle;
+
+       do {
+               if (buf_count >= *num_buffers) {
+                       DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+
+               buffers[buf_count] = NULL;
+
+               if (copy_from_user(&arg, (void __user *)(unsigned)data, sizeof(arg))) {
+                       ret = -EFAULT;
+                       goto out_err;
+               }
+
+               if (arg.handled) {
+                       data = arg.next;
+                       mutex_lock(&dev->struct_mutex);
+                       buffers[buf_count] = drm_lookup_buffer_object(file_priv, req->arg_handle, 1);
+                       mutex_unlock(&dev->struct_mutex);
+                       buf_count++;
+                       continue;
+               }
+
+               rep.ret = 0;
+               if (req->op != drm_bo_validate) {
+                       DRM_ERROR
+                           ("Buffer object operation wasn't \"validate\".\n");
+                       rep.ret = -EINVAL;
+                       goto out_err;
+               }
+
+               buf_handle = req->bo_req.handle;
+               buf_reloc_handle = arg.reloc_handle;
+
+               if (buf_reloc_handle) {
+                       ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count);
+                       if (ret)
+                               goto out_err;
+                       DRM_MEMORYBARRIER();
+               }
+
+               rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
+                                                req->bo_req.fence_class,
+                                                req->bo_req.flags,
+                                                req->bo_req.mask,
+                                                req->bo_req.hint,
+                                                0,
+                                                &rep.bo_info,
+                                                &buffers[buf_count]);
+
+               if (rep.ret) {
+                       DRM_ERROR("error on handle validate %d\n", rep.ret);
+                       goto out_err;
+               }
+
+               next = arg.next;
+               arg.handled = 1;
+               arg.d.rep = rep;
+
+               if (copy_to_user((void __user *)(unsigned)data, &arg, sizeof(arg)))
+                       return -EFAULT;
+
+               data = next;
+               buf_count++;
+
+       } while (next != 0);
+       *num_buffers = buf_count;
+       return 0;
+out_err:
+       mutex_lock(&dev->struct_mutex);
+       i915_dereference_buffers_locked(buffers, buf_count);
+       mutex_unlock(&dev->struct_mutex);
+       *num_buffers = 0;
+       return (ret) ? ret : rep.ret;
+}
+
+static int i915_execbuffer(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+               dev_priv->sarea_priv;
+       struct drm_i915_execbuffer *exec_buf = data;
+       struct drm_i915_batchbuffer *batch = &exec_buf->batch;
+       struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
+       int num_buffers;
+       int ret;
+       struct drm_buffer_object **buffers;
+       struct drm_fence_object *fence;
+
+       if (!dev_priv->allow_batchbuffer) {
+               DRM_ERROR("Batchbuffer ioctl disabled\n");
+               return -EINVAL;
+       }
+
+       if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+                                                       batch->num_cliprects *
+                                                       sizeof(struct drm_clip_rect)))
+               return -EFAULT;
+
+       if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
+               return -EINVAL;
+
+       ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+       if (ret)
+               return ret;
+
+       /*
+        * The cmdbuf_mutex makes sure the validate-submit-fence
+        * operation is atomic.
+        */
+
+       ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+       if (ret) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return -EAGAIN;
+       }
+
+       num_buffers = exec_buf->num_buffers;
+
+       buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER);
+       if (!buffers) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               mutex_unlock(&dev_priv->cmdbuf_mutex);
+               return -ENOMEM;
+       }
+
+       /* validate buffer list + fixup relocations */
+       ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
+                                       buffers, &num_buffers);
+       if (ret)
+               goto out_free;
+
+       /* make sure all previous memory operations have passed */
+       DRM_MEMORYBARRIER();
+       drm_agp_chipset_flush(dev);
+
+       /* submit buffer */
+       batch->start = buffers[num_buffers-1]->offset;
+
+       DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
+                 batch->start, batch->used, batch->num_cliprects);
+
+       ret = i915_dispatch_batchbuffer(dev, batch);
+       if (ret)
+               goto out_err0;
+
+       sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+       /* fence */
+       ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
+       if (ret)
+               goto out_err0;
+
+       if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
+               ret = drm_fence_add_user_object(file_priv, fence, fence_arg->flags & DRM_FENCE_FLAG_SHAREABLE);
+               if (!ret) {
+                       fence_arg->handle = fence->base.hash.key;
+                       fence_arg->fence_class = fence->fence_class;
+                       fence_arg->type = fence->type;
+                       fence_arg->signaled = fence->signaled_types;
+               }
+       }
+       drm_fence_usage_deref_unlocked(&fence);
+out_err0:
+
+       /* handle errors */
+       mutex_lock(&dev->struct_mutex);
+       i915_dereference_buffers_locked(buffers, num_buffers);
+       mutex_unlock(&dev->struct_mutex);
+
+out_free:
+       drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER);
+
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return ret;
+}
+#endif
+
+int i915_do_cleanup_pageflip(struct drm_device * dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i, planes, num_pages;
+
+       DRM_DEBUG("%s\n", __FUNCTION__);
+       if (!dev_priv->sarea_priv)
+               return 0;
+       num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
+       for (i = 0, planes = 0; i < 2; i++) {
+               if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
+                       dev_priv->sarea_priv->pf_current_page =
+                               (dev_priv->sarea_priv->pf_current_page &
+                                ~(0x3 << (2 * i))) | (num_pages - 1) << (2 * i);
+
+                       planes |= 1 << i;
+               }
+       }
+
+       if (planes)
+               i915_dispatch_flip(dev, planes, 0);
+
+       return 0;
+}
+
+static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_i915_flip *param = data;
+
+       DRM_DEBUG("%s\n", __FUNCTION__);
+
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+       /* This is really planes */
+       if (param->pipes & ~0x3) {
+               DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
+                         param->pipes);
+               return -EINVAL;
+       }
+
+       i915_dispatch_flip(dev, param->pipes, 0);
+
+       return 0;
+}
+
+static int i915_getparam(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_getparam *param = data;
+       int value;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       switch (param->param) {
+       case I915_PARAM_IRQ_ACTIVE:
+               value = dev->irq ? 1 : 0;
+               break;
+       case I915_PARAM_ALLOW_BATCHBUFFER:
+               value = dev_priv->allow_batchbuffer ? 1 : 0;
+               break;
+       case I915_PARAM_LAST_DISPATCH:
+               value = READ_BREADCRUMB(dev_priv);
+               break;
+       default:
+               DRM_ERROR("Unknown parameter %d\n", param->param);
+               return -EINVAL;
+       }
+
+       if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+               DRM_ERROR("DRM_COPY_TO_USER failed\n");
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+static int i915_setparam(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       drm_i915_setparam_t *param = data;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       switch (param->param) {
+       case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+               if (!IS_I965G(dev))
+                       dev_priv->use_mi_batchbuffer_start = param->value;
+               break;
+       case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+               dev_priv->tex_lru_log_granularity = param->value;
+               break;
+       case I915_SETPARAM_ALLOW_BATCHBUFFER:
+               dev_priv->allow_batchbuffer = param->value;
+               break;
+       default:
+               DRM_ERROR("unknown parameter %d\n", param->param);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
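+/*
+ * Whitelist of registers userspace may touch through the MMIO ioctl.
+ * Each entry carries access flags, the register offset and its size in
+ * bytes; i915_mmio() below transfers size/4 dwords per request.
+ */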
+drm_i915_mmio_entry_t mmio_table[] = {
+       [MMIO_REGS_PS_DEPTH_COUNT] = {
+               I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
+               0x2350,
+               8
+       }
+};
+
+static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
+
+static int i915_mmio(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       uint32_t buf[8];
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       drm_i915_mmio_entry_t *e;
+       drm_i915_mmio_t *mmio = data;
+       void __iomem *base;
+       int i;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       if (mmio->reg >= mmio_table_size)
+               return -EINVAL;
+
+       e = &mmio_table[mmio->reg];
+       base = (u8 *) dev_priv->mmio_map->handle + e->offset;
+
+       switch (mmio->read_write) {
+       case I915_MMIO_READ:
+               if (!(e->flag & I915_MMIO_MAY_READ))
+                       return -EINVAL;
+               for (i = 0; i < e->size / 4; i++)
+                       buf[i] = I915_READ(e->offset + i * 4);
+               if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
+                       DRM_ERROR("DRM_COPY_TO_USER failed\n");
+                       return -EFAULT;
+               }
+               break;
+
+       case I915_MMIO_WRITE:
+               if (!(e->flag & I915_MMIO_MAY_WRITE))
+                       return -EINVAL;
+               if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
+                       DRM_ERROR("DRM_COPY_FROM_USER failed\n");
+                       return -EFAULT;
+               }
+               for (i = 0; i < e->size / 4; i++)
+                       I915_WRITE(e->offset + i * 4, buf[i]);
+               break;
+       }
+       return 0;
+}
+
+static int i915_set_status_page(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       drm_i915_hws_addr_t *hws = data;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+       DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
+
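+       /* Keep only the 4 KiB-aligned offset (bits 28:12) of the address
+        * userspace supplied. */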
+       dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+
+       dev_priv->hws_map.offset = dev->agp->base + hws->addr;
+       dev_priv->hws_map.size = 4*1024;
+       dev_priv->hws_map.type = 0;
+       dev_priv->hws_map.flags = 0;
+       dev_priv->hws_map.mtrr = 0;
+
+       drm_core_ioremap(&dev_priv->hws_map, dev);
+       if (dev_priv->hws_map.handle == NULL) {
+               i915_dma_cleanup(dev);
+               dev_priv->status_gfx_addr = 0;
+               DRM_ERROR("can not ioremap virtual address for"
+                               " G33 hw status page\n");
+               return -ENOMEM;
+       }
+       dev_priv->hw_status_page = dev_priv->hws_map.handle;
+
+       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+       I915_WRITE(I915REG_HWS_PGA, dev_priv->status_gfx_addr);
+       DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
+                       dev_priv->status_gfx_addr);
+       DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+       return 0;
+}
+
+struct drm_ioctl_desc i915_ioctls[] = {
+       DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+       DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+       DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
+       DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
+#ifdef I915_HAVE_BUFFER
+       DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
+#endif
+};
+
+int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate every i9x5 is AGP.
+ */
+int i915_driver_device_is_agp(struct drm_device * dev)
+{
+       return 1;
+}
+
diff --git a/psb-kernel-source-4.41.1/i915_drm.h b/psb-kernel-source-4.41.1/i915_drm.h
new file mode 100644 (file)
index 0000000..65bc9e8
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#include "drm.h"
+
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255        /* table size 2k - maximum due to use
+                                * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
+
+typedef struct drm_i915_init {
+       enum {
+               I915_INIT_DMA = 0x01,
+               I915_CLEANUP_DMA = 0x02,
+               I915_RESUME_DMA = 0x03
+       } func;
+       unsigned int mmio_offset;
+       int sarea_priv_offset;
+       unsigned int ring_start;
+       unsigned int ring_end;
+       unsigned int ring_size;
+       unsigned int front_offset;
+       unsigned int back_offset;
+       unsigned int depth_offset;
+       unsigned int w;
+       unsigned int h;
+       unsigned int pitch;
+       unsigned int pitch_bits;
+       unsigned int back_pitch;
+       unsigned int depth_pitch;
+       unsigned int cpp;
+       unsigned int chipset;
+} drm_i915_init_t;
+
+typedef struct drm_i915_sarea {
+       struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+       int last_upload;        /* last time texture was uploaded */
+       int last_enqueue;       /* last time a buffer was enqueued */
+       int last_dispatch;      /* age of the most recently dispatched buffer */
+       int ctxOwner;           /* last context to upload state */
+       int texAge;
+       int pf_enabled;         /* is pageflipping allowed? */
+       int pf_active;
+       int pf_current_page;    /* which buffer is being displayed? */
+       int perf_boxes;         /* performance boxes to be displayed */
+       int width, height;      /* screen size in pixels */
+
+       drm_handle_t front_handle;
+       int front_offset;
+       int front_size;
+
+       drm_handle_t back_handle;
+       int back_offset;
+       int back_size;
+
+       drm_handle_t depth_handle;
+       int depth_offset;
+       int depth_size;
+
+       drm_handle_t tex_handle;
+       int tex_offset;
+       int tex_size;
+       int log_tex_granularity;
+       int pitch;
+       int rotation;           /* 0, 90, 180 or 270 */
+       int rotated_offset;
+       int rotated_size;
+       int rotated_pitch;
+       int virtualX, virtualY;
+
+       unsigned int front_tiled;
+       unsigned int back_tiled;
+       unsigned int depth_tiled;
+       unsigned int rotated_tiled;
+       unsigned int rotated2_tiled;
+
+       int planeA_x;
+       int planeA_y;
+       int planeA_w;
+       int planeA_h;
+       int planeB_x;
+       int planeB_y;
+       int planeB_w;
+       int planeB_h;
+
+       /* Triple buffering */
+       drm_handle_t third_handle;
+       int third_offset;
+       int third_size;
+       unsigned int third_tiled;
+} drm_i915_sarea_t;
+
+/* Driver specific fence types and classes.
+ */
+
+/* The only fence class we support */
+#define DRM_I915_FENCE_CLASS_ACCEL 0
+/* Fence type that guarantees read-write flush */
+#define DRM_I915_FENCE_TYPE_RW 2
+/* MI_FLUSH programmed just before the fence */
+#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY    0x1
+#define I915_BOX_FLIP          0x2
+#define I915_BOX_WAIT          0x4
+#define I915_BOX_TEXTURE_LOAD  0x8
+#define I915_BOX_LOST_CONTEXT  0x10
+
+/* I915 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_I915_INIT          0x00
+#define DRM_I915_FLUSH         0x01
+#define DRM_I915_FLIP          0x02
+#define DRM_I915_BATCHBUFFER   0x03
+#define DRM_I915_IRQ_EMIT      0x04
+#define DRM_I915_IRQ_WAIT      0x05
+#define DRM_I915_GETPARAM      0x06
+#define DRM_I915_SETPARAM      0x07
+#define DRM_I915_ALLOC         0x08
+#define DRM_I915_FREE          0x09
+#define DRM_I915_INIT_HEAP     0x0a
+#define DRM_I915_CMDBUFFER     0x0b
+#define DRM_I915_DESTROY_HEAP  0x0c
+#define DRM_I915_SET_VBLANK_PIPE       0x0d
+#define DRM_I915_GET_VBLANK_PIPE       0x0e
+#define DRM_I915_VBLANK_SWAP   0x0f
+#define DRM_I915_MMIO          0x10
+#define DRM_I915_HWS_ADDR      0x11
+#define DRM_I915_EXECBUFFER    0x12
+
+#define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
+#define DRM_IOCTL_I915_BATCHBUFFER     DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER       DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP    DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_EXECBUFFER      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+
+/* Asynchronous page flipping:
+ */
+typedef struct drm_i915_flip {
+       /*
+        * This is really talking about planes, and we could rename it
+        * except for the fact that some of the duplicated i915_drm.h files
+        * out there check for HAVE_I915_FLIP and so might pick up this
+        * version.
+        */
+       int pipes;
+} drm_i915_flip_t;
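+
+/*
+ * Illustrative sketch (not part of this header): flipping both planes
+ * from userspace via libdrm, assuming fd is an open, authenticated DRM
+ * device node.
+ *
+ *        drm_i915_flip_t flip = { .pipes = 0x3 };
+ *        drmCommandWrite(fd, DRM_I915_FLIP, &flip, sizeof(flip));
+ */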
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct drm_i915_batchbuffer {
+       int start;              /* agp offset */
+       int used;               /* nr bytes in use */
+       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
+       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
+       int num_cliprects;      /* multipass with multiple cliprects? */
+       struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
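+
+/*
+ * Illustrative sketch (not part of this header): submitting a batch
+ * written at AGP offset 'start', 'used' bytes long, with no cliprects.
+ *
+ *        drm_i915_batchbuffer_t bb = { .start = start, .used = used };
+ *        drmCommandWrite(fd, DRM_I915_BATCHBUFFER, &bb, sizeof(bb));
+ */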
+
+/* As above, but pass a pointer to userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct drm_i915_cmdbuffer {
+       char __user *buf;       /* pointer to userspace command buffer */
+       int sz;                 /* nr bytes in buf */
+       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
+       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
+       int num_cliprects;      /* multipass with multiple cliprects? */
+       struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+/* Userspace can request & wait on irq's:
+ */
+typedef struct drm_i915_irq_emit {
+       int __user *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_wait {
+       int irq_seq;
+} drm_i915_irq_wait_t;
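+
+/*
+ * Illustrative sketch (not part of this header): emitting a breadcrumb
+ * interrupt and blocking until it retires.
+ *
+ *        int seq;
+ *        drm_i915_irq_emit_t e = { .irq_seq = &seq };
+ *        drmCommandWriteRead(fd, DRM_I915_IRQ_EMIT, &e, sizeof(e));
+ *        drm_i915_irq_wait_t w = { .irq_seq = seq };
+ *        drmCommandWrite(fd, DRM_I915_IRQ_WAIT, &w, sizeof(w));
+ */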
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE            1
+#define I915_PARAM_ALLOW_BATCHBUFFER     2
+#define I915_PARAM_LAST_DISPATCH         3
+
+typedef struct drm_i915_getparam {
+       int param;
+       int __user *value;
+} drm_i915_getparam_t;
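+
+/*
+ * Illustrative sketch (not part of this header): reading the most
+ * recently dispatched breadcrumb.
+ *
+ *        int last;
+ *        drm_i915_getparam_t gp = {
+ *                .param = I915_PARAM_LAST_DISPATCH,
+ *                .value = &last,
+ *        };
+ *        drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
+ */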
+
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
+
+typedef struct drm_i915_setparam {
+       int param;
+       int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+       int region;
+       int alignment;
+       int size;
+       int __user *region_offset;      /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_free {
+       int region;
+       int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+       int region;
+       int size;
+       int start;
+} drm_i915_mem_init_heap_t;
+
+/* Allow memory manager to be torn down and re-initialized (eg on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+               int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define        DRM_I915_VBLANK_PIPE_A  1
+#define        DRM_I915_VBLANK_PIPE_B  2
+
+typedef struct drm_i915_vblank_pipe {
+       int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+       drm_drawable_t drawable;
+       enum drm_vblank_seq_type seqtype;
+       unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
+#define I915_MMIO_READ 0
+#define I915_MMIO_WRITE 1
+
+#define I915_MMIO_MAY_READ     0x1
+#define I915_MMIO_MAY_WRITE    0x2
+
+#define MMIO_REGS_IA_PRIMATIVES_COUNT          0
+#define MMIO_REGS_IA_VERTICES_COUNT            1
+#define MMIO_REGS_VS_INVOCATION_COUNT          2
+#define MMIO_REGS_GS_PRIMITIVES_COUNT          3
+#define MMIO_REGS_GS_INVOCATION_COUNT          4
+#define MMIO_REGS_CL_PRIMITIVES_COUNT          5
+#define MMIO_REGS_CL_INVOCATION_COUNT          6
+#define MMIO_REGS_PS_INVOCATION_COUNT          7
+#define MMIO_REGS_PS_DEPTH_COUNT               8
+
+typedef struct drm_i915_mmio_entry {
+       unsigned int flag;
+       unsigned int offset;
+       unsigned int size;
+} drm_i915_mmio_entry_t;
+
+typedef struct drm_i915_mmio {
+       unsigned int read_write:1;
+       unsigned int reg:31;
+       void __user *data;
+} drm_i915_mmio_t;
+
+typedef struct drm_i915_hws_addr {
+       uint64_t addr;
+} drm_i915_hws_addr_t;
+
+/*
+ * Relocation header is 4 uint32_ts
+ * 0 - (16-bit relocation type << 16)| 16 bit reloc count
+ * 1 - buffer handle for another list of relocs
+ * 2-3 - spare.
+ */
+#define I915_RELOC_HEADER 4
+
+/*
+ * type 0 relocation has 4-uint32_t stride
+ * 0 - offset into buffer
+ * 1 - delta to add in
+ * 2 - index into buffer list
+ * 3 - reserved (for optimisations later).
+ */
+#define I915_RELOC_TYPE_0 0
+#define I915_RELOC0_STRIDE 4
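+
+/*
+ * Illustrative sketch (not part of this header): a minimal relocation
+ * buffer carrying one type-0 relocation.  'relocs' is assumed to be a
+ * mapping of the buffer object named by drm_i915_op_arg.reloc_handle.
+ *
+ *        uint32_t *relocs = ...;
+ *        relocs[0] = (I915_RELOC_TYPE_0 << 16) | 1;   header: type | count
+ *        relocs[1] = 0;                               no chained reloc buffer
+ *        relocs[2] = relocs[3] = 0;                   spare
+ *        relocs[4] = cmd_offset;     byte offset to patch in the target bo
+ *        relocs[5] = delta;          added to the relocated bo's final offset
+ *        relocs[6] = buf_index;      index into the validated buffer list
+ *        relocs[7] = 0;              reserved
+ */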
+
+struct drm_i915_op_arg {
+       uint64_t next;
+       uint32_t reloc_handle;
+       int handled;
+       union {
+               struct drm_bo_op_req req;
+               struct drm_bo_arg_rep rep;
+       } d;
+};
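+
+/*
+ * Illustrative sketch (not part of this header): chaining two validate
+ * requests for the execbuffer ioctl.  'bo1' and 'bo2' are assumed to be
+ * buffer-object handles obtained earlier; the driver treats the last
+ * buffer in the list as the batch buffer itself.
+ *
+ *        struct drm_i915_op_arg op[2] = {{0}};
+ *        op[0].d.req.op = drm_bo_validate;
+ *        op[0].d.req.bo_req.handle = bo1;
+ *        op[0].next = (uint64_t)(unsigned long)&op[1];
+ *        op[1].d.req.op = drm_bo_validate;
+ *        op[1].d.req.bo_req.handle = bo2;
+ *        op[1].next = 0;                              0 terminates the list
+ */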
+
+struct drm_i915_execbuffer {
+       uint64_t ops_list;
+       uint32_t num_buffers;
+       struct drm_i915_batchbuffer batch;
+       drm_context_t context; /* for lockless use in the future */
+       struct drm_fence_arg fence_arg;
+};
+
+#endif                         /* _I915_DRM_H_ */
diff --git a/psb-kernel-source-4.41.1/i915_drv.c b/psb-kernel-source-4.41.1/i915_drv.c
new file mode 100644 (file)
index 0000000..dbaeb38
--- /dev/null
@@ -0,0 +1,607 @@
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+       i915_PCI_IDS
+};
+
+#ifdef I915_HAVE_FENCE
+extern struct drm_fence_driver i915_fence_driver;
+#endif
+
+#ifdef I915_HAVE_BUFFER
+
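+/*
+ * Placement priority lists for the buffer manager: mem_type_prio is the
+ * order tried for ordinary allocations, while mem_busy_prio appears to
+ * be the fallback order used when the preferred pools are contended.
+ */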
+static uint32_t i915_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
+static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
+
+static struct drm_bo_driver i915_bo_driver = {
+       .mem_type_prio = i915_mem_prios,
+       .mem_busy_prio = i915_busy_prios,
+       .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
+       .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
+       .create_ttm_backend_entry = i915_create_ttm_backend_entry,
+       .fence_type = i915_fence_types,
+       .invalidate_caches = i915_invalidate_caches,
+       .init_mem_type = i915_init_mem_type,
+       .evict_mask = i915_evict_mask,
+       .move = i915_move,
+       .ttm_cache_flush = i915_flush_ttm,
+       .command_stream_barrier = NULL,
+};
+#endif
+
+enum pipe {
+       PIPE_A = 0,
+       PIPE_B,
+};
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (pipe == PIPE_A)
+               return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+       else
+               return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+       u32 *array;
+       int i;
+
+       if (!i915_pipe_enabled(dev, pipe))
+               return;
+
+       if (pipe == PIPE_A)
+               array = dev_priv->save_palette_a;
+       else
+               array = dev_priv->save_palette_b;
+
+       for(i = 0; i < 256; i++)
+               array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+       u32 *array;
+       int i;
+
+       if (!i915_pipe_enabled(dev, pipe))
+               return;
+
+       if (pipe == PIPE_A)
+               array = dev_priv->save_palette_a;
+       else
+               array = dev_priv->save_palette_b;
+
+       for(i = 0; i < 256; i++)
+               I915_WRITE(reg + (i << 2), array[i]);
+}
+
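+/*
+ * Legacy VGA registers sit behind index/data port pairs: write the
+ * register number to the index port, then move the value through the
+ * data port.  The attribute controller additionally needs a read of
+ * ST01 to reset its index/data flip-flop before each access.
+ */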
+static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
+{
+       outb(reg, index_port);
+       return inb(data_port);
+}
+
+static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
+{
+       inb(st01);
+       outb(palette_enable | reg, VGA_AR_INDEX);
+       return inb(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+       inb(st01);
+       outb(palette_enable | reg, VGA_AR_INDEX);
+       outb(val, VGA_AR_DATA_WRITE);
+}
+
+static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+       outb(reg, index_port);
+       outb(val, data_port);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+       u16 cr_index, cr_data, st01;
+
+       /* VGA color palette registers */
+       dev_priv->saveDACMASK = inb(VGA_DACMASK);
+       /* DACCRX automatically increments during read */
+       outb(0, VGA_DACRX);
+       /* Read 3 bytes of color data from each index */
+       for (i = 0; i < 256 * 3; i++)
+               dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
+
+       /* MSR bits */
+       dev_priv->saveMSR = inb(VGA_MSR_READ);
+       if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+               cr_index = VGA_CR_INDEX_CGA;
+               cr_data = VGA_CR_DATA_CGA;
+               st01 = VGA_ST01_CGA;
+       } else {
+               cr_index = VGA_CR_INDEX_MDA;
+               cr_data = VGA_CR_DATA_MDA;
+               st01 = VGA_ST01_MDA;
+       }
+
+       /* CRT controller regs */
+       i915_write_indexed(cr_index, cr_data, 0x11,
+                          i915_read_indexed(cr_index, cr_data, 0x11) &
+                          (~0x80));
+       for (i = 0; i < 0x24; i++)
+               dev_priv->saveCR[i] =
+                       i915_read_indexed(cr_index, cr_data, i);
+       /* Make sure we don't turn off CR group 0 writes */
+       dev_priv->saveCR[0x11] &= ~0x80;
+
+       /* Attribute controller registers */
+       inb(st01);
+       dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
+       for (i = 0; i < 20; i++)
+               dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
+       inb(st01);
+       outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+
+       /* Graphics controller registers */
+       for (i = 0; i < 9; i++)
+               dev_priv->saveGR[i] =
+                       i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
+
+       dev_priv->saveGR[0x10] =
+               i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+       dev_priv->saveGR[0x11] =
+               i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+       dev_priv->saveGR[0x18] =
+               i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+       /* Sequencer registers */
+       for (i = 0; i < 8; i++)
+               dev_priv->saveSR[i] =
+                       i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+       u16 cr_index, cr_data, st01;
+
+       /* MSR bits */
+       outb(dev_priv->saveMSR, VGA_MSR_WRITE);
+       if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+               cr_index = VGA_CR_INDEX_CGA;
+               cr_data = VGA_CR_DATA_CGA;
+               st01 = VGA_ST01_CGA;
+       } else {
+               cr_index = VGA_CR_INDEX_MDA;
+               cr_data = VGA_CR_DATA_MDA;
+               st01 = VGA_ST01_MDA;
+       }
+
+       /* Sequencer registers, don't write SR07 */
+       for (i = 0; i < 7; i++)
+               i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
+                                  dev_priv->saveSR[i]);
+
+       /* CRT controller regs */
+       /* Enable CR group 0 writes */
+       i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+       for (i = 0; i < 0x24; i++)
+               i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+       /* Graphics controller regs */
+       for (i = 0; i < 9; i++)
+               i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
+                                  dev_priv->saveGR[i]);
+
+       i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+                          dev_priv->saveGR[0x10]);
+       i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+                          dev_priv->saveGR[0x11]);
+       i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+                          dev_priv->saveGR[0x18]);
+
+       /* Attribute controller registers */
+       for (i = 0; i < 20; i++)
+               i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
+       inb(st01); /* switch back to index mode */
+       outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+
+       /* VGA color palette registers */
+       outb(dev_priv->saveDACMASK, VGA_DACMASK);
+       /* DACWX automatically increments during write */
+       outb(0, VGA_DACWX);
+       /* Write 3 bytes of color data to each index */
+       for (i = 0; i < 256 * 3; i++)
+               outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
+
+}
+
+static int i915_suspend(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       if (!dev || !dev_priv) {
+               printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
+               printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
+               return -ENODEV;
+       }
+
+       pci_save_state(dev->pdev);
+       pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+       /* Pipe & plane A info */
+       dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+       dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+       dev_priv->saveFPA0 = I915_READ(FPA0);
+       dev_priv->saveFPA1 = I915_READ(FPA1);
+       dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+       if (IS_I965G(dev))
+               dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+       dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+       dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+       dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+       dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+       dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+       dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+       dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+       dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+       dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+       dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+       dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+       dev_priv->saveDSPABASE = I915_READ(DSPABASE);
+       if (IS_I965G(dev)) {
+               dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+               dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+       }
+       i915_save_palette(dev, PIPE_A);
+
+       /* Pipe & plane B info */
+       dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+       dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+       dev_priv->saveFPB0 = I915_READ(FPB0);
+       dev_priv->saveFPB1 = I915_READ(FPB1);
+       dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+       if (IS_I965G(dev))
+               dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+       dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+       dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+       dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+       dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+       dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+       dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+       dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+
+       dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+       dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+       dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+       dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+       dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
+       if (IS_I965G(dev)) {
+               dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+               dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+       }
+       i915_save_palette(dev, PIPE_B);
+
+       /* CRT state */
+       dev_priv->saveADPA = I915_READ(ADPA);
+
+       /* LVDS state */
+       dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+       dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+       dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+       if (IS_I965G(dev))
+               dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+       if (IS_MOBILE(dev) && !IS_I830(dev))
+               dev_priv->saveLVDS = I915_READ(LVDS);
+       if (!IS_I830(dev) && !IS_845G(dev))
+               dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+       dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
+       dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
+       dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
+
+       /* FIXME: save TV & SDVO state */
+
+       /* FBC state */
+       dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+       dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+       dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+       dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+       /* VGA state */
+       dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
+       dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
+       dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
+       dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+       /* Scratch space */
+       for (i = 0; i < 16; i++) {
+               dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
+               dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+       }
+       for (i = 0; i < 3; i++)
+               dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+       i915_save_vga(dev);
+
+       /* Shut down the device */
+       pci_disable_device(dev->pdev);
+       pci_set_power_state(dev->pdev, PCI_D3hot);
+
+       return 0;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       pci_set_power_state(dev->pdev, PCI_D0);
+       pci_restore_state(dev->pdev);
+       if (pci_enable_device(dev->pdev))
+               return -1;
+
+       pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+       /* Pipe & plane A info */
+       /* Prime the clock */
+       if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+               I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+                          ~DPLL_VCO_ENABLE);
+               udelay(150);
+       }
+       I915_WRITE(FPA0, dev_priv->saveFPA0);
+       I915_WRITE(FPA1, dev_priv->saveFPA1);
+       /* Actually enable it */
+       I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+       udelay(150);
+       if (IS_I965G(dev))
+               I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+       udelay(150);
+
+       /* Restore mode */
+       I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+       I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+       I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+       I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+       I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+       I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+       I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+       /* Restore plane info */
+       I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+       I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+       I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+       I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
+       I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+       if (IS_I965G(dev)) {
+               I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+               I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+       }
+
+       if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) &&
+           (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
+               I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+       i915_restore_palette(dev, PIPE_A);
+       /* Enable the plane */
+       I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+       I915_WRITE(DSPABASE, I915_READ(DSPABASE));
+
+       /* Pipe & plane B info */
+       if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+               I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+                          ~DPLL_VCO_ENABLE);
+               udelay(150);
+       }
+       I915_WRITE(FPB0, dev_priv->saveFPB0);
+       I915_WRITE(FPB1, dev_priv->saveFPB1);
+       /* Actually enable it */
+       I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+       udelay(150);
+       if (IS_I965G(dev))
+               I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+       udelay(150);
+
+       /* Restore mode */
+       I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+       I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+       I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+       I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+       I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+       I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+       I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+       /* Restore plane info */
+       I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+       I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+       I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+       I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
+       I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+       if (IS_I965G(dev)) {
+               I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+               I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+       }
+
+       if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) &&
+           (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS))
+               I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+       i915_restore_palette(dev, PIPE_B);
+       /* Enable the plane */
+       I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+       I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
+
+       /* CRT state */
+       I915_WRITE(ADPA, dev_priv->saveADPA);
+
+       /* LVDS state */
+       if (IS_I965G(dev))
+               I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+       if (IS_MOBILE(dev) && !IS_I830(dev))
+               I915_WRITE(LVDS, dev_priv->saveLVDS);
+       if (!IS_I830(dev) && !IS_845G(dev))
+               I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+       I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+       I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+       I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
+       I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
+       I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
+       I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+       /* FIXME: restore TV & SDVO state */
+
+       /* FBC info */
+       I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+       I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+       I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+       I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+
+       /* VGA state */
+       I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+       I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
+       I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
+       I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
+       udelay(150);
+
+       for (i = 0; i < 16; i++) {
+               I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
+               I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+       }
+       for (i = 0; i < 3; i++)
+               I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+       i915_restore_vga(dev);
+
+       return 0;
+}
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static struct drm_driver driver = {
+       /* Don't use MTRRs here; the X server or a user space app should
+        * deal with them for Intel hardware.
+        */
+       .driver_features =
+           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
+           DRIVER_IRQ_VBL2,
+       .load = i915_driver_load,
+       .unload = i915_driver_unload,
+       .lastclose = i915_driver_lastclose,
+       .preclose = i915_driver_preclose,
+       .suspend = i915_suspend,
+       .resume = i915_resume,
+       .device_is_agp = i915_driver_device_is_agp,
+       .vblank_wait = i915_driver_vblank_wait,
+       .vblank_wait2 = i915_driver_vblank_wait2,
+       .irq_preinstall = i915_driver_irq_preinstall,
+       .irq_postinstall = i915_driver_irq_postinstall,
+       .irq_uninstall = i915_driver_irq_uninstall,
+       .irq_handler = i915_driver_irq_handler,
+       .reclaim_buffers = drm_core_reclaim_buffers,
+       .get_map_ofs = drm_core_get_map_ofs,
+       .get_reg_ofs = drm_core_get_reg_ofs,
+       .fb_probe = intelfb_probe,
+       .fb_remove = intelfb_remove,
+       .ioctls = i915_ioctls,
+       .fops = {
+               .owner = THIS_MODULE,
+               .open = drm_open,
+               .release = drm_release,
+               .ioctl = drm_ioctl,
+               .mmap = drm_mmap,
+               .poll = drm_poll,
+               .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+               .compat_ioctl = i915_compat_ioctl,
+#endif
+               },
+       .pci_driver = {
+               .name = DRIVER_NAME,
+               .id_table = pciidlist,
+               .probe = probe,
+               .remove = __devexit_p(drm_cleanup_pci),
+               },
+#ifdef I915_HAVE_FENCE
+       .fence_driver = &i915_fence_driver,
+#endif
+#ifdef I915_HAVE_BUFFER
+       .bo_driver = &i915_bo_driver,
+#endif
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       return drm_get_dev(pdev, ent, &driver);
+}
+
+static int __init i915_init(void)
+{
+       driver.num_ioctls = i915_max_ioctl;
+       return drm_init(&driver, pciidlist);
+}
+
+static void __exit i915_exit(void)
+{
+       drm_exit(&driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/psb-kernel-source-4.41.1/i915_drv.h b/psb-kernel-source-4.41.1/i915_drv.h
new file mode 100644 (file)
index 0000000..8399018
--- /dev/null
@@ -0,0 +1,795 @@
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+#include "i915_reg.h"
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR          "Tungsten Graphics, Inc."
+
+#define DRIVER_NAME            "i915"
+#define DRIVER_DESC            "Intel Graphics"
+#define DRIVER_DATE            "20070209"
+
+#if defined(__linux__)
+#define I915_HAVE_FENCE
+#define I915_HAVE_BUFFER
+#endif
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ *      - Support vertical blank on secondary display pipe
+ * 1.8: New ioctl for ARB_Occlusion_Query
+ * 1.9: Usable page flipping and triple buffering
+ * 1.10: Plane/pipe disentangling
+ * 1.11: TTM superioctl
+ */
+#define DRIVER_MAJOR           1
+#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
+#define DRIVER_MINOR           11
+#else
+#define DRIVER_MINOR           6
+#endif
+#define DRIVER_PATCHLEVEL      0
+
+#define DRM_DRIVER_PRIVATE_T struct drm_i915_private
+
+#ifdef I915_HAVE_BUFFER
+#define I915_MAX_VALIDATE_BUFFERS 4096
+#endif
+
+struct drm_i915_ring_buffer {
+       int tail_mask;
+       unsigned long Start;
+       unsigned long End;
+       unsigned long Size;
+       u8 *virtual_start;
+       int head;
+       int tail;
+       int space;
+       drm_local_map_t map;
+};
+
+struct mem_block {
+       struct mem_block *next;
+       struct mem_block *prev;
+       int start;
+       int size;
+       struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+struct drm_i915_vbl_swap {
+       struct list_head head;
+       drm_drawable_t drw_id;
+       unsigned int plane;
+       unsigned int sequence;
+       int flip;
+};
+
+struct drm_i915_private {
+       struct drm_buffer_object *ring_buffer;
+       drm_local_map_t *sarea;
+       drm_local_map_t *mmio_map;
+
+       unsigned long mmiobase;
+       unsigned long mmiolen;
+
+       struct drm_i915_sarea *sarea_priv;
+       struct drm_i915_ring_buffer ring;
+
+       struct drm_dma_handle *status_page_dmah;
+       void *hw_status_page;
+       dma_addr_t dma_status_page;
+       uint32_t counter;
+       unsigned int status_gfx_addr;
+       drm_local_map_t hws_map;
+
+       unsigned int cpp;
+       int use_mi_batchbuffer_start;
+
+       wait_queue_head_t irq_queue;
+       atomic_t irq_received;
+       atomic_t irq_emitted;
+
+       int tex_lru_log_granularity;
+       int allow_batchbuffer;
+       struct mem_block *agp_heap;
+       unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+       int vblank_pipe;
+       DRM_SPINTYPE user_irq_lock;
+       int user_irq_refcount;
+       int fence_irq_on;
+       uint32_t irq_enable_reg;
+       int irq_enabled;
+
+#ifdef I915_HAVE_FENCE
+       uint32_t flush_sequence;
+       uint32_t flush_flags;
+       uint32_t flush_pending;
+       uint32_t saved_flush_status;
+       uint32_t reported_sequence;
+       int reported_sequence_valid;
+#endif
+#ifdef I915_HAVE_BUFFER
+       void *agp_iomap;
+       unsigned int max_validate_buffers;
+       struct mutex cmdbuf_mutex;
+#endif
+
+       DRM_SPINTYPE swaps_lock;
+       struct drm_i915_vbl_swap vbl_swaps;
+       unsigned int swaps_pending;
+
+       /* LVDS info */
+       int backlight_duty_cycle;  /* restore backlight to this value */
+       bool panel_wants_dither;
+       struct drm_display_mode *panel_fixed_mode;
+
+       /* Register state */
+       u8 saveLBB;
+       u32 saveDSPACNTR;
+       u32 saveDSPBCNTR;
+       u32 savePIPEACONF;
+       u32 savePIPEBCONF;
+       u32 savePIPEASRC;
+       u32 savePIPEBSRC;
+       u32 saveFPA0;
+       u32 saveFPA1;
+       u32 saveDPLL_A;
+       u32 saveDPLL_A_MD;
+       u32 saveHTOTAL_A;
+       u32 saveHBLANK_A;
+       u32 saveHSYNC_A;
+       u32 saveVTOTAL_A;
+       u32 saveVBLANK_A;
+       u32 saveVSYNC_A;
+       u32 saveBCLRPAT_A;
+       u32 saveDSPASTRIDE;
+       u32 saveDSPASIZE;
+       u32 saveDSPAPOS;
+       u32 saveDSPABASE;
+       u32 saveDSPASURF;
+       u32 saveDSPATILEOFF;
+       u32 savePFIT_PGM_RATIOS;
+       u32 saveBLC_PWM_CTL;
+       u32 saveBLC_PWM_CTL2;
+       u32 saveFPB0;
+       u32 saveFPB1;
+       u32 saveDPLL_B;
+       u32 saveDPLL_B_MD;
+       u32 saveHTOTAL_B;
+       u32 saveHBLANK_B;
+       u32 saveHSYNC_B;
+       u32 saveVTOTAL_B;
+       u32 saveVBLANK_B;
+       u32 saveVSYNC_B;
+       u32 saveBCLRPAT_B;
+       u32 saveDSPBSTRIDE;
+       u32 saveDSPBSIZE;
+       u32 saveDSPBPOS;
+       u32 saveDSPBBASE;
+       u32 saveDSPBSURF;
+       u32 saveDSPBTILEOFF;
+       u32 saveVCLK_DIVISOR_VGA0;
+       u32 saveVCLK_DIVISOR_VGA1;
+       u32 saveVCLK_POST_DIV;
+       u32 saveVGACNTRL;
+       u32 saveADPA;
+       u32 saveLVDS;
+       u32 saveLVDSPP_ON;
+       u32 saveLVDSPP_OFF;
+       u32 saveDVOA;
+       u32 saveDVOB;
+       u32 saveDVOC;
+       u32 savePP_ON;
+       u32 savePP_OFF;
+       u32 savePP_CONTROL;
+       u32 savePP_CYCLE;
+       u32 savePFIT_CONTROL;
+       u32 save_palette_a[256];
+       u32 save_palette_b[256];
+       u32 saveFBC_CFB_BASE;
+       u32 saveFBC_LL_BASE;
+       u32 saveFBC_CONTROL;
+       u32 saveFBC_CONTROL2;
+       u32 saveSWF0[16];
+       u32 saveSWF1[16];
+       u32 saveSWF2[3];
+       u8 saveMSR;
+       u8 saveSR[8];
+       u8 saveGR[24];
+       u8 saveAR_INDEX;
+       u8 saveAR[20];
+       u8 saveDACMASK;
+       u8 saveDACDATA[256*3]; /* 256 3-byte colors */
+       u8 saveCR[36];
+};
+
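+/*
+ * Chip family flags.  The power-of-two values are ORed together in the
+ * PCI ID table, so a single device can belong to several families at
+ * once (an i965, for instance, is also an i9xx).
+ */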
+enum intel_chip_family {
+       CHIP_I8XX = 0x01,
+       CHIP_I9XX = 0x02,
+       CHIP_I915 = 0x04,
+       CHIP_I965 = 0x08,
+       CHIP_POULSBO = 0x10,
+};
+
+extern struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+
+                               /* i915_dma.c */
+extern void i915_kernel_lost_context(struct drm_device * dev);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *dev);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+                                struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+                             unsigned long arg);
+extern void i915_emit_breadcrumb(struct drm_device *dev);
+extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
+extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
+extern int i915_driver_firstopen(struct drm_device *dev);
+extern int i915_do_cleanup_pageflip(struct drm_device *dev);
+extern int i915_dma_cleanup(struct drm_device *dev);
+
+/* i915_irq.c */
+extern int i915_irq_emit(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int i915_irq_wait(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+
+extern void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe);
+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+extern void i915_driver_irq_preinstall(struct drm_device * dev);
+extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern void i915_driver_irq_uninstall(struct drm_device * dev);
+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+extern int i915_emit_irq(struct drm_device * dev);
+extern void i915_user_irq_on(struct drm_i915_private *dev_priv);
+extern void i915_user_irq_off(struct drm_i915_private *dev_priv);
+extern void i915_enable_interrupt (struct drm_device *dev);
+extern int i915_vblank_swap(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+
+/* i915_mem.c */
+extern int i915_mem_alloc(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+extern int i915_mem_free(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv);
+extern void i915_mem_takedown(struct mem_block **heap);
+extern void i915_mem_release(struct drm_device * dev,
+                            struct drm_file *file_priv,
+                            struct mem_block *heap);
+#ifdef I915_HAVE_FENCE
+/* i915_fence.c */
+extern void i915_fence_handler(struct drm_device *dev);
+extern void i915_invalidate_reported_sequence(struct drm_device *dev);
+
+#endif
+
+#ifdef I915_HAVE_BUFFER
+/* i915_buffer.c */
+extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
+                           uint32_t *type);
+extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
+extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
+                              struct drm_mem_type_manager *man);
+extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
+extern int i915_move(struct drm_buffer_object *bo, int evict,
+               int no_wait, struct drm_bo_mem_reg *new_mem);
+void i915_flush_ttm(struct drm_ttm *ttm);
+#endif
+
+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+extern void intel_init_chipset_flush_compat(struct drm_device *dev);
+extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
+#endif
+#endif
+
+
+/* modesetting */
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+
+
+#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+#define I915_READ16(reg)       DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16(reg,val)  DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
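+/* These accessors assume a dev_priv pointer in scope at the call site. */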
+
+#define I915_VERBOSE 0
+
+#define RING_LOCALS    unsigned int outring, ringmask, outcount; \
+                       volatile char *virt;
+
+#define BEGIN_LP_RING(n) do {                          \
+       if (I915_VERBOSE)                               \
+               DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n",  \
+                                (n), __FUNCTION__);           \
+       if (dev_priv->ring.space < (n)*4)                      \
+               i915_wait_ring(dev, (n)*4, __FUNCTION__);      \
+       outcount = 0;                                   \
+       outring = dev_priv->ring.tail;                  \
+       ringmask = dev_priv->ring.tail_mask;            \
+       virt = dev_priv->ring.virtual_start;            \
+} while (0)
+
+#define OUT_RING(n) do {                                       \
+       if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
+       *(volatile unsigned int *)(virt + outring) = (n);               \
+       outcount++;                                             \
+       outring += 4;                                           \
+       outring &= ringmask;                                    \
+} while (0)
+
+#define ADVANCE_LP_RING() do {                                         \
+       if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);   \
+       dev_priv->ring.tail = outring;                                  \
+       dev_priv->ring.space -= outcount * 4;                           \
+       I915_WRITE(LP_RING + RING_TAIL, outring);                       \
+} while(0)
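+
+/*
+ * Illustrative sketch (not part of the original source): a minimal emit
+ * sequence built from the three macros above.  BEGIN_LP_RING() expects
+ * dev and dev_priv locals to be in scope; the sketch is kept inside a
+ * comment so that nothing new is compiled.
+ *
+ *        RING_LOCALS;
+ *
+ *        BEGIN_LP_RING(2);
+ *        OUT_RING(CMD_MI_FLUSH | MI_READ_FLUSH);
+ *        OUT_RING(MI_NOOP);
+ *        ADVANCE_LP_RING();
+ */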
+
+#define MI_NOOP        (0x00 << 23)
+
+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ */
+#define INTEL_GMCH_CTRL                0x52
+#define INTEL_GMCH_ENABLED     0x4
+#define INTEL_GMCH_MEM_MASK    0x1
+#define INTEL_GMCH_MEM_64M     0x1
+#define INTEL_GMCH_MEM_128M    0
+
+#define INTEL_855_GMCH_GMS_MASK                (0x7 << 4)
+#define INTEL_855_GMCH_GMS_DISABLED    (0x0 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_1M   (0x1 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_4M   (0x2 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_8M   (0x3 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_16M  (0x4 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_32M  (0x5 << 4)
+
+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+/* Extended config space */
+#define LBB 0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define   VGA_MSR_MEM_EN (1<<1)
+#define   VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define   VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define   VGA_GR_MEM_READ_MODE_SHIFT 3
+#define     VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define   VGA_GR_MEM_MODE_MASK 0xc
+#define   VGA_GR_MEM_MODE_SHIFT 2
+#define   VGA_GR_MEM_A0000_AFFFF 0
+#define   VGA_GR_MEM_A0000_BFFFF 1
+#define   VGA_GR_MEM_B0000_B7FFF 2
+#define   VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
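+
+/*
+ * The VGA core registers above come in index/data pairs: write the
+ * register number to the *_INDEX port, then access its value through the
+ * matching *_DATA port.  (The attribute controller is the odd one out:
+ * index and write data share port 0x3c0, toggled by a read of ST01.)
+ * Illustrative sketch (not part of the original source), blanking the
+ * screen via sequencer register SR01, defined further below:
+ *
+ *        outb(SR01, VGA_SR_INDEX);
+ *        sr01 = inb(VGA_SR_DATA);
+ *        outb(sr01 | SR01_SCREEN_OFF, VGA_SR_DATA);
+ */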
+
+#define GFX_OP_USER_INTERRUPT          ((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT    ((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD                        (7<<23)
+#define CMD_STORE_DWORD_IDX            ((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
+
+#define CMD_MI_FLUSH         (0x04 << 23)
+#define MI_NO_WRITE_FLUSH    (1 << 2)
+#define MI_READ_FLUSH        (1 << 0)
+#define MI_EXE_FLUSH         (1 << 1)
+#define MI_END_SCENE         (1 << 4) /* flush binner and incr scene count */
+#define MI_SCENE_COUNT       (1 << 3) /* just increment scene count */
+
+/* Packet to load a register value from the ring/batch command stream:
+ */
+#define CMD_MI_LOAD_REGISTER_IMM       ((0x22 << 23)|0x1)
+
+#define BB1_START_ADDR_MASK   (~0x7)
+#define BB1_PROTECTED         (1<<0)
+#define BB1_UNPROTECTED       (0<<0)
+#define BB2_END_ADDR_MASK     (~0x7)
+
+#define I915REG_HWS_PGA                0x02080
+
+/* Framebuffer compression */
+#define FBC_CFB_BASE           0x03200 /* 4k page aligned */
+#define FBC_LL_BASE            0x03204 /* 4k page aligned */
+#define FBC_CONTROL            0x03208
+#define   FBC_CTL_EN           (1<<31)
+#define   FBC_CTL_PERIODIC     (1<<30)
+#define   FBC_CTL_INTERVAL_SHIFT (16)
+#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_STRIDE_SHIFT (5)
+#define   FBC_CTL_FENCENO      (1<<0)
+#define FBC_COMMAND            0x0320c
+#define   FBC_CMD_COMPRESS     (1<<0)
+#define FBC_STATUS             0x03210
+#define   FBC_STAT_COMPRESSING (1<<31)
+#define   FBC_STAT_COMPRESSED  (1<<30)
+#define   FBC_STAT_MODIFIED    (1<<29)
+#define   FBC_STAT_CURRENT_LINE        (1<<0)
+#define FBC_CONTROL2           0x03214
+#define   FBC_CTL_FENCE_DBL    (0<<4)
+#define   FBC_CTL_IDLE_IMM     (0<<2)
+#define   FBC_CTL_IDLE_FULL    (1<<2)
+#define   FBC_CTL_IDLE_LINE    (2<<2)
+#define   FBC_CTL_IDLE_DEBUG   (3<<2)
+#define   FBC_CTL_CPU_FENCE    (1<<1)
+#define   FBC_CTL_PLANEA       (0<<0)
+#define   FBC_CTL_PLANEB       (1<<0)
+#define FBC_FENCE_OFF          0x0321b
+
+#define FBC_LL_SIZE            (1536)
+#define FBC_LL_PAD             (32)
+
+/* Interrupt bits:
+ */
+#define USER_INT_FLAG    (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+#define HWB_OOM_FLAG     (1<<13) /* binner out of memory */
+
+#define I915REG_HWSTAM         0x02098
+#define I915REG_INT_IDENTITY_R 0x020a4
+#define I915REG_INT_MASK_R     0x020a8
+#define I915REG_INT_ENABLE_R   0x020a0
+#define I915REG_INSTPM         0x020c0
+
+#define I915REG_PIPEASTAT      0x70024
+#define I915REG_PIPEBSTAT      0x71024
+
+#define I915_VBLANK_INTERRUPT_ENABLE   (1UL<<17)
+#define I915_VBLANK_CLEAR              (1UL<<1)
+
+#define GPIOA                  0x5010
+#define GPIOB                  0x5014
+#define GPIOC                  0x5018
+#define GPIOD                  0x501c
+#define GPIOE                  0x5020
+#define GPIOF                  0x5024
+#define GPIOG                  0x5028
+#define GPIOH                  0x502c
+# define GPIO_CLOCK_DIR_MASK           (1 << 0)
+# define GPIO_CLOCK_DIR_IN             (0 << 1)
+# define GPIO_CLOCK_DIR_OUT            (1 << 1)
+# define GPIO_CLOCK_VAL_MASK           (1 << 2)
+# define GPIO_CLOCK_VAL_OUT            (1 << 3)
+# define GPIO_CLOCK_VAL_IN             (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE     (1 << 5)
+# define GPIO_DATA_DIR_MASK            (1 << 8)
+# define GPIO_DATA_DIR_IN              (0 << 9)
+# define GPIO_DATA_DIR_OUT             (1 << 9)
+# define GPIO_DATA_VAL_MASK            (1 << 10)
+# define GPIO_DATA_VAL_OUT             (1 << 11)
+# define GPIO_DATA_VAL_IN              (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE      (1 << 13)
+
+/* p317, 319
+ */
+#define VCLK2_VCO_M        0x6008 /* treat as 16 bit? (includes msbs) */
+#define VCLK2_VCO_N        0x600a
+#define VCLK2_VCO_DIV_SEL  0x6012
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV      0x6010
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA1_PD_P2_DIV_4      (1 << 15)
+/** Overrides the p2 post divisor field */
+# define VGA1_PD_P1_DIV_2      (1 << 13)
+# define VGA1_PD_P1_SHIFT      8
+/** P1 value is 2 greater than this field */
+# define VGA1_PD_P1_MASK       (0x1f << 8)
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA0_PD_P2_DIV_4      (1 << 7)
+/** Overrides the p2 post divisor field */
+# define VGA0_PD_P1_DIV_2      (1 << 5)
+# define VGA0_PD_P1_SHIFT      0
+/** P1 value is 2 greater than this field */
+# define VGA0_PD_P1_MASK       (0x1f << 0)
+
+#define POST_DIV_SELECT        0x70
+#define POST_DIV_1             0x00
+#define POST_DIV_2             0x10
+#define POST_DIV_4             0x20
+#define POST_DIV_8             0x30
+#define POST_DIV_16            0x40
+#define POST_DIV_32            0x50
+#define VCO_LOOP_DIV_BY_4M     0x00
+#define VCO_LOOP_DIV_BY_16M    0x04
+
+#define SRX_INDEX              0x3c4
+#define SRX_DATA               0x3c5
+#define SR01                   1
+#define SR01_SCREEN_OFF                (1<<5)
+
+#define PPCR                   0x61204
+#define PPCR_ON                        (1<<0)
+
+#define DVOA                   0x61120
+#define DVOA_ON                        (1<<31)
+#define DVOB                   0x61140
+#define DVOB_ON                        (1<<31)
+#define DVOC                   0x61160
+#define DVOC_ON                        (1<<31)
+#define LVDS                   0x61180
+#define LVDS_ON                        (1<<31)
+
+#define ADPA                   0x61100
+#define ADPA_DPMS_MASK         (~(3<<10))
+#define ADPA_DPMS_ON           (0<<10)
+#define ADPA_DPMS_SUSPEND      (1<<10)
+#define ADPA_DPMS_STANDBY      (2<<10)
+#define ADPA_DPMS_OFF          (3<<10)
+
+#define NOPID                   0x2094
+#define LP_RING                        0x2030
+#define HP_RING                        0x2040
+/* The binner has its own ring buffer:
+ */
+#define HWB_RING               0x2400
+
+#define RING_TAIL              0x00
+#define TAIL_ADDR              0x001FFFF8
+#define RING_HEAD              0x04
+#define HEAD_WRAP_COUNT                0xFFE00000
+#define HEAD_WRAP_ONE          0x00200000
+#define HEAD_ADDR              0x001FFFFC
+#define RING_START             0x08
+#define START_ADDR             0xFFFFF000
+#define RING_LEN               0x0C
+#define RING_NR_PAGES          0x001FF000
+#define RING_REPORT_MASK       0x00000006
+#define RING_REPORT_64K                0x00000002
+#define RING_REPORT_128K       0x00000004
+#define RING_NO_REPORT         0x00000000
+#define RING_VALID_MASK                0x00000001
+#define RING_VALID             0x00000001
+#define RING_INVALID           0x00000000
+
+/* Instruction parser error reg:
+ */
+#define IPEIR                  0x2088
+
+/* Scratch pad debug 0 reg:
+ */
+#define SCPD0                  0x209c
+
+/* Error status reg:
+ */
+#define ESR                    0x20b8
+
+/* Secondary DMA fetch address debug reg:
+ */
+#define DMA_FADD_S             0x20d4
+
+/* Cache mode 0 reg.
+ *  - Manipulating render cache behaviour is central to the concept of
+ *    zone rendering; tuning this reg can help avoid unnecessary render
+ *    cache reads and even writes (for z/stencil) at the beginning and
+ *    end of a scene.
+ *
+ *  - To change a bit, write to this reg with the mask bit set and the
+ *    bit of interest either set or cleared, e.g. (BIT<<16) | BIT to
+ *    set it.
+ */
+#define Cache_Mode_0           0x2120
+#define CM0_MASK_SHIFT          16
+#define CM0_IZ_OPT_DISABLE      (1<<6)
+#define CM0_ZR_OPT_DISABLE      (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
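+
+/*
+ * Illustrative sketch (not part of the original source): the mask-bit
+ * write idiom from the comment above, spelled out with I915_WRITE().
+ *
+ * Set CM0_IZ_OPT_DISABLE (mask bit and value bit both set):
+ *
+ *        I915_WRITE(Cache_Mode_0,
+ *                   (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) |
+ *                   CM0_IZ_OPT_DISABLE);
+ *
+ * Clear it again (mask bit set, value bit clear):
+ *
+ *        I915_WRITE(Cache_Mode_0, CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT);
+ */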
+
+
+/* Graphics flush control.  A CPU write flushes the GWB of all writes.
+ * The data is discarded.
+ */
+#define GFX_FLSH_CNTL          0x2170
+
+/* Binner control.  Defines the location of the bin pointer list:
+ */
+#define BINCTL                 0x2420
+#define BC_MASK                        (1 << 9)
+
+/* Binned scene info.
+ */
+#define BINSCENE               0x2428
+#define BS_OP_LOAD             (1 << 8)
+#define BS_MASK                        (1 << 22)
+
+/* Bin command parser debug reg:
+ */
+#define BCPD                   0x2480
+
+/* Bin memory control debug reg:
+ */
+#define BMCD                   0x2484
+
+/* Bin data cache debug reg:
+ */
+#define BDCD                   0x2488
+
+/* Binner pointer cache debug reg:
+ */
+#define BPCD                   0x248c
+
+/* Binner scratch pad debug reg:
+ */
+#define BINSKPD                        0x24f0
+
+/* HWB scratch pad debug reg:
+ */
+#define HWBSKPD                        0x24f4
+
+/* Binner memory pool reg:
+ */
+#define BMP_BUFFER             0x2430
+#define BMP_PAGE_SIZE_4K       (0 << 10)
+#define BMP_BUFFER_SIZE_SHIFT  1
+#define BMP_ENABLE             (1 << 0)
+
+/* Get/put memory from the binner memory pool:
+ */
+#define BMP_GET                        0x2438
+#define BMP_PUT                        0x2440
+#define BMP_OFFSET_SHIFT       5
+
+/* 3D state packets:
+ */
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR       (0x1<<1)
+#define SC_ENABLE_MASK          (0x1<<0)
+#define SC_ENABLE               (0x1<<0)
+
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK      (0xffff<<16)
+#define SCI_XMIN_MASK      (0xffff<<0)
+#define SCI_YMAX_MASK      (0xffff<<16)
+#define SCI_XMAX_MASK      (0xffff<<0)
+
+#define GFX_OP_SCISSOR_ENABLE   ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT     ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+
+#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+
+#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD            ((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT       ((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA    (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB      (1<<20)
+#define   BLT_DEPTH_8                  (0<<24)
+#define   BLT_DEPTH_16_565             (1<<24)
+#define   BLT_DEPTH_16_1555            (2<<24)
+#define   BLT_DEPTH_32                 (3<<24)
+#define   BLT_ROP_GXCOPY               (0xcc<<16)
+
+#define MI_BATCH_BUFFER                ((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START  (0x31<<23)
+#define MI_BATCH_BUFFER_END    (0xA<<23)
+#define MI_BATCH_NON_SECURE    (1)
+
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+
+#define MI_WAIT_FOR_EVENT       ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+
+#define MI_LOAD_SCAN_LINES_INCL  ((0x12<<23))
+
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP                (1<<22)
+#define DISPLAY_PLANE_A           (0<<20)
+#define DISPLAY_PLANE_B           (1<<20)
+
+/* Display regs */
+#define DSPACNTR                0x70180
+#define DSPBCNTR                0x71180
+#define DISPPLANE_SEL_PIPE_MASK                 (1<<24)
+
+/* Define the region of interest for the binner:
+ */
+#define CMD_OP_BIN_CONTROL      ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
+
+#define CMD_OP_DESTBUFFER_INFO  ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
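+
+/*
+ * Breadcrumb sequence numbers occupy BREADCRUMB_BITS bits and wrap, so
+ * ordering tests need modular arithmetic.  Illustrative sketch (not part
+ * of the original source) of a wrap-safe "a has passed b" test, matching
+ * the sequence_mask/wrap_diff scheme the fence driver uses:
+ *
+ *        static inline int breadcrumb_passed(uint32_t a, uint32_t b)
+ *        {
+ *                return ((a - b) & BREADCRUMB_MASK) <
+ *                        (1U << (BREADCRUMB_BITS - 1));
+ *        }
+ */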
+
+#define PRIMARY_RINGBUFFER_SIZE                (128*1024)
+
+#define BLC_PWM_CTL2           0x61250
+
+#endif
diff --git a/psb-kernel-source-4.41.1/i915_fence.c b/psb-kernel-source-4.41.1/i915_fence.c
new file mode 100644 (file)
index 0000000..e2664c5
--- /dev/null
@@ -0,0 +1,293 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * Initiate a sync flush if it's not already pending.
+ */
+
+static void i915_initiate_rwflush(struct drm_i915_private *dev_priv, 
+                                 struct drm_fence_class_manager *fc)
+{
+       if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) && 
+           !dev_priv->flush_pending) {
+               dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
+               dev_priv->flush_flags = fc->pending_flush;
+               dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
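+               /*
+                * INSTPM is a masked register: the upper half carries
+                * per-bit write enables, so bit 21 unmasks the sync
+                * flush enable in bit 5.
+                */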
+               I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+               dev_priv->flush_pending = 1;
+               fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
+       }
+}
+
+static void i915_fence_flush(struct drm_device *dev,
+                            uint32_t fence_class)
+{
+       struct drm_i915_private *dev_priv = 
+               (struct drm_i915_private *) dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
+       unsigned long irq_flags;
+
+       if (unlikely(!dev_priv))
+               return;
+
+       write_lock_irqsave(&fm->lock, irq_flags);
+       i915_initiate_rwflush(dev_priv, fc);
+       write_unlock_irqrestore(&fm->lock, irq_flags);
+}
+
+static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
+                           uint32_t waiting_types)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
+       uint32_t flush_flags = 0;
+       uint32_t flush_sequence = 0;
+       uint32_t i_status;
+       uint32_t sequence;
+
+       if (unlikely(!dev_priv))
+               return;
+
+       /*
+        * First, report any executed sync flush:
+        */
+
+       if (dev_priv->flush_pending) {
+               i_status = READ_HWSP(dev_priv, 0);
+               if ((i_status & (1 << 12)) !=
+                   (dev_priv->saved_flush_status & (1 << 12))) {
+                       flush_flags = dev_priv->flush_flags;
+                       flush_sequence = dev_priv->flush_sequence;
+                       dev_priv->flush_pending = 0;
+                       drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
+               }
+       }               
+
+       /*
+        * Report a new breadcrumb and adjust IRQs.
+        */
+
+       if (waiting_types & DRM_FENCE_TYPE_EXE) {
+               sequence = READ_BREADCRUMB(dev_priv);
+
+               if (sequence != dev_priv->reported_sequence ||
+                   !dev_priv->reported_sequence_valid) {
+                       drm_fence_handler(dev, 0, sequence, 
+                                         DRM_FENCE_TYPE_EXE, 0);
+                       dev_priv->reported_sequence = sequence;
+                       dev_priv->reported_sequence_valid = 1;
+               }
+
+               if (dev_priv->fence_irq_on && !(waiting_types & DRM_FENCE_TYPE_EXE)) {
+                       i915_user_irq_off(dev_priv);
+                       dev_priv->fence_irq_on = 0;
+               } else if (!dev_priv->fence_irq_on && (waiting_types & DRM_FENCE_TYPE_EXE)) {
+                       i915_user_irq_on(dev_priv);
+                       dev_priv->fence_irq_on = 1;
+               }
+       }
+
+       /*
+        * There may be new RW flushes pending. Start them.
+        */
+       
+       i915_initiate_rwflush(dev_priv, fc); 
+
+       /*
+        * They may also, though it is unlikely, finish immediately.
+        */
+
+       if (dev_priv->flush_pending) {
+               i_status = READ_HWSP(dev_priv, 0);
+               if (unlikely((i_status & (1 << 12)) !=
+                   (dev_priv->saved_flush_status & (1 << 12)))) {
+                       flush_flags = dev_priv->flush_flags;
+                       flush_sequence = dev_priv->flush_sequence;
+                       dev_priv->flush_pending = 0;
+                       drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
+               }
+       }
+}
+
+static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+                            uint32_t flags, uint32_t *sequence,
+                            uint32_t *native_type)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       if (unlikely(!dev_priv))
+               return -EINVAL;
+
+       i915_emit_irq(dev);
+       *sequence = (uint32_t) dev_priv->counter;
+       *native_type = DRM_FENCE_TYPE_EXE;
+       if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
+               *native_type |= DRM_I915_FENCE_TYPE_RW;
+
+       return 0;
+}
+
+void i915_fence_handler(struct drm_device *dev)
+{
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
+
+       write_lock(&fm->lock);
+       i915_fence_poll(dev, 0, fc->waiting_types);
+       write_unlock(&fm->lock);
+}
+
+/*
+ * We need a separate wait function since we need to poll for
+ * sync flushes.
+ */
+
+static int i915_fence_wait(struct drm_fence_object *fence,
+                          int lazy, int interruptible, uint32_t mask)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
+       int ret;
+       unsigned long  _end = jiffies + 3 * DRM_HZ;
+
+       drm_fence_object_flush(fence, mask);
+       if (likely(interruptible))
+               ret = wait_event_interruptible_timeout
+                       (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
+                        3 * DRM_HZ);
+       else 
+               ret = wait_event_timeout
+                       (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
+                        3 * DRM_HZ);
+
+       if (unlikely(ret == -ERESTARTSYS))
+               return -EAGAIN;
+
+       if (unlikely(ret == 0))
+               return -EBUSY;
+
+       if (likely(mask == DRM_FENCE_TYPE_EXE || 
+                  drm_fence_object_signaled(fence, mask))) 
+               return 0;
+
+       /*
+        * Remove this code snippet when fixed. HWSTAM doesn't let
+        * flush info through...
+        */
+
+       if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
+               unsigned long irq_flags;
+
+               DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
+               msleep(100);
+               dev_priv->flush_pending = 0;
+               write_lock_irqsave(&fm->lock, irq_flags);
+               drm_fence_handler(dev, fence->fence_class, 
+                                 fence->sequence, fence->type, 0);
+               write_unlock_irqrestore(&fm->lock, irq_flags);
+       }
+
+       /*
+        * Poll for sync flush completion.
+        */
+
+       return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
+}
+
+static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
+{
+       uint32_t flush_flags = fence->waiting_types & 
+               ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
+
+       if (likely(flush_flags == 0 || 
+                  ((flush_flags & ~fence->native_types) == 0) || 
+                  (fence->signaled_types != DRM_FENCE_TYPE_EXE)))
+               return 0;
+       else {
+               struct drm_device *dev = fence->dev;
+               struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+               struct drm_fence_driver *driver = dev->driver->fence_driver;
+               
+               if (unlikely(!dev_priv))
+                       return 0;
+
+               if (dev_priv->flush_pending) {
+                       uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & 
+                               driver->sequence_mask;
+
+                       if (diff < driver->wrap_diff)
+                               return 0;
+               }
+       }
+       return flush_flags;
+}
+
+/*
+ * In the very unlikely event that "poll" is not called often enough,
+ * we need the following function to handle sequence wraparounds.
+ */
+
+void i915_invalidate_reported_sequence(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) 
+               dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       unsigned long irq_flags;
+
+       if (unlikely(!dev_priv))
+               return;
+       
+       write_lock_irqsave(&fm->lock, irq_flags);
+       dev_priv->reported_sequence_valid = 0;
+       write_unlock_irqrestore(&fm->lock, irq_flags);
+}
+       
+
+struct drm_fence_driver i915_fence_driver = {
+       .num_classes = 1,
+       .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
+       .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
+       .sequence_mask = BREADCRUMB_MASK,
+       .has_irq = NULL,
+       .emit = i915_fence_emit_sequence,
+       .flush = i915_fence_flush,
+       .poll = i915_fence_poll,
+       .needed_flush = i915_fence_needed_flush,
+       .wait = i915_fence_wait,
+};
diff --git a/psb-kernel-source-4.41.1/i915_init.c b/psb-kernel-source-4.41.1/i915_init.c
new file mode 100644 (file)
index 0000000..3b43c72
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
+ *                   2004 Sylvain Meyer
+ *
+ * GPL/BSD dual license
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/**
+ * i915_probe_agp - get AGP bootup configuration
+ * @pdev: PCI device
+ * @aperture_size: returns AGP aperture configured size
+ * @preallocated_size: returns size of BIOS preallocated AGP space
+ *
+ * Since Intel integrated graphics are UMA, the BIOS has to set aside
+ * some RAM for the framebuffer at early boot.  This code figures out
+ * how much was set aside so we can use it for our own purposes.
+ */
+int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
+                  unsigned long *preallocated_size)
+{
+       struct pci_dev *bridge_dev;
+       u16 tmp = 0;
+       unsigned long overhead;
+
+       bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+       if (!bridge_dev) {
+               DRM_ERROR("bridge device not found\n");
+               return -1;
+       }
+
+       /* Get the fb aperture size and "stolen" memory amount. */
+       pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
+       pci_dev_put(bridge_dev);
+
+       *aperture_size = 1024 * 1024;
+       *preallocated_size = 1024 * 1024;
+
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_INTEL_82830_CGC:
+       case PCI_DEVICE_ID_INTEL_82845G_IG:
+       case PCI_DEVICE_ID_INTEL_82855GM_IG:
+       case PCI_DEVICE_ID_INTEL_82865_IG:
+               if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+                       *aperture_size *= 64;
+               else
+                       *aperture_size *= 128;
+               break;
+       default:
+               /* 9xx supports large sizes, just look at the length */
+               *aperture_size = pci_resource_len(pdev, 2);
+               break;
+       }
+
+       /*
+        * Some of the preallocated space is taken by the GTT
+        * and popup.  GTT is 1K per MB of aperture size, and popup is 4K:
+        * a 128MB aperture, for example, costs 128K of GTT plus the 4K
+        * popup, for a total overhead of 135168 bytes.
+        */
+       overhead = (*aperture_size / 1024) + 4096;
+       switch (tmp & INTEL_855_GMCH_GMS_MASK) {
+       case INTEL_855_GMCH_GMS_STOLEN_1M:
+               break; /* 1M already */
+       case INTEL_855_GMCH_GMS_STOLEN_4M:
+               *preallocated_size *= 4;
+               break;
+       case INTEL_855_GMCH_GMS_STOLEN_8M:
+               *preallocated_size *= 8;
+               break;
+       case INTEL_855_GMCH_GMS_STOLEN_16M:
+               *preallocated_size *= 16;
+               break;
+       case INTEL_855_GMCH_GMS_STOLEN_32M:
+               *preallocated_size *= 32;
+               break;
+       case INTEL_915G_GMCH_GMS_STOLEN_48M:
+               *preallocated_size *= 48;
+               break;
+       case INTEL_915G_GMCH_GMS_STOLEN_64M:
+               *preallocated_size *= 64;
+               break;
+       case INTEL_855_GMCH_GMS_DISABLED:
+               DRM_ERROR("video memory is disabled\n");
+               return -1;
+       default:
+               DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+                       tmp & INTEL_855_GMCH_GMS_MASK);
+               return -1;
+       }
+       *preallocated_size -= overhead;
+
+       return 0;
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+       struct drm_i915_private *dev_priv;
+       unsigned long agp_size, prealloc_size;
+       unsigned long sareapage;
+       int size, ret;
+
+       dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
+       if (dev_priv == NULL)
+               return -ENOMEM;
+
+       memset(dev_priv, 0, sizeof(struct drm_i915_private));
+       dev->dev_private = (void *)dev_priv;
+//     dev_priv->flags = flags;
+
+       /* i915 has 4 more counters */
+       dev->counters += 4;
+       dev->types[6] = _DRM_STAT_IRQ;
+       dev->types[7] = _DRM_STAT_PRIMARY;
+       dev->types[8] = _DRM_STAT_SECONDARY;
+       dev->types[9] = _DRM_STAT_DMA;
+
+       if (IS_I9XX(dev)) {
+               dev_priv->mmiobase = drm_get_resource_start(dev, 0);
+               dev_priv->mmiolen = drm_get_resource_len(dev, 0);
+               dev->mode_config.fb_base =
+                       drm_get_resource_start(dev, 2) & 0xff000000;
+       } else if (drm_get_resource_start(dev, 1)) {
+               dev_priv->mmiobase = drm_get_resource_start(dev, 1);
+               dev_priv->mmiolen = drm_get_resource_len(dev, 1);
+               dev->mode_config.fb_base =
+                       drm_get_resource_start(dev, 0) & 0xff000000;
+       } else {
+               DRM_ERROR("Unable to find MMIO registers\n");
+               return -ENODEV;
+       }
+
+       DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
+
+       ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
+                        _DRM_REGISTERS, _DRM_READ_ONLY|_DRM_DRIVER, &dev_priv->mmio_map);
+       if (ret != 0) {
+               DRM_ERROR("Cannot add mapping for MMIO registers\n");
+               return ret;
+       }
+
+       /* prebuild the SAREA */
+       sareapage = max(SAREA_MAX, PAGE_SIZE);
+       ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+                        &dev_priv->sarea);
+       if (ret) {
+               DRM_ERROR("SAREA setup failed\n");
+               return ret;
+       }
+
+       init_waitqueue_head(&dev->lock.lock_queue);
+
+       /* FIXME: assume sarea_priv is right after SAREA */
+        dev_priv->sarea_priv = dev_priv->sarea->handle + sizeof(struct drm_sarea);
+
+       /*
+        * Initialize the memory manager for local and AGP space
+        */
+       drm_bo_driver_init(dev);
+
+       i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
+       printk("setting up %ld bytes of VRAM space\n", prealloc_size);
+       printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
+       drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT);
+       drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT, (agp_size - prealloc_size) >> PAGE_SHIFT);
+
+       I915_WRITE(LP_RING + RING_LEN, 0);
+       I915_WRITE(LP_RING + RING_HEAD, 0);
+       I915_WRITE(LP_RING + RING_TAIL, 0);
+
+       size = PRIMARY_RINGBUFFER_SIZE;
+       ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
+                                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+                                      DRM_BO_FLAG_MEM_VRAM |
+                                      DRM_BO_FLAG_NO_EVICT |
+                                      DRM_BO_HINT_DONT_FENCE, 0, 0x1, 0,
+                                      &dev_priv->ring_buffer);
+       if (ret < 0) {
+               DRM_ERROR("Unable to allocate or pin ring buffer\n");
+               return -EINVAL;
+       }
+
+       /* remap the buffer object properly */
+       dev_priv->ring.Start = dev_priv->ring_buffer->offset;
+       dev_priv->ring.End = dev_priv->ring.Start + size;
+       dev_priv->ring.Size = size;
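+       /* Size is a power of two, so Size - 1 works as the tail wrap mask. */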
+       dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+       /* FIXME: need wrapper with PCI mem checks */
+       ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
+                                 (void **) &dev_priv->ring.virtual_start);
+       if (ret)
+               DRM_ERROR("error mapping ring buffer: %d\n", ret);
+
+       DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
+                 dev_priv->ring.virtual_start, dev_priv->ring.Size);
+
+       dev_priv->sarea_priv->pf_current_page = 0;
+
+       memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
+
+       I915_WRITE(LP_RING + RING_START, dev_priv->ring.Start);
+       I915_WRITE(LP_RING + RING_LEN,
+                  ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
+                  (RING_NO_REPORT | RING_VALID));
+
+       /* We are using separate values as placeholders for mechanisms for
+        * private backbuffer/depthbuffer usage.
+        */
+       dev_priv->use_mi_batchbuffer_start = 0;
+
+       /* Allow hardware batchbuffers unless told otherwise.
+        */
+       dev_priv->allow_batchbuffer = 1;
+
+       /* Program Hardware Status Page */
+       if (!IS_G33(dev)) {
+               dev_priv->status_page_dmah = 
+                       drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+               if (!dev_priv->status_page_dmah) {
+                       dev->dev_private = (void *)dev_priv;
+                       i915_dma_cleanup(dev);
+                       DRM_ERROR("Can not allocate hardware status page\n");
+                       return -ENOMEM;
+               }
+               dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+               dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+               memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+               I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page);
+       }
+       DRM_DEBUG("Enabled hardware status page\n");
+
+       intel_modeset_init(dev);
+       drm_initial_config(dev, false);
+
+       return 0;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->ring.virtual_start) {
+               drm_core_ioremapfree(&dev_priv->ring.map, dev);
+       }
+
+       if (dev_priv->status_page_dmah) {
+               drm_pci_free(dev, dev_priv->status_page_dmah);
+               dev_priv->status_page_dmah = NULL;
+               dev_priv->hw_status_page = NULL;
+               dev_priv->dma_status_page = 0;
+               /* Need to rewrite hardware status page */
+               I915_WRITE(I915REG_HWS_PGA, 0x1ffff000);
+       }
+
+       if (dev_priv->status_gfx_addr) {
+               dev_priv->status_gfx_addr = 0;
+               drm_core_ioremapfree(&dev_priv->hws_map, dev);
+               I915_WRITE(I915REG_HWS_PGA, 0x1ffff000);
+       }
+
+       I915_WRITE(LP_RING + RING_LEN, 0);
+
+       intel_modeset_cleanup(dev);
+
+       drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
+                           dev_priv->ring.virtual_start);
+
+       DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
+       mutex_lock(&dev->struct_mutex);
+       drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
+
+       if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT)) {
+               DRM_ERROR("TT memory manager not clean. "
+                         "Delaying takedown\n");
+       }
+       if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM)) {
+               DRM_ERROR("VRAM memory manager not clean. "
+                         "Delaying takedown\n");
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       drm_bo_driver_finish(dev);
+
+        DRM_DEBUG("%p, %p\n", dev_priv->mmio_map, dev_priv->sarea);
+        drm_rmmap(dev, dev_priv->mmio_map);
+        drm_rmmap(dev, dev_priv->sarea);
+
+       drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+
+       dev->dev_private = NULL;
+       return 0;
+}
+
+void i915_driver_lastclose(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       i915_do_cleanup_pageflip(dev);
+       //i915_mem_takedown(&(dev_priv->agp_heap));
+       i915_dma_cleanup(dev);
+}
+
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *filp)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       //i915_mem_release(dev, filp, dev_priv->agp_heap);
+}
+
diff --git a/psb-kernel-source-4.41.1/i915_ioc32.c b/psb-kernel-source-4.41.1/i915_ioc32.c
new file mode 100644 (file)
index 0000000..11dee03
--- /dev/null
@@ -0,0 +1,223 @@
+/**
+ * \file i915_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the i915 DRM.
+ *
+ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Alan Hourihane 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+
+typedef struct _drm_i915_batchbuffer32 {
+       int start;              /* agp offset */
+       int used;               /* nr bytes in use */
+       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
+       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
+       int num_cliprects;      /* multipass with multiple cliprects? */
+       u32 cliprects;  /* pointer to userspace cliprects */
+} drm_i915_batchbuffer32_t;
+
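+/*
+ * Note on the 32-bit layout: every user pointer shrinks to a u32 in the
+ * 32-bit structs (e.g. cliprects above), while plain ints keep their
+ * size.  Each compat handler below copies the 32-bit struct in, widens
+ * the pointer fields with (unsigned long) casts, rebuilds the native
+ * struct on the user stack via compat_alloc_user_space(), and forwards
+ * the result to drm_ioctl().
+ */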
+static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
+                                  unsigned long arg)
+{
+       drm_i915_batchbuffer32_t batchbuffer32;
+       drm_i915_batchbuffer_t __user *batchbuffer;
+
+       if (copy_from_user
+           (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
+               return -EFAULT;
+
+       batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
+       if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
+           || __put_user(batchbuffer32.start, &batchbuffer->start)
+           || __put_user(batchbuffer32.used, &batchbuffer->used)
+           || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
+           || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
+           || __put_user(batchbuffer32.num_cliprects,
+                         &batchbuffer->num_cliprects)
+           || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
+                         &batchbuffer->cliprects))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_I915_BATCHBUFFER,
+                        (unsigned long) batchbuffer);
+}
+
+typedef struct _drm_i915_cmdbuffer32 {
+       u32 buf;        /* pointer to userspace command buffer */
+       int sz;                 /* nr bytes in buf */
+       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
+       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
+       int num_cliprects;      /* multipass with multiple cliprects? */
+       u32 cliprects;  /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer32_t;
+
+static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
+                                unsigned long arg)
+{
+       drm_i915_cmdbuffer32_t cmdbuffer32;
+       drm_i915_cmdbuffer_t __user *cmdbuffer;
+
+       if (copy_from_user
+           (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
+               return -EFAULT;
+
+       cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
+       if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
+           || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
+                         &cmdbuffer->buf)
+           || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
+           || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
+           || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
+           || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
+           || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
+                         &cmdbuffer->cliprects))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
+}
+
+typedef struct drm_i915_irq_emit32 {
+       u32 irq_seq;
+} drm_i915_irq_emit32_t;
+
+static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       drm_i915_irq_emit32_t req32;
+       drm_i915_irq_emit_t __user *request;
+
+       if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user((int __user *)(unsigned long)req32.irq_seq,
+                         &request->irq_seq))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request);
+}
+
+typedef struct drm_i915_getparam32 {
+       int param;
+       u32 value;
+} drm_i915_getparam32_t;
+
+static int compat_i915_getparam(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       drm_i915_getparam32_t req32;
+       drm_i915_getparam_t __user *request;
+
+       if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.param, &request->param)
+           || __put_user((void __user *)(unsigned long)req32.value,
+                         &request->value))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_I915_GETPARAM, (unsigned long) request);
+}
+
+typedef struct drm_i915_mem_alloc32 {
+       int region;
+       int alignment;
+       int size;
+       u32 region_offset;      /* offset from start of fb or agp */
+} drm_i915_mem_alloc32_t;
+
+static int compat_i915_alloc(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       drm_i915_mem_alloc32_t req32;
+       drm_i915_mem_alloc_t __user *request;
+
+       if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+               return -EFAULT;
+
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.region, &request->region)
+           || __put_user(req32.alignment, &request->alignment)
+           || __put_user(req32.size, &request->size)
+           || __put_user((void __user *)(unsigned long)req32.region_offset,
+                         &request->region_offset))
+               return -EFAULT;
+
+       return drm_ioctl(file->f_dentry->d_inode, file,
+                        DRM_IOCTL_I915_ALLOC, (unsigned long) request);
+}
+
+drm_ioctl_compat_t *i915_compat_ioctls[] = {
+       [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
+       [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
+       [DRM_I915_GETPARAM] = compat_i915_getparam,
+       [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
+       [DRM_I915_ALLOC] = compat_i915_alloc
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       drm_ioctl_compat_t *fn = NULL;
+       int ret;
+
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+
+       if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+               fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+       lock_kernel();          /* XXX for now */
+       if (fn != NULL)
+               ret = (*fn)(filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+       unlock_kernel();
+
+       return ret;
+}
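+
+/*
+ * This entry point is expected to be wired into the driver's
+ * file_operations; a sketch of the likely hookup (the exact spot in
+ * i915_drv.c and the #ifdef guard are assumptions):
+ *
+ *     #ifdef CONFIG_COMPAT
+ *             .compat_ioctl = i915_compat_ioctl,
+ *     #endif
+ */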
diff --git a/psb-kernel-source-4.41.1/i915_irq.c b/psb-kernel-source-4.41.1/i915_irq.c
new file mode 100644 (file)
index 0000000..bd57607
--- /dev/null
@@ -0,0 +1,781 @@
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+
+#define MAX_NOPID ((u32)~0)
+
+/**
+ * i915_get_pipe - return the pipe associated with a given plane
+ * @dev: DRM device
+ * @plane: plane to look for
+ *
+ * We need to get the pipe associated with a given plane to correctly perform
+ * vblank driven swapping, and they may not always be equal.  So look up the
+ * pipe associated with @plane here.
+ */
+static int
+i915_get_pipe(struct drm_device *dev, int plane)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       u32 dspcntr;
+
+       dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
+
+       return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
+}
+
+/**
+ * Emit a synchronous flip.
+ *
+ * This function must be called with the drawable spinlock held.
+ */
+static void
+i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
+                        int plane)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
+       u16 x1, y1, x2, y2;
+       int pf_planes = 1 << plane;
+
+       DRM_SPINLOCK_ASSERT(&dev->drw_lock);
+
+       /* If the window is visible on the other plane, we have to flip on that
+        * plane as well.
+        */
+       if (plane == 1) {
+               x1 = sarea_priv->planeA_x;
+               y1 = sarea_priv->planeA_y;
+               x2 = x1 + sarea_priv->planeA_w;
+               y2 = y1 + sarea_priv->planeA_h;
+       } else {
+               x1 = sarea_priv->planeB_x;
+               y1 = sarea_priv->planeB_y;
+               x2 = x1 + sarea_priv->planeB_w;
+               y2 = y1 + sarea_priv->planeB_h;
+       }
+
+       if (x2 > 0 && y2 > 0) {
+               int i, num_rects = drw->num_rects;
+               struct drm_clip_rect *rect = drw->rects;
+
+               for (i = 0; i < num_rects; i++)
+                       if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
+                             rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
+                               pf_planes = 0x3;
+
+                               break;
+                       }
+       }
+
+       i915_dispatch_flip(dev, pf_planes, 1);
+}
+
+/**
+ * Emit blits for scheduled buffer swaps.
+ *
+ * This function will be called with the HW lock held.
+ */
+static void i915_vblank_tasklet(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       struct list_head *list, *tmp, hits, *hit;
+       int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
+       unsigned counter[2] = { atomic_read(&dev->vbl_received),
+                               atomic_read(&dev->vbl_received2) };
+       struct drm_drawable_info *drw;
+       struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
+       u32 cpp = dev_priv->cpp,  offsets[3];
+       u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
+                               XY_SRC_COPY_BLT_WRITE_ALPHA |
+                               XY_SRC_COPY_BLT_WRITE_RGB)
+                            : XY_SRC_COPY_BLT_CMD;
+       u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
+                         (cpp << 23) | (1 << 24);
+       RING_LOCALS;
+
+       DRM_DEBUG("\n");
+
+       INIT_LIST_HEAD(&hits);
+
+       nhits = nrects = 0;
+
+       /* No irqsave/restore necessary.  This tasklet may be run in an
+        * interrupt context or normal context, but we don't have to worry
+        * about getting interrupted by something acquiring the lock, because
+        * we are the interrupt context thing that acquires the lock.
+        */
+       DRM_SPINLOCK(&dev_priv->swaps_lock);
+
+       /* Find buffer swaps scheduled for this vertical blank */
+       list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
+               struct drm_i915_vbl_swap *vbl_swap =
+                       list_entry(list, struct drm_i915_vbl_swap, head);
+               int pipe = i915_get_pipe(dev, vbl_swap->plane);
+
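+               /* Unsigned wrap-around test: a difference greater than
+                * 2^23 means the target sequence is still in the future,
+                * so this swap is not due yet.
+                */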
+               if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
+                       continue;
+
+               list_del(list);
+               dev_priv->swaps_pending--;
+
+               DRM_SPINUNLOCK(&dev_priv->swaps_lock);
+               DRM_SPINLOCK(&dev->drw_lock);
+
+               drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
+
+               if (!drw) {
+                       DRM_SPINUNLOCK(&dev->drw_lock);
+                       drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+                       DRM_SPINLOCK(&dev_priv->swaps_lock);
+                       continue;
+               }
+
+               list_for_each(hit, &hits) {
+                       struct drm_i915_vbl_swap *swap_cmp =
+                               list_entry(hit, struct drm_i915_vbl_swap, head);
+                       struct drm_drawable_info *drw_cmp =
+                               drm_get_drawable_info(dev, swap_cmp->drw_id);
+
+                       if (drw_cmp &&
+                           drw_cmp->rects[0].y1 > drw->rects[0].y1) {
+                               list_add_tail(list, hit);
+                               break;
+                       }
+               }
+
+               DRM_SPINUNLOCK(&dev->drw_lock);
+
+               /* List of hits was empty, or we reached the end of it */
+               if (hit == &hits)
+                       list_add_tail(list, hits.prev);
+
+               nhits++;
+
+               DRM_SPINLOCK(&dev_priv->swaps_lock);
+       }
+
+       DRM_SPINUNLOCK(&dev_priv->swaps_lock);
+
+       if (nhits == 0) {
+               return;
+       }
+
+       i915_kernel_lost_context(dev);
+
+       upper[0] = upper[1] = 0;
+       slice[0] = max(sarea_priv->planeA_h / nhits, 1);
+       slice[1] = max(sarea_priv->planeB_h / nhits, 1);
+       lower[0] = sarea_priv->planeA_y + slice[0];
+       lower[1] = sarea_priv->planeB_y + slice[1];
+
+       offsets[0] = sarea_priv->front_offset;
+       offsets[1] = sarea_priv->back_offset;
+       offsets[2] = sarea_priv->third_offset;
+       num_pages = sarea_priv->third_handle ? 3 : 2;
+
+       DRM_SPINLOCK(&dev->drw_lock);
+
+       /* Emit blits for buffer swaps, partitioning both outputs into as many
+        * slices as there are buffer swaps scheduled in order to avoid tearing
+        * (based on the assumption that a single buffer swap would always
+        * complete before scanout starts).
+        */
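+       /* For example (hypothetical numbers): with nhits == 3, planeA_y == 0
+        * and planeA_h == 600, slice[0] == 200 and the three passes blit
+        * scanlines [0,200), [200,400) and [400,height), the last pass being
+        * clamped to the full screen height below.
+        */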
+       for (i = 0; i++ < nhits;
+            upper[0] = lower[0], lower[0] += slice[0],
+            upper[1] = lower[1], lower[1] += slice[1]) {
+               int init_drawrect = 1;
+
+               if (i == nhits)
+                       lower[0] = lower[1] = sarea_priv->height;
+
+               list_for_each(hit, &hits) {
+                       struct drm_i915_vbl_swap *swap_hit =
+                               list_entry(hit, struct drm_i915_vbl_swap, head);
+                       struct drm_clip_rect *rect;
+                       int num_rects, plane, front, back;
+                       unsigned short top, bottom;
+
+                       drw = drm_get_drawable_info(dev, swap_hit->drw_id);
+
+                       if (!drw)
+                               continue;
+
+                       plane = swap_hit->plane;
+
+                       if (swap_hit->flip) {
+                               i915_dispatch_vsync_flip(dev, drw, plane);
+                               continue;
+                       }
+
+                       if (init_drawrect) {
+                               BEGIN_LP_RING(6);
+
+                               OUT_RING(GFX_OP_DRAWRECT_INFO);
+                               OUT_RING(0);
+                               OUT_RING(0);
+                               OUT_RING(sarea_priv->width | sarea_priv->height << 16);
+                               OUT_RING(sarea_priv->width | sarea_priv->height << 16);
+                               OUT_RING(0);
+
+                               ADVANCE_LP_RING();
+
+                               sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
+
+                               init_drawrect = 0;
+                       }
+
+                       rect = drw->rects;
+                       top = upper[plane];
+                       bottom = lower[plane];
+
+                       front = (dev_priv->sarea_priv->pf_current_page >>
+                                (2 * plane)) & 0x3;
+                       back = (front + 1) % num_pages;
+
+                       for (num_rects = drw->num_rects; num_rects--; rect++) {
+                               int y1 = max(rect->y1, top);
+                               int y2 = min(rect->y2, bottom);
+
+                               if (y1 >= y2)
+                                       continue;
+
+                               BEGIN_LP_RING(8);
+
+                               OUT_RING(cmd);
+                               OUT_RING(pitchropcpp);
+                               OUT_RING((y1 << 16) | rect->x1);
+                               OUT_RING((y2 << 16) | rect->x2);
+                               OUT_RING(offsets[front]);
+                               OUT_RING((y1 << 16) | rect->x1);
+                               OUT_RING(pitchropcpp & 0xffff);
+                               OUT_RING(offsets[back]);
+
+                               ADVANCE_LP_RING();
+                       }
+               }
+       }
+
+       DRM_SPINUNLOCK(&dev->drw_lock);
+
+       list_for_each_safe(hit, tmp, &hits) {
+               struct drm_i915_vbl_swap *swap_hit =
+                       list_entry(hit, struct drm_i915_vbl_swap, head);
+
+               list_del(hit);
+
+               drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
+       }
+}
+
+irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       u16 temp;
+       u32 pipea_stats, pipeb_stats;
+
+       pipea_stats = I915_READ(I915REG_PIPEASTAT);
+       pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+
+       temp = I915_READ16(I915REG_INT_IDENTITY_R);
+       temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
+
+#if 0
+       DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
+#endif
+       if (temp == 0)
+               return IRQ_NONE;
+
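+       /* Acknowledge exactly the bits handled below; the dummy read-back
+        * and the memory barrier make sure the clear has landed before any
+        * waiters are woken.
+        */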
+       I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+       (void) I915_READ16(I915REG_INT_IDENTITY_R);
+       DRM_READMEMORYBARRIER();
+
+       dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+       if (temp & USER_INT_FLAG) {
+               DRM_WAKEUP(&dev_priv->irq_queue);
+#ifdef I915_HAVE_FENCE
+               i915_fence_handler(dev);
+#endif
+       }
+
+       if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
+               int vblank_pipe = dev_priv->vblank_pipe;
+
+               if ((vblank_pipe &
+                    (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
+                   == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
+                       if (temp & VSYNC_PIPEA_FLAG)
+                               atomic_inc(&dev->vbl_received);
+                       if (temp & VSYNC_PIPEB_FLAG)
+                               atomic_inc(&dev->vbl_received2);
+               } else if (((temp & VSYNC_PIPEA_FLAG) &&
+                           (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
+                          ((temp & VSYNC_PIPEB_FLAG) &&
+                           (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
+                       atomic_inc(&dev->vbl_received);
+
+               DRM_WAKEUP(&dev->vbl_queue);
+               drm_vbl_send_signals(dev);
+
+               if (dev_priv->swaps_pending > 0)
+                       drm_locked_tasklet(dev, i915_vblank_tasklet);
+               I915_WRITE(I915REG_PIPEASTAT,
+                       pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
+                       I915_VBLANK_CLEAR);
+               I915_WRITE(I915REG_PIPEBSTAT,
+                       pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
+                       I915_VBLANK_CLEAR);
+       }
+
+       return IRQ_HANDLED;
+}
+
+int i915_emit_irq(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       RING_LOCALS;
+
+       i915_kernel_lost_context(dev);
+
+       DRM_DEBUG("%s\n", __FUNCTION__);
+
+       i915_emit_breadcrumb(dev);
+
+       BEGIN_LP_RING(2);
+       OUT_RING(0);
+       OUT_RING(GFX_OP_USER_INTERRUPT);
+       ADVANCE_LP_RING();
+
+       return dev_priv->counter;
+}
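+
+/*
+ * The counter returned above pairs with i915_wait_irq(): the ring writes
+ * the breadcrumb out via i915_emit_breadcrumb(), and waiters sleep until
+ * READ_BREADCRUMB(dev_priv) catches up with their sequence number.
+ */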
+
+void i915_user_irq_on(struct drm_i915_private *dev_priv)
+{
+       DRM_SPINLOCK(&dev_priv->user_irq_lock);
+       if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
+               dev_priv->irq_enable_reg |= USER_INT_FLAG;
+               I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+       }
+       DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
+}
+
+void i915_user_irq_off(struct drm_i915_private *dev_priv)
+{
+       DRM_SPINLOCK(&dev_priv->user_irq_lock);
+       if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+               /* Intentionally left disabled -- once the user interrupt
+                * has been switched on it stays on:
+                *
+                *      dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
+                *      I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+                */
+       }
+       DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       int ret = 0;
+
+       DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
+                 READ_BREADCRUMB(dev_priv));
+
+       if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+               return 0;
+
+       dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+       i915_user_irq_on(dev_priv);
+       DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+                   READ_BREADCRUMB(dev_priv) >= irq_nr);
+       i915_user_irq_off(dev_priv);
+
+       if (ret == -EBUSY) {
+               DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
+                         __FUNCTION__,
+                         READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+       }
+
+       dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+       return ret;
+}
+
+static int i915_driver_vblank_do_wait(struct drm_device *dev,
+                                     unsigned int *sequence,
+                                     atomic_t *counter)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned int cur_vblank;
+       int ret = 0;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+                   (((cur_vblank = atomic_read(counter))
+                       - *sequence) <= (1<<23)));
+
+       *sequence = cur_vblank;
+
+       return ret;
+}
+
+void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe)
+{
+       unsigned int seq;
+
+       seq = pipe ? atomic_read(&dev->vbl_received2) + 1 :
+               atomic_read(&dev->vbl_received) + 1;
+
+       if (!pipe)
+               i915_driver_vblank_do_wait(dev, &seq, &dev->vbl_received);
+       else
+               i915_driver_vblank_do_wait(dev, &seq, &dev->vbl_received2);
+}
+
+int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+       return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
+}
+
+int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+       return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int i915_irq_emit(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_irq_emit *emit = data;
+       int result;
+
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       result = i915_emit_irq(dev);
+
+       if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+               DRM_ERROR("copy_to_user\n");
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int i915_irq_wait(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_irq_wait *irqwait = data;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       return i915_wait_irq(dev, irqwait->irq_seq);
+}
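+
+/*
+ * Illustrative userspace sequence for the two ioctls above (a sketch
+ * only; struct and ioctl names as defined in i915_drm.h):
+ *
+ *     int seq;
+ *     drm_i915_irq_emit_t emit = { .irq_seq = &seq };
+ *     ioctl(fd, DRM_IOCTL_I915_IRQ_EMIT, &emit);  // seq <- new breadcrumb
+ *
+ *     drm_i915_irq_wait_t wait = { .irq_seq = seq };
+ *     ioctl(fd, DRM_IOCTL_I915_IRQ_WAIT, &wait);  // block until reached
+ */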
+
+void i915_enable_interrupt(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
+       dev_priv->irq_enable_reg = USER_INT_FLAG;
+
+       if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+               dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
+       if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+               dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
+
+       I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+       dev_priv->irq_enabled = 1;
+}
+
+/* Set the vblank monitor pipe
+ */
+int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_vblank_pipe *pipe = data;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
+               DRM_ERROR("%s called with invalid pipe 0x%x\n",
+                         __FUNCTION__, pipe->pipe);
+               return -EINVAL;
+       }
+
+       dev_priv->vblank_pipe = pipe->pipe;
+
+       i915_enable_interrupt(dev);
+
+       return 0;
+}
+
+int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_vblank_pipe *pipe = data;
+       u16 flag;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       flag = I915_READ(I915REG_INT_ENABLE_R);
+       pipe->pipe = 0;
+       if (flag & VSYNC_PIPEA_FLAG)
+               pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
+       if (flag & VSYNC_PIPEB_FLAG)
+               pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+
+       return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+int i915_vblank_swap(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_vblank_swap *swap = data;
+       struct drm_i915_vbl_swap *vbl_swap;
+       unsigned int pipe, seqtype, curseq, plane;
+       unsigned long irqflags;
+       struct list_head *list;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __func__);
+               return -EINVAL;
+       }
+
+       if (dev_priv->sarea_priv->rotation) {
+               DRM_DEBUG("Rotation not supported\n");
+               return -EINVAL;
+       }
+
+       if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
+                            _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
+                            _DRM_VBLANK_FLIP)) {
+               DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
+               return -EINVAL;
+       }
+
+       plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+       pipe = i915_get_pipe(dev, plane);
+
+       seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
+
+       if (!(dev_priv->vblank_pipe & (1 << pipe))) {
+               DRM_ERROR("Invalid pipe %d\n", pipe);
+               return -EINVAL;
+       }
+
+       DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
+
+       /* It makes no sense to schedule a swap for a drawable that doesn't have
+        * valid information at this point. E.g. this could mean that the X
+        * server is too old to push drawable information to the DRM, in which
+        * case all such swaps would become ineffective.
+        */
+       if (!drm_get_drawable_info(dev, swap->drawable)) {
+               DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
+               DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
+               return -EINVAL;
+       }
+
+       DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
+
+       curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+
+       if (seqtype == _DRM_VBLANK_RELATIVE)
+               swap->sequence += curseq;
+
+       if ((curseq - swap->sequence) <= (1<<23)) {
+               if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
+                       swap->sequence = curseq + 1;
+               } else {
+                       DRM_DEBUG("Missed target sequence\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (swap->seqtype & _DRM_VBLANK_FLIP) {
+               swap->sequence--;
+
+               if ((curseq - swap->sequence) <= (1<<23)) {
+                       struct drm_drawable_info *drw;
+
+                       LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+                       DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
+
+                       drw = drm_get_drawable_info(dev, swap->drawable);
+
+                       if (!drw) {
+                               DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock,
+                                   irqflags);
+                               DRM_DEBUG("Invalid drawable ID %d\n",
+                                         swap->drawable);
+                               return -EINVAL;
+                       }
+
+                       i915_dispatch_vsync_flip(dev, drw, plane);
+
+                       DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
+
+                       return 0;
+               }
+       }
+
+       DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
+
+       list_for_each(list, &dev_priv->vbl_swaps.head) {
+               vbl_swap = list_entry(list, struct drm_i915_vbl_swap, head);
+
+               if (vbl_swap->drw_id == swap->drawable &&
+                   vbl_swap->plane == plane &&
+                   vbl_swap->sequence == swap->sequence) {
+                       vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
+                       DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
+                       DRM_DEBUG("Already scheduled\n");
+                       return 0;
+               }
+       }
+
+       DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
+
+       if (dev_priv->swaps_pending >= 100) {
+               DRM_DEBUG("Too many swaps queued\n");
+               return -EBUSY;
+       }
+
+       vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+       if (!vbl_swap) {
+               DRM_ERROR("Failed to allocate memory to queue swap\n");
+               return -ENOMEM;
+       }
+
+       DRM_DEBUG("\n");
+
+       vbl_swap->drw_id = swap->drawable;
+       vbl_swap->plane = plane;
+       vbl_swap->sequence = swap->sequence;
+       vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
+
+       if (vbl_swap->flip)
+               swap->sequence++;
+
+       DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
+
+       list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
+       dev_priv->swaps_pending++;
+
+       DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
+
+       return 0;
+}
+
+/* drm_dma.h hooks
+ */
+void i915_driver_irq_preinstall(struct drm_device * dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
+       I915_WRITE16(I915REG_HWSTAM, 0xeffe);
+       I915_WRITE16(I915REG_INT_MASK_R, 0x0);
+       I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+}
+
+void i915_driver_irq_postinstall(struct drm_device * dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
+       DRM_SPININIT(&dev_priv->swaps_lock, "swap");
+       INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+       dev_priv->swaps_pending = 0;
+
+       DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
+       dev_priv->user_irq_refcount = 0;
+
+       i915_enable_interrupt(dev);
+       DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+
+       /*
+        * Initialize the hardware status page IRQ location.
+        */
+
+       I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+}
+
+void i915_driver_irq_uninstall(struct drm_device * dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       u16 temp;
+
+       if (!dev_priv)
+               return;
+
+       dev_priv->irq_enabled = 0;
+       I915_WRITE16(I915REG_HWSTAM, 0xffff);
+       I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
+       I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+
+       temp = I915_READ16(I915REG_INT_IDENTITY_R);
+       I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+}
diff --git a/psb-kernel-source-4.41.1/i915_mem.c b/psb-kernel-source-4.41.1/i915_mem.c
new file mode 100644 (file)
index 0000000..4c7ced2
--- /dev/null
@@ -0,0 +1,386 @@
+/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* This memory manager is integrated into the global/local lru
+ * mechanisms used by the clients.  Specifically, it operates by
+ * setting the 'in_use' fields of the global LRU to indicate whether
+ * this region is privately allocated to a client.
+ *
+ * This does require the client to actually respect that field.
+ *
+ * Currently no effort is made to allocate 'private' memory in any
+ * clever way - the LRU information isn't used to determine which
+ * block to allocate, and the ring is drained prior to allocations --
+ * in other words allocation is expensive.
+ */
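+/* Example of the region arithmetic in mark_block() (hypothetical values):
+ * with a granularity shift of 16 (64KiB texture regions), a block at
+ * start 0x30000 of size 0x20000 spans regions 3..4, so those two texList
+ * entries get their in_use flag and age updated and move to the head of
+ * the LRU.
+ */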
+static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
+       struct drm_tex_region *list;
+       unsigned shift, nr;
+       unsigned start;
+       unsigned end;
+       unsigned i;
+       int age;
+
+       shift = dev_priv->tex_lru_log_granularity;
+       nr = I915_NR_TEX_REGIONS;
+
+       start = p->start >> shift;
+       end = (p->start + p->size - 1) >> shift;
+
+       age = ++sarea_priv->texAge;
+       list = sarea_priv->texList;
+
+       /* Mark the regions with the new flag and update their age.  Move
+        * them to head of list to preserve LRU semantics.
+        */
+       for (i = start; i <= end; i++) {
+               list[i].in_use = in_use;
+               list[i].age = age;
+
+               /* remove_from_list(i)
+                */
+               list[(unsigned)list[i].next].prev = list[i].prev;
+               list[(unsigned)list[i].prev].next = list[i].next;
+
+               /* insert_at_head(list, i)
+                */
+               list[i].prev = nr;
+               list[i].next = list[nr].next;
+               list[(unsigned)list[nr].next].prev = i;
+               list[nr].next = i;
+       }
+}
+
+/* Very simple allocator for agp memory, working on a static range
+ * already mapped into each client's address space.
+ */
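+/*
+ * List invariants (see init_heap() below): blocks form a circular,
+ * doubly-linked list anchored by a sentinel whose file_priv is set to -1
+ * so it can never be merged; free blocks have file_priv == NULL, and
+ * allocated blocks point at the owning drm_file.
+ */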
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+                                    struct drm_file *file_priv)
+{
+       /* Maybe cut off the start of an existing block */
+       if (start > p->start) {
+               struct mem_block *newblock =
+                   drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+               if (!newblock)
+                       goto out;
+               newblock->start = start;
+               newblock->size = p->size - (start - p->start);
+               newblock->file_priv = NULL;
+               newblock->next = p->next;
+               newblock->prev = p;
+               p->next->prev = newblock;
+               p->next = newblock;
+               p->size -= newblock->size;
+               p = newblock;
+       }
+
+       /* Maybe cut off the end of an existing block */
+       if (size < p->size) {
+               struct mem_block *newblock =
+                   drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+               if (!newblock)
+                       goto out;
+               newblock->start = start + size;
+               newblock->size = p->size - size;
+               newblock->file_priv = NULL;
+               newblock->next = p->next;
+               newblock->prev = p;
+               p->next->prev = newblock;
+               p->next = newblock;
+               p->size = size;
+       }
+
+      out:
+       /* Our block is in the middle */
+       p->file_priv = file_priv;
+       return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+                                    int align2, struct drm_file *file_priv)
+{
+       struct mem_block *p;
+       int mask = (1 << align2) - 1;
+
+       for (p = heap->next; p != heap; p = p->next) {
+               int start = (p->start + mask) & ~mask;
+               if (p->file_priv == NULL && start + size <= p->start + p->size)
+                       return split_block(p, start, size, file_priv);
+       }
+
+       return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+       struct mem_block *p;
+
+       for (p = heap->next; p != heap; p = p->next)
+               if (p->start == start)
+                       return p;
+
+       return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+       p->file_priv = NULL;
+
+       /* Assumes a single contiguous range.  Needs a special file_priv in
+        * 'heap' to stop it being subsumed.
+        */
+       if (p->next->file_priv == NULL) {
+               struct mem_block *q = p->next;
+               p->size += q->size;
+               p->next = q->next;
+               p->next->prev = p;
+               drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+       }
+
+       if (p->prev->file_priv == NULL) {
+               struct mem_block *q = p->prev;
+               q->size += p->size;
+               q->next = p->next;
+               q->next->prev = q;
+               drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
+       }
+}
+
+/* Initialize.  How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+       struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
+
+       if (!blocks)
+               return -ENOMEM;
+
+       *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
+       if (!*heap) {
+               drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
+               return -ENOMEM;
+       }
+
+       blocks->start = start;
+       blocks->size = size;
+       blocks->file_priv = NULL;
+       blocks->next = blocks->prev = *heap;
+
+       memset(*heap, 0, sizeof(**heap));
+       (*heap)->file_priv = (struct drm_file *)-1;
+       (*heap)->next = (*heap)->prev = blocks;
+       return 0;
+}
+
+/* Free all blocks associated with the releasing file.
+ */
+void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
+                     struct mem_block *heap)
+{
+       struct mem_block *p;
+
+       if (!heap || !heap->next)
+               return;
+
+       for (p = heap->next; p != heap; p = p->next) {
+               if (p->file_priv == file_priv) {
+                       p->file_priv = NULL;
+                       mark_block(dev, p, 0);
+               }
+       }
+
+       /* Assumes a single contiguous range.  Needs a special file_priv in
+        * 'heap' to stop it being subsumed.
+        */
+       for (p = heap->next; p != heap; p = p->next) {
+               while (p->file_priv == NULL && p->next->file_priv == NULL) {
+                       struct mem_block *q = p->next;
+                       p->size += q->size;
+                       p->next = q->next;
+                       p->next->prev = p;
+                       drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+               }
+       }
+}
+
+/* Shutdown.
+ */
+void i915_mem_takedown(struct mem_block **heap)
+{
+       struct mem_block *p;
+
+       if (!*heap)
+               return;
+
+       for (p = (*heap)->next; p != *heap;) {
+               struct mem_block *q = p;
+               p = p->next;
+               drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+       }
+
+       drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
+       *heap = NULL;
+}
+
+static struct mem_block **get_heap(struct drm_i915_private * dev_priv, int region)
+{
+       switch (region) {
+       case I915_MEM_REGION_AGP:
+               return &dev_priv->agp_heap;
+       default:
+               return NULL;
+       }
+}
+
+/* IOCTL HANDLERS */
+
+int i915_mem_alloc(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_mem_alloc *alloc = data;
+       struct mem_block *block, **heap;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       heap = get_heap(dev_priv, alloc->region);
+       if (!heap || !*heap)
+               return -EFAULT;
+
+       /* Make things easier on ourselves: all allocations at least
+        * 4k aligned.
+        */
+       if (alloc->alignment < 12)
+               alloc->alignment = 12;
+
+       block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+
+       if (!block)
+               return -ENOMEM;
+
+       mark_block(dev, block, 1);
+
+       if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+                            sizeof(int))) {
+               DRM_ERROR("copy_to_user\n");
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int i915_mem_free(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_mem_free *memfree = data;
+       struct mem_block *block, **heap;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       heap = get_heap(dev_priv, memfree->region);
+       if (!heap || !*heap)
+               return -EFAULT;
+
+       block = find_block(*heap, memfree->region_offset);
+       if (!block)
+               return -EFAULT;
+
+       if (block->file_priv != file_priv)
+               return -EPERM;
+
+       mark_block(dev, block, 0);
+       free_block(block);
+       return 0;
+}
+
+int i915_mem_init_heap(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_mem_init_heap *initheap = data;
+       struct mem_block **heap;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       heap = get_heap(dev_priv, initheap->region);
+       if (!heap)
+               return -EFAULT;
+
+       if (*heap) {
+               DRM_ERROR("heap already initialized?\n");
+               return -EFAULT;
+       }
+
+       return init_heap(heap, initheap->start, initheap->size);
+}
+
+int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_mem_destroy_heap *destroyheap = data;
+       struct mem_block **heap;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return -EINVAL;
+       }
+
+       heap = get_heap(dev_priv, destroyheap->region);
+       if (!heap) {
+               DRM_ERROR("get_heap failed\n");
+               return -EFAULT;
+       }
+
+       if (!*heap) {
+               DRM_ERROR("heap not initialized?\n");
+               return -EFAULT;
+       }
+
+       i915_mem_takedown(heap);
+       return 0;
+}
diff --git a/psb-kernel-source-4.41.1/i915_reg.h b/psb-kernel-source-4.41.1/i915_reg.h
new file mode 100644 (file)
index 0000000..4145914
--- /dev/null
@@ -0,0 +1,487 @@
+#define BLC_PWM_CTL            0x61254
+#define BLC_PWM_CTL2           0x61250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT                (17)
+/**
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK         (0x7fff << 17)
+#define BLM_LEGACY_MODE                                (1 << 16)
+/**
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT             (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK              (0xffff)
+
+#define I915_GCFGC                     0xf0
+#define I915_LOW_FREQUENCY_ENABLE              (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ         (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ             (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK                        (7 << 4)
+
+#define I855_HPLLCC                    0xc0
+#define I855_CLOCK_CONTROL_MASK                        (3 << 0)
+#define I855_CLOCK_133_200                     (0 << 0)
+#define I855_CLOCK_100_200                     (1 << 0)
+#define I855_CLOCK_100_133                     (2 << 0)
+#define I855_CLOCK_166_250                     (3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A       0x60000
+#define HBLANK_A       0x60004
+#define HSYNC_A        0x60008
+#define VTOTAL_A       0x6000c
+#define VBLANK_A       0x60010
+#define VSYNC_A        0x60014
+#define PIPEASRC       0x6001c
+#define BCLRPAT_A      0x60020
+#define VSYNCSHIFT_A   0x60028
+
+#define HTOTAL_B       0x61000
+#define HBLANK_B       0x61004
+#define HSYNC_B        0x61008
+#define VTOTAL_B       0x6100c
+#define VBLANK_B       0x61010
+#define VSYNC_B        0x61014
+#define PIPEBSRC       0x6101c
+#define BCLRPAT_B      0x61020
+#define VSYNCSHIFT_B   0x61028
+
+#define PP_STATUS      0x61200
+# define PP_ON                                 (1 << 31)
+/**
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+# define PP_READY                              (1 << 30)
+# define PP_SEQUENCE_NONE                      (0 << 28)
+# define PP_SEQUENCE_ON                                (1 << 28)
+# define PP_SEQUENCE_OFF                       (2 << 28)
+# define PP_SEQUENCE_MASK                      0x30000000
+#define PP_CONTROL     0x61204
+# define POWER_TARGET_ON                       (1 << 0)
+
+#define LVDSPP_ON       0x61208
+#define LVDSPP_OFF      0x6120c
+#define PP_CYCLE        0x61210
+
+#define PFIT_CONTROL   0x61230
+# define PFIT_ENABLE                           (1 << 31)
+# define PFIT_PIPE_MASK                                (3 << 29)
+# define PFIT_PIPE_SHIFT                       29
+# define VERT_INTERP_DISABLE                   (0 << 10)
+# define VERT_INTERP_BILINEAR                  (1 << 10)
+# define VERT_INTERP_MASK                      (3 << 10)
+# define VERT_AUTO_SCALE                       (1 << 9)
+# define HORIZ_INTERP_DISABLE                  (0 << 6)
+# define HORIZ_INTERP_BILINEAR                 (1 << 6)
+# define HORIZ_INTERP_MASK                     (3 << 6)
+# define HORIZ_AUTO_SCALE                      (1 << 5)
+# define PANEL_8TO6_DITHER_ENABLE              (1 << 3)
+
+#define PFIT_PGM_RATIOS        0x61234
+# define PFIT_VERT_SCALE_MASK                  0xfff00000
+# define PFIT_HORIZ_SCALE_MASK                 0x0000fff0
+
+#define PFIT_AUTO_RATIOS       0x61238
+
+
+#define DPLL_A         0x06014
+#define DPLL_B         0x06018
+# define DPLL_VCO_ENABLE                       (1 << 31)
+# define DPLL_DVO_HIGH_SPEED                   (1 << 30)
+# define DPLL_SYNCLOCK_ENABLE                  (1 << 29)
+# define DPLL_VGA_MODE_DIS                     (1 << 28)
+# define DPLLB_MODE_DAC_SERIAL                 (1 << 26) /* i915 */
+# define DPLLB_MODE_LVDS                       (2 << 26) /* i915 */
+# define DPLL_MODE_MASK                                (3 << 26)
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10       (0 << 24) /* i915 */
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5                (1 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_14            (0 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_7             (1 << 24) /* i915 */
+# define DPLL_P2_CLOCK_DIV_MASK                        0x03000000 /* i915 */
+# define DPLL_FPA01_P1_POST_DIV_MASK           0x00ff0000 /* i915 */
+/**
+ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830      0x001f0000
+/**
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+# define DPLL_FPA01_P1_POST_DIV_SHIFT          16
+# define PLL_P2_DIVIDE_BY_4                    (1 << 23) /* i830, required in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO                  (1 << 21) /* i830 */
+# define PLL_REF_INPUT_DREFCLK                 (0 << 13)
+# define PLL_REF_INPUT_TVCLKINA                        (1 << 13) /* i830 */
+# define PLL_REF_INPUT_TVCLKINBC               (2 << 13) /* SDVO TVCLKIN */
+# define PLLB_REF_INPUT_SPREADSPECTRUMIN       (3 << 13)
+# define PLL_REF_INPUT_MASK                    (3 << 13)
+# define PLL_LOAD_PULSE_PHASE_SHIFT            9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+# define PLL_LOAD_PULSE_PHASE_MASK             (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+# define DISPLAY_RATE_SELECT_FPA1              (1 << 8)
+
+/**
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+# define SDVO_MULTIPLIER_MASK                  0x000000ff
+# define SDVO_MULTIPLIER_SHIFT_HIRES           4
+# define SDVO_MULTIPLIER_SHIFT_VGA             0
+
+/** @defgroup DPLL_MD
+ * @{
+ */
+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD              0x0601c
+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD              0x06020
+/**
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+# define DPLL_MD_UDI_DIVIDER_MASK              0x3f000000
+# define DPLL_MD_UDI_DIVIDER_SHIFT             24
+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+# define DPLL_MD_VGA_UDI_DIVIDER_MASK          0x003f0000
+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT         16
+/**
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many of the bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.3 GHz.  The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+# define DPLL_MD_UDI_MULTIPLIER_MASK           0x00003f00
+# define DPLL_MD_UDI_MULTIPLIER_SHIFT          8
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This must be set to the default value (3) or the CRT won't work; the
+ * exact function of this field is not fully understood.
+ */
+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK       0x0000003f
+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT      0
+/** @} */
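+
+/*
+ * Example encoding (hypothetical value): a 2x SDVO pixel multiplier is
+ * programmed as the factor minus one, i.e.
+ *
+ *     dpll_md = (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ *
+ * which stays within DPLL_MD_UDI_MULTIPLIER_MASK.
+ */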
+
+#define DPLL_TEST              0x606c
+# define DPLLB_TEST_SDVO_DIV_1                 (0 << 22)
+# define DPLLB_TEST_SDVO_DIV_2                 (1 << 22)
+# define DPLLB_TEST_SDVO_DIV_4                 (2 << 22)
+# define DPLLB_TEST_SDVO_DIV_MASK              (3 << 22)
+# define DPLLB_TEST_N_BYPASS                   (1 << 19)
+# define DPLLB_TEST_M_BYPASS                   (1 << 18)
+# define DPLLB_INPUT_BUFFER_ENABLE             (1 << 16)
+# define DPLLA_TEST_N_BYPASS                   (1 << 3)
+# define DPLLA_TEST_M_BYPASS                   (1 << 2)
+# define DPLLA_INPUT_BUFFER_ENABLE             (1 << 0)
+
+#define ADPA                   0x61100
+#define ADPA_DAC_ENABLE        (1<<31)
+#define ADPA_DAC_DISABLE       0
+#define ADPA_PIPE_SELECT_MASK  (1<<30)
+#define ADPA_PIPE_A_SELECT     0
+#define ADPA_PIPE_B_SELECT     (1<<30)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY   0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW  0
+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW  0
+
+#define FPA0           0x06040
+#define FPA1           0x06044
+#define FPB0           0x06048
+#define FPB1           0x0604c
+# define FP_N_DIV_MASK                         0x003f0000
+# define FP_N_DIV_SHIFT                                16
+# define FP_M1_DIV_MASK                                0x00003f00
+# define FP_M1_DIV_SHIFT                       8
+# define FP_M2_DIV_MASK                                0x0000003f
+# define FP_M2_DIV_SHIFT                       0
+
+
+#define PORT_HOTPLUG_EN                0x61110
+# define SDVOB_HOTPLUG_INT_EN                  (1 << 26)
+# define SDVOC_HOTPLUG_INT_EN                  (1 << 25)
+# define TV_HOTPLUG_INT_EN                     (1 << 18)
+# define CRT_HOTPLUG_INT_EN                    (1 << 9)
+# define CRT_HOTPLUG_FORCE_DETECT              (1 << 3)
+
+#define PORT_HOTPLUG_STAT      0x61114
+# define CRT_HOTPLUG_INT_STATUS                        (1 << 11)
+# define TV_HOTPLUG_INT_STATUS                 (1 << 10)
+# define CRT_HOTPLUG_MONITOR_MASK              (3 << 8)
+# define CRT_HOTPLUG_MONITOR_COLOR             (3 << 8)
+# define CRT_HOTPLUG_MONITOR_MONO              (2 << 8)
+# define CRT_HOTPLUG_MONITOR_NONE              (0 << 8)
+# define SDVOC_HOTPLUG_INT_STATUS              (1 << 7)
+# define SDVOB_HOTPLUG_INT_STATUS              (1 << 6)
+
+#define SDVOB                  0x61140
+#define SDVOC                  0x61160
+#define SDVO_ENABLE                            (1 << 31)
+#define SDVO_PIPE_B_SELECT                     (1 << 30)
+#define SDVO_STALL_SELECT                      (1 << 29)
+#define SDVO_INTERRUPT_ENABLE                  (1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK                        (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT               23
+#define SDVO_PHASE_SELECT_MASK                 (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT              (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT               (1 << 18)
+#define SDVOC_GANG_MODE                                (1 << 16)
+#define SDVO_BORDER_ENABLE                     (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY                 (1 << 3)
+#define SDVO_DETECTED                          (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK                    ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK                    (1 << 17)
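+/*
+ * Example (editor's sketch): per the comment above, updates to SDVOB should
+ * preserve the masked bits with a read-modify-write:
+ *
+ *	u32 sdvob = I915_READ(SDVOB) & SDVOB_PRESERVE_MASK;
+ *	I915_WRITE(SDVOB, sdvob | SDVO_ENABLE | SDVO_PHASE_SELECT_DEFAULT);
+ */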
+
+/** @defgroup LVDS
+ * @{
+ */
+/**
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS                   0x61180
+/**
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+# define LVDS_PORT_EN                  (1 << 31)
+/** Selects pipe B for LVDS data.  Must be set on pre-965. */
+# define LVDS_PIPEB_SELECT             (1 << 30)
+
+/**
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+# define LVDS_A0A2_CLKA_POWER_MASK     (3 << 8)
+# define LVDS_A0A2_CLKA_POWER_DOWN     (0 << 8)
+# define LVDS_A0A2_CLKA_POWER_UP       (3 << 8)
+/**
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+# define LVDS_A3_POWER_MASK            (3 << 6)
+# define LVDS_A3_POWER_DOWN            (0 << 6)
+# define LVDS_A3_POWER_UP              (3 << 6)
+/**
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+# define LVDS_CLKB_POWER_MASK          (3 << 4)
+# define LVDS_CLKB_POWER_DOWN          (0 << 4)
+# define LVDS_CLKB_POWER_UP            (3 << 4)
+
+/**
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+# define LVDS_B0B3_POWER_MASK          (3 << 2)
+# define LVDS_B0B3_POWER_DOWN          (0 << 2)
+# define LVDS_B0B3_POWER_UP            (3 << 2)
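+/*
+ * Example (editor's sketch, not part of the original source): powering up
+ * the second channel for a dual-channel panel, respecting the constraints
+ * described above (CLKB only together with B0-B3):
+ *
+ *	u32 lvds = I915_READ(LVDS);
+ *	lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ *	I915_WRITE(LVDS, lvds);
+ */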
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE       (1<<31)
+#define PIPEACONF_DISABLE      0
+#define PIPEACONF_DOUBLE_WIDE  (1<<30)
+#define I965_PIPECONF_ACTIVE   (1<<30)
+#define PIPEACONF_SINGLE_WIDE  0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_PIPE_LOCKED  (1<<25)
+#define PIPEACONF_PALETTE      0
+#define PIPEACONF_GAMMA        (1<<24)
+#define PIPECONF_FORCE_BORDER  (1<<25)
+#define PIPECONF_PROGRESSIVE   (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION  (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY                (7 << 21)
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE       (1<<31)
+#define PIPEBCONF_DISABLE      0
+#define PIPEBCONF_DOUBLE_WIDE  (1<<30)
+#define PIPEBCONF_GAMMA        (1<<24)
+#define PIPEBCONF_PALETTE      0
+
+#define PIPEBGCMAXRED          0x71010
+#define PIPEBGCMAXGREEN                0x71014
+#define PIPEBGCMAXBLUE         0x71018
+#define PIPEBSTAT              0x71024
+#define PIPEBFRAMEHIGH         0x71040
+#define PIPEBFRAMEPIXEL                0x71044
+
+#define DSPACNTR               0x70180
+#define DSPBCNTR               0x71180
+#define DISPLAY_PLANE_ENABLE                   (1<<31)
+#define DISPLAY_PLANE_DISABLE                  0
+#define DISPPLANE_GAMMA_ENABLE                 (1<<30)
+#define DISPPLANE_GAMMA_DISABLE                        0
+#define DISPPLANE_PIXFORMAT_MASK               (0xf<<26)
+#define DISPPLANE_8BPP                         (0x2<<26)
+#define DISPPLANE_15_16BPP                     (0x4<<26)
+#define DISPPLANE_16BPP                                (0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA               (0x6<<26)
+#define DISPPLANE_32BPP                                (0x7<<26)
+#define DISPPLANE_STEREO_ENABLE                        (1<<25)
+#define DISPPLANE_STEREO_DISABLE               0
+#define DISPPLANE_SEL_PIPE_MASK                        (1<<24)
+#define DISPPLANE_SEL_PIPE_A                   0
+#define DISPPLANE_SEL_PIPE_B                   (1<<24)
+#define DISPPLANE_SRC_KEY_ENABLE               (1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE              0
+#define DISPPLANE_LINE_DOUBLE                  (1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE               0
+#define DISPPLANE_STEREO_POLARITY_FIRST                0
+#define DISPPLANE_STEREO_POLARITY_SECOND       (1<<18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE           (1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE          0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA                0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY         (1)
+
+#define DSPABASE               0x70184
+#define DSPASTRIDE             0x70188
+
+#define DSPBBASE               0x71184
+#define DSPBADDR               DSPBBASE
+#define DSPBSTRIDE             0x71188
+
+#define DSPAKEYVAL             0x70194
+#define DSPAKEYMASK            0x70198
+
+#define DSPAPOS                        0x7018C /* reserved */
+#define DSPASIZE               0x70190
+#define DSPBPOS                        0x7118C
+#define DSPBSIZE               0x71190
+
+#define DSPASURF               0x7019C
+#define DSPATILEOFF            0x701A4
+
+#define DSPBSURF               0x7119C
+#define DSPBTILEOFF            0x711A4
+
+#define VGACNTRL               0x71400
+# define VGA_DISP_DISABLE                      (1 << 31)
+# define VGA_2X_MODE                           (1 << 30)
+# define VGA_PIPE_B_SELECT                     (1 << 29)
+
+/*
+ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+
+#define SWF0                   0x71410
+#define SWF1                   0x71414
+#define SWF2                   0x71418
+#define SWF3                   0x7141c
+#define SWF4                   0x71420
+#define SWF5                   0x71424
+#define SWF6                   0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00                  0x70410
+#define SWF01                  0x70414
+#define SWF02                  0x70418
+#define SWF03                  0x7041c
+#define SWF04                  0x70420
+#define SWF05                  0x70424
+#define SWF06                  0x70428
+
+#define SWF10                  SWF0
+#define SWF11                  SWF1
+#define SWF12                  SWF2
+#define SWF13                  SWF3
+#define SWF14                  SWF4
+#define SWF15                  SWF5
+#define SWF16                  SWF6
+
+#define SWF30                  0x72414
+#define SWF31                  0x72418
+#define SWF32                  0x7241c
+
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A              0x0a000
+#define PALETTE_B              0x0a800
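+/* Editor's note (assumption, not stated in this file): each palette is
+ * typically 256 32-bit entries packed as 8:8:8 RGB, so entry i would be
+ * written as:
+ *
+ *	I915_WRITE(PALETTE_A + 4 * i, (red << 16) | (green << 8) | blue);
+ */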
+
+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
+
+#define IS_I915G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/
+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
+
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+                      (dev)->pci_device == 0x2982 || \
+                      (dev)->pci_device == 0x2992 || \
+                      (dev)->pci_device == 0x29A2 || \
+                      (dev)->pci_device == 0x2A02 || \
+                      (dev)->pci_device == 0x2A12)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
+                       (dev)->pci_device == 0x29B2 ||  \
+                       (dev)->pci_device == 0x29D2)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+                     IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+                       IS_I945GM(dev) || IS_I965GM(dev) || IS_POULSBO(dev))
+
+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
+                        ((dev)->pci_device == 0x8109))
diff --git a/psb-kernel-source-4.41.1/intel_crt.c b/psb-kernel-source-4.41.1/intel_crt.c
new file mode 100644 (file)
index 0000000..1cb37c5
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+
+static void intel_crt_dpms(struct drm_output *output, int mode)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       u32 temp;
+       
+       temp = I915_READ(ADPA);
+       temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+       temp &= ~ADPA_DAC_ENABLE;
+       
+       switch(mode) {
+       case DPMSModeOn:
+               temp |= ADPA_DAC_ENABLE;
+               break;
+       case DPMSModeStandby:
+               temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+               break;
+       case DPMSModeSuspend:
+               temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       case DPMSModeOff:
+               temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       }
+       
+       I915_WRITE(ADPA, temp);
+}
+
+static void intel_crt_save(struct drm_output *output)
+{
+       
+}
+
+static void intel_crt_restore(struct drm_output *output)
+{
+
+}
+
+static int intel_crt_mode_valid(struct drm_output *output,
+                               struct drm_display_mode *mode)
+{
+       if (mode->flags & V_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       if (mode->clock > 400000 || mode->clock < 25000)
+               return MODE_CLOCK_RANGE;
+
+       return MODE_OK;
+}
+
+static bool intel_crt_mode_fixup(struct drm_output *output,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void intel_crt_mode_set(struct drm_output *output,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = output->dev;
+       struct drm_crtc *crtc = output->crtc;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       int dpll_md_reg;
+       u32 adpa, dpll_md;
+
+       if (intel_crtc->pipe == 0) 
+               dpll_md_reg = DPLL_A_MD;
+       else
+               dpll_md_reg = DPLL_B_MD;
+
+       /*
+        * Disable separate mode multiplier used when cloning SDVO to CRT
+        * XXX this needs to be adjusted when we really are cloning
+        */
+       if (IS_I965G(dev)) {
+               dpll_md = I915_READ(dpll_md_reg);
+               I915_WRITE(dpll_md_reg,
+                          dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+       }
+       
+       adpa = 0;
+       if (adjusted_mode->flags & V_PHSYNC)
+               adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & V_PVSYNC)
+               adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+       
+       if (intel_crtc->pipe == 0)
+               adpa |= ADPA_PIPE_A_SELECT;
+       else
+               adpa |= ADPA_PIPE_B_SELECT;
+       
+       I915_WRITE(ADPA, adpa);
+}
+
+/**
+ * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Only used on 945G/GM and 965; see intel_crt_detect().
+ *
+ * \return TRUE if CRT is connected.
+ * \return FALSE if CRT is disconnected.
+ */
+static bool intel_crt_detect_hotplug(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       u32 temp;
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+       temp = I915_READ(PORT_HOTPLUG_EN);
+
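+	/* Editor's note: (1 << 5) has no name in the register header; it is
+	 * written together with the force-detect trigger and is presumably
+	 * part of the hotplug-detect programming (assumption, undocumented
+	 * here).
+	 */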
+       I915_WRITE(PORT_HOTPLUG_EN,
+                  temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+
+       do {
+               if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
+                       break;
+               msleep(1);
+       } while (time_after(timeout, jiffies));
+
+       if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
+           CRT_HOTPLUG_MONITOR_COLOR)
+               return true;
+
+       return false;
+}
+
+static bool intel_crt_detect_ddc(struct drm_output *output)
+{
+       struct intel_output *intel_output = output->driver_private;
+
+       /* CRT should always be at 0, but check anyway */
+       if (intel_output->type != INTEL_OUTPUT_ANALOG)
+               return false;
+       
+       return intel_ddc_probe(output);
+}
+
+static enum drm_output_status intel_crt_detect(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       
+       if (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) {
+               if (intel_crt_detect_hotplug(output))
+                       return output_status_connected;
+               else
+                       return output_status_disconnected;
+       }
+
+       if (intel_crt_detect_ddc(output))
+               return output_status_connected;
+
+       /* TODO use load detect */
+       return output_status_unknown;
+}
+
+static void intel_crt_destroy(struct drm_output *output)
+{
+	struct intel_output *intel_output = output->driver_private;
+
+	/* driver_private may be unset if intel_crt_init() failed part way */
+	if (!intel_output)
+		return;
+
+	intel_i2c_destroy(intel_output->ddc_bus);
+	kfree(intel_output);
+	output->driver_private = NULL;
+}
+
+static int intel_crt_get_modes(struct drm_output *output)
+{
+       return intel_ddc_get_modes(output);
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+static const struct drm_output_funcs intel_crt_output_funcs = {
+       .dpms = intel_crt_dpms,
+       .save = intel_crt_save,
+       .restore = intel_crt_restore,
+       .mode_valid = intel_crt_mode_valid,
+       .mode_fixup = intel_crt_mode_fixup,
+       .prepare = intel_output_prepare,
+       .mode_set = intel_crt_mode_set,
+       .commit = intel_output_commit,
+       .detect = intel_crt_detect,
+       .get_modes = intel_crt_get_modes,
+       .cleanup = intel_crt_destroy,
+};
+
+void intel_crt_init(struct drm_device *dev)
+{
+       struct drm_output *output;
+       struct intel_output *intel_output;
+
+	output = drm_output_create(dev, &intel_crt_output_funcs, "VGA");
+	if (!output)
+		return;
+
+       intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
+       if (!intel_output) {
+               drm_output_destroy(output);
+               return;
+       }
+       /* Set up the DDC bus. */
+       intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+	if (!intel_output->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+			   "failed.\n");
+		/* Don't leak the half-initialized output on failure. */
+		kfree(intel_output);
+		drm_output_destroy(output);
+		return;
+	}
+
+       intel_output->type = INTEL_OUTPUT_ANALOG;
+       output->driver_private = intel_output;
+       output->interlace_allowed = 0;
+       output->doublescan_allowed = 0;
+}
diff --git a/psb-kernel-source-4.41.1/intel_display.c b/psb-kernel-source-4.41.1/intel_display.c
new file mode 100644 (file)
index 0000000..f81cde3
--- /dev/null
@@ -0,0 +1,1472 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+
+bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+
+typedef struct {
+    /* given values */    
+    int n;
+    int m1, m2;
+    int p1, p2;
+    /* derived values */
+    int        dot;
+    int        vco;
+    int        m;
+    int        p;
+} intel_clock_t;
+
+typedef struct {
+    int        min, max;
+} intel_range_t;
+
+typedef struct {
+    int        dot_limit;
+    int        p2_slow, p2_fast;
+} intel_p2_t;
+
+#define INTEL_P2_NUM                 2
+
+typedef struct {
+    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
+    intel_p2_t     p2;
+} intel_limit_t;
+
+#define I8XX_DOT_MIN             25000
+#define I8XX_DOT_MAX            350000
+#define I8XX_VCO_MIN            930000
+#define I8XX_VCO_MAX           1400000
+#define I8XX_N_MIN                   3
+#define I8XX_N_MAX                  16
+#define I8XX_M_MIN                  96
+#define I8XX_M_MAX                 140
+#define I8XX_M1_MIN                 18
+#define I8XX_M1_MAX                 26
+#define I8XX_M2_MIN                  6
+#define I8XX_M2_MAX                 16
+#define I8XX_P_MIN                   4
+#define I8XX_P_MAX                 128
+#define I8XX_P1_MIN                  2
+#define I8XX_P1_MAX                 33
+#define I8XX_P1_LVDS_MIN             1
+#define I8XX_P1_LVDS_MAX             6
+#define I8XX_P2_SLOW                 4
+#define I8XX_P2_FAST                 2
+#define I8XX_P2_LVDS_SLOW            14
+#define I8XX_P2_LVDS_FAST            14 /* No fast option */
+#define I8XX_P2_SLOW_LIMIT      165000
+
+#define I9XX_DOT_MIN             20000
+#define I9XX_DOT_MAX            400000
+#define I9XX_VCO_MIN           1400000
+#define I9XX_VCO_MAX           2800000
+#define I9XX_N_MIN                   3
+#define I9XX_N_MAX                   8
+#define I9XX_M_MIN                  70
+#define I9XX_M_MAX                 120
+#define I9XX_M1_MIN                 10
+#define I9XX_M1_MAX                 20
+#define I9XX_M2_MIN                  5
+#define I9XX_M2_MAX                  9
+#define I9XX_P_SDVO_DAC_MIN          5
+#define I9XX_P_SDVO_DAC_MAX         80
+#define I9XX_P_LVDS_MIN                      7
+#define I9XX_P_LVDS_MAX                     98
+#define I9XX_P1_MIN                  1
+#define I9XX_P1_MAX                  8
+#define I9XX_P2_SDVO_DAC_SLOW               10
+#define I9XX_P2_SDVO_DAC_FAST                5
+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT     200000
+#define I9XX_P2_LVDS_SLOW                   14
+#define I9XX_P2_LVDS_FAST                    7
+#define I9XX_P2_LVDS_SLOW_LIMIT                 112000
+
+#define INTEL_LIMIT_I8XX_DVO_DAC    0
+#define INTEL_LIMIT_I8XX_LVDS      1
+#define INTEL_LIMIT_I9XX_SDVO_DAC   2
+#define INTEL_LIMIT_I9XX_LVDS      3
+
+static const intel_limit_t intel_limits[] = {
+    { /* INTEL_LIMIT_I8XX_DVO_DAC */
+        .dot = { .min = I8XX_DOT_MIN,          .max = I8XX_DOT_MAX },
+        .vco = { .min = I8XX_VCO_MIN,          .max = I8XX_VCO_MAX },
+        .n   = { .min = I8XX_N_MIN,            .max = I8XX_N_MAX },
+        .m   = { .min = I8XX_M_MIN,            .max = I8XX_M_MAX },
+        .m1  = { .min = I8XX_M1_MIN,           .max = I8XX_M1_MAX },
+        .m2  = { .min = I8XX_M2_MIN,           .max = I8XX_M2_MAX },
+        .p   = { .min = I8XX_P_MIN,            .max = I8XX_P_MAX },
+        .p1  = { .min = I8XX_P1_MIN,           .max = I8XX_P1_MAX },
+       .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+                .p2_slow = I8XX_P2_SLOW,       .p2_fast = I8XX_P2_FAST },
+    },
+    { /* INTEL_LIMIT_I8XX_LVDS */
+        .dot = { .min = I8XX_DOT_MIN,          .max = I8XX_DOT_MAX },
+        .vco = { .min = I8XX_VCO_MIN,          .max = I8XX_VCO_MAX },
+        .n   = { .min = I8XX_N_MIN,            .max = I8XX_N_MAX },
+        .m   = { .min = I8XX_M_MIN,            .max = I8XX_M_MAX },
+        .m1  = { .min = I8XX_M1_MIN,           .max = I8XX_M1_MAX },
+        .m2  = { .min = I8XX_M2_MIN,           .max = I8XX_M2_MAX },
+        .p   = { .min = I8XX_P_MIN,            .max = I8XX_P_MAX },
+        .p1  = { .min = I8XX_P1_LVDS_MIN,      .max = I8XX_P1_LVDS_MAX },
+       .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+                .p2_slow = I8XX_P2_LVDS_SLOW,  .p2_fast = I8XX_P2_LVDS_FAST },
+    },
+    { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+        .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX },
+        .vco = { .min = I9XX_VCO_MIN,          .max = I9XX_VCO_MAX },
+        .n   = { .min = I9XX_N_MIN,            .max = I9XX_N_MAX },
+        .m   = { .min = I9XX_M_MIN,            .max = I9XX_M_MAX },
+        .m1  = { .min = I9XX_M1_MIN,           .max = I9XX_M1_MAX },
+        .m2  = { .min = I9XX_M2_MIN,           .max = I9XX_M2_MAX },
+        .p   = { .min = I9XX_P_SDVO_DAC_MIN,   .max = I9XX_P_SDVO_DAC_MAX },
+        .p1  = { .min = I9XX_P1_MIN,           .max = I9XX_P1_MAX },
+       .p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+                .p2_slow = I9XX_P2_SDVO_DAC_SLOW,      .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+    },
+    { /* INTEL_LIMIT_I9XX_LVDS */
+        .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX },
+        .vco = { .min = I9XX_VCO_MIN,          .max = I9XX_VCO_MAX },
+        .n   = { .min = I9XX_N_MIN,            .max = I9XX_N_MAX },
+        .m   = { .min = I9XX_M_MIN,            .max = I9XX_M_MAX },
+        .m1  = { .min = I9XX_M1_MIN,           .max = I9XX_M1_MAX },
+        .m2  = { .min = I9XX_M2_MIN,           .max = I9XX_M2_MAX },
+        .p   = { .min = I9XX_P_LVDS_MIN,       .max = I9XX_P_LVDS_MAX },
+        .p1  = { .min = I9XX_P1_MIN,           .max = I9XX_P1_MAX },
+	/* The single-channel range is 25-112 MHz, and dual-channel
+	 * is 80-224 MHz.  Prefer single channel as much as possible.
+	 */
+       .p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+                .p2_slow = I9XX_P2_LVDS_SLOW,  .p2_fast = I9XX_P2_LVDS_FAST },
+    },
+};
+
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       const intel_limit_t *limit;
+       
+       if (IS_I9XX(dev)) {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+                       limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+               else
+                       limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+       } else {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+                       limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+               else
+                       limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
+       }
+       return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+static void i8xx_clock(int refclk, intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+
+static void i9xx_clock(int refclk, intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
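+
+/*
+ * Editor's note: i8xx_clock() and i9xx_clock() are identical in this
+ * version; they are kept as separate functions, presumably so the two
+ * chip families can diverge later without churn.
+ */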
+
+static void intel_clock(struct drm_device *dev, int refclk,
+                       intel_clock_t *clock)
+{
+       if (IS_I9XX(dev))
+               return i9xx_clock (refclk, clock);
+       else
+               return i8xx_clock (refclk, clock);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+{
+    struct drm_device *dev = crtc->dev;
+    struct drm_mode_config *mode_config = &dev->mode_config;
+    struct drm_output *l_entry;
+
+    list_for_each_entry(l_entry, &mode_config->output_list, head) {
+           if (l_entry->crtc == crtc) {
+                   struct intel_output *intel_output = l_entry->driver_private;
+                   if (intel_output->type == type)
+                           return true;
+           }
+    }
+    return false;
+}
+
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given outputs.
+ */
+
+static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+{
+       const intel_limit_t *limit = intel_limit (crtc);
+       
+       if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
+               INTELPllInvalid ("p1 out of range\n");
+       if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
+               INTELPllInvalid ("p out of range\n");
+       if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
+               INTELPllInvalid ("m2 out of range\n");
+       if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
+               INTELPllInvalid ("m1 out of range\n");
+       if (clock->m1 <= clock->m2)
+               INTELPllInvalid ("m1 <= m2\n");
+       if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
+               INTELPllInvalid ("m out of range\n");
+       if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+               INTELPllInvalid ("n out of range\n");
+       if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+               INTELPllInvalid ("vco out of range\n");
+       /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+        * output, etc., rather than just a single range.
+        */
+       if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+               INTELPllInvalid ("dot out of range\n");
+       
+       return true;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false if none could be found.  The returned values represent
+ * the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
+                               int refclk, intel_clock_t *best_clock)
+{
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       intel_clock_t clock;
+       const intel_limit_t *limit = intel_limit(crtc);
+       int err = target;
+
+       if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+           (I915_READ(LVDS) & LVDS_PORT_EN) != 0) {
+               /*
+                * For LVDS, if the panel is on, just rely on its current
+                * settings for dual-channel.  We haven't figured out how to
+                * reliably set up different single/dual channel state, if we
+                * even can.
+                */
+               if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP)
+                       clock.p2 = limit->p2.p2_fast;
+               else
+                       clock.p2 = limit->p2.p2_slow;
+       } else {
+               if (target < limit->p2.dot_limit)
+                       clock.p2 = limit->p2.p2_slow;
+               else
+                       clock.p2 = limit->p2.p2_fast;
+       }
+       
+       memset (best_clock, 0, sizeof (*best_clock));
+       
+       for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+               for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
+                            clock.m2 <= limit->m2.max; clock.m2++) {
+                       for (clock.n = limit->n.min; clock.n <= limit->n.max;
+                            clock.n++) {
+                               for (clock.p1 = limit->p1.min;
+                                    clock.p1 <= limit->p1.max; clock.p1++) {
+                                       int this_err;
+                                       
+                                       intel_clock(dev, refclk, &clock);
+                                       
+                                       if (!intel_PLL_is_valid(crtc, &clock))
+                                               continue;
+                                       
+                                       this_err = abs(clock.dot - target);
+                                       if (this_err < err) {
+                                               *best_clock = clock;
+                                               err = this_err;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       return (err != target);
+}
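+
+/*
+ * Worked example (editor's illustration, not from the original source):
+ * with the 9xx refclk of 96000 kHz, picking n = 4, m1 = 14, m2 = 6,
+ * p1 = 2, p2 = 10 gives
+ *
+ *	m   = 5 * (14 + 2) + (6 + 2)  = 88
+ *	vco = 96000 * 88 / (4 + 2)    = 1408000 kHz
+ *	dot = 1408000 / (2 * 10)      = 70400 kHz
+ *
+ * all of which fall inside the I9XX_* limits above, so a 70.4 MHz mode
+ * would accept these divisors.
+ */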
+
+#if 0
+void
+intel_set_vblank(struct drm_device *dev)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       int vbl_pipe = 0;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               intel_crtc = crtc->driver_private;
+
+               if (crtc->enabled)
+                       vbl_pipe |= (1<<intel_crtc->pipe);
+       }
+
+       dev_priv->vblank_pipe = vbl_pipe;
+       i915_enable_interrupt(dev);
+}
+#endif
+
+void
+intel_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20 ms, i.e. one frame at 50 Hz.  Note that udelay() is a
+	 * busy-wait; mdelay(20) would be the conventional call for a delay
+	 * this long.
+	 */
+       udelay(20000);
+}
+
+void
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int pipe = intel_crtc->pipe;
+       unsigned long Start, Offset;
+       int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+
+       Start = crtc->fb->offset;
+       Offset = y * crtc->fb->pitch + x;
+
+       DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
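+	/* On 965, the (x, y) byte offset and the surface start address are
+	 * programmed through separate registers; older chips take a single
+	 * combined linear address in the base register.
+	 */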
+       if (IS_I965G(dev)) {
+               I915_WRITE(dspbase, Offset);
+               I915_READ(dspbase);
+               I915_WRITE(dspsurf, Start);
+               I915_READ(dspsurf);
+       } else {
+               I915_WRITE(dspbase, Start + Offset);
+               I915_READ(dspbase);
+       }
+       
+
+       if (!dev_priv->sarea_priv) 
+               return;
+               
+       switch (pipe) {
+       case 0:
+               dev_priv->sarea_priv->planeA_x = x;
+               dev_priv->sarea_priv->planeA_y = y;
+               break;
+       case 1:
+               dev_priv->sarea_priv->planeB_x = x;
+               dev_priv->sarea_priv->planeB_y = y;
+               break;
+       default:
+               DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+               break;
+       }
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int pipe = intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp, temp2;
+       bool enabled;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DPMSModeOff in the CRTC.
+        */
+       switch (mode) {
+       case DPMSModeOn:
+       case DPMSModeStandby:
+       case DPMSModeSuspend:
+               /* Enable the DPLL */
+               temp = I915_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       I915_WRITE(dpll_reg, temp);
+                       I915_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       I915_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       I915_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+               
+               /* Enable the pipe */
+               temp = I915_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+               /* Enable the plane */
+               temp = I915_READ(dspcntr_reg);
+               if (mode != DPMSModeOn)
+                       temp2 = temp & ~DISPLAY_PLANE_ENABLE;
+               else
+                       temp2 = temp | DISPLAY_PLANE_ENABLE;
+
+               if (temp != temp2) {
+                       I915_WRITE(dspcntr_reg, temp2);
+                       /* Flush the plane changes */
+                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+               }
+               
+               intel_crtc_load_lut(crtc);
+               
+               /* Give the overlay scaler a chance to enable if it's on this pipe */
+               //intel_crtc_dpms_video(crtc, TRUE); TODO
+       break;
+       case DPMSModeOff:
+               /* Give the overlay scaler a chance to disable if it's on this pipe */
+               //intel_crtc_dpms_video(crtc, FALSE); TODO
+               
+               /* Disable display plane */
+               temp = I915_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+                       I915_READ(dspbase_reg);
+               }
+               
+               if (!IS_I9XX(dev)) {
+                       /* Wait for vblank for the disable to take effect */
+                       intel_wait_for_vblank(dev);
+               }
+               
+               /* Next, disable display pipes */
+               temp = I915_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       I915_READ(pipeconf_reg);
+               }
+               
+               /* Wait for vblank for the disable to take effect. */
+               intel_wait_for_vblank(dev);
+               
+               temp = I915_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       I915_READ(dpll_reg);
+               }
+               
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+       
+
+       if (!dev_priv->sarea_priv)
+               return;
+
+       enabled = crtc->enabled && mode != DPMSModeOff;
+       
+       switch (pipe) {
+       case 0:
+               dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
+               dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
+               break;
+       case 1:
+               dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
+               dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
+               break;
+       default:
+               DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+               break;
+       }
+}
+
+static bool intel_crtc_lock(struct drm_crtc *crtc)
+{
+   /* Sync the engine before mode switch */
+//   i830WaitSync(crtc->scrn);
+
+#if 0 // TODO def XF86DRI
+    return I830DRILock(crtc->scrn);
+#else
+    return FALSE;
+#endif
+}
+
+static void intel_crtc_unlock (struct drm_crtc *crtc)
+{
+#if 0 // TODO def XF86DRI
+    I830DRIUnlock (crtc->scrn);
+#endif
+}
+
+static void intel_crtc_prepare (struct drm_crtc *crtc)
+{
+       crtc->funcs->dpms(crtc, DPMSModeOff);
+}
+
+static void intel_crtc_commit (struct drm_crtc *crtc)
+{
+       crtc->funcs->dpms(crtc, DPMSModeOn);
+}
+
+void intel_output_prepare (struct drm_output *output)
+{
+       /* lvds has its own version of prepare see intel_lvds_prepare */
+       output->funcs->dpms(output, DPMSModeOff);
+}
+
+void intel_output_commit (struct drm_output *output)
+{
+       /* lvds has its own version of commit see intel_lvds_commit */
+       output->funcs->dpms(output, DPMSModeOn);
+}
+
+static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+
+/** Returns the core display clock speed for i830 - i945 */
+int intel_get_core_clock_speed(struct drm_device *dev)
+{
+
+	/* Core clock values taken from the published datasheets.
+	 * The 830 may go up to 166 MHz, which we should check.
+	 */
+       if (IS_I945G(dev))
+               return 400000;
+       else if (IS_I915G(dev))
+               return 333000;
+       else if (IS_I945GM(dev) || IS_POULSBO(dev) || IS_845G(dev))
+               return 200000;
+       else if (IS_I915GM(dev)) {
+               u16 gcfgc = 0;
+
+               pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
+               
+               if (gcfgc & I915_LOW_FREQUENCY_ENABLE)
+                       return 133000;
+               else {
+                       switch (gcfgc & I915_DISPLAY_CLOCK_MASK) {
+                       case I915_DISPLAY_CLOCK_333_MHZ:
+                               return 333000;
+                       default:
+                       case I915_DISPLAY_CLOCK_190_200_MHZ:
+                               return 190000;
+                       }
+               }
+       } else if (IS_I865G(dev))
+               return 266000;
+       else if (IS_I855(dev)) {
+#if 0
+               PCITAG bridge = pciTag(0, 0, 0); /* This is always the host bridge */
+               u16 hpllcc = pciReadWord(bridge, I855_HPLLCC);
+               
+#endif
+               u16 hpllcc = 0;
+               /* Assume that the hardware is in the high speed state.  This
+                * should be the default.
+                */
+               switch (hpllcc & I855_CLOCK_CONTROL_MASK) {
+               case I855_CLOCK_133_200:
+               case I855_CLOCK_100_200:
+                       return 200000;
+               case I855_CLOCK_166_250:
+                       return 250000;
+               case I855_CLOCK_100_133:
+                       return 133000;
+               }
+       } else /* 852, 830 */
+               return 133000;
+       
+       return 0; /* Silence gcc warning */
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+int intel_panel_fitter_pipe (struct drm_device *dev)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       u32  pfit_control;
+    
+       /* i830 doesn't have a panel fitter */
+       if (IS_I830(dev))
+               return -1;
+    
+       pfit_control = I915_READ(PFIT_CONTROL);
+    
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+       
+       /* 965 can place panel fitter on either pipe */
+       if (IS_I965G(dev))
+               return (pfit_control >> 29) & 0x3;
+       
+       /* older chips can only use pipe 1 */
+       return 1;
+}
+
+#define WA_NO_FB_GARBAGE_DISPLAY
+#ifdef WA_NO_FB_GARBAGE_DISPLAY
+static u32 fp_reg_value[2];
+static u32 dpll_reg_value[2];
+static u32 dpll_md_reg_value[2];
+static u32 dspcntr_reg_value[2];
+static u32 pipeconf_reg_value[2];
+static u32 htot_reg_value[2];
+static u32 hblank_reg_value[2];
+static u32 hsync_reg_value[2];
+static u32 vtot_reg_value[2];
+static u32 vblank_reg_value[2];
+static u32 vsync_reg_value[2];
+static u32 dspsize_reg_value[2];
+static u32 dspstride_reg_value[2];
+static u32 dsppos_reg_value[2];
+static u32 pipesrc_reg_value[2];
+
+static u32 dspbase_value[2];
+
+static u32 lvds_reg_value[2];
+static u32 vgacntrl_reg_value[2];
+static u32 pfit_control_reg_value[2];
+
+void intel_crtc_mode_restore(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int pipe = intel_crtc->pipe;
+       int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+
+       bool ok, is_sdvo = false, is_dvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_output *output;
+
+       list_for_each_entry(output, &mode_config->output_list, head) {
+               struct intel_output *intel_output = output->driver_private;
+
+               if (output->crtc != crtc)
+                       continue;
+
+               switch (intel_output->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = TRUE;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = TRUE;
+                       break;
+               case INTEL_OUTPUT_DVO:
+                       is_dvo = TRUE;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = TRUE;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = TRUE;
+                       break;
+               }
+               if(is_lvds && ((lvds_reg_value[pipe] & LVDS_PORT_EN) == 0))
+               {
+                       printk("%s: is_lvds but not the boot display, so return\n",
+                                                       __FUNCTION__);
+                       return;
+               }
+               output->funcs->prepare(output);
+       }
+
+       intel_crtc_prepare(crtc);
+       /* Disable the panel fitter if it was on our pipe */
+       if (intel_panel_fitter_pipe(dev) == pipe)
+               I915_WRITE(PFIT_CONTROL, 0);
+
+       if (dpll_reg_value[pipe] & DPLL_VCO_ENABLE) {
+               I915_WRITE(fp_reg, fp_reg_value[pipe]);
+               I915_WRITE(dpll_reg, dpll_reg_value[pipe]& ~DPLL_VCO_ENABLE);
+               I915_READ(dpll_reg);
+               udelay(150);
+       }
+
+       /*
+       if(is_lvds)
+               I915_WRITE(LVDS, lvds_reg_value[pipe]);
+       */
+       if (is_lvds) {
+               I915_WRITE(LVDS, lvds_reg_value[pipe]);
+               I915_READ(LVDS);
+       }
+
+       I915_WRITE(fp_reg, fp_reg_value[pipe]);
+       I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
+       I915_READ(dpll_reg);
+       udelay(150);
+       //I915_WRITE(dpll_md_reg, dpll_md_reg_value[pipe]);
+       I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
+       I915_READ(dpll_reg);
+       udelay(150);
+       I915_WRITE(htot_reg, htot_reg_value[pipe]);
+       I915_WRITE(hblank_reg, hblank_reg_value[pipe]);
+       I915_WRITE(hsync_reg, hsync_reg_value[pipe]);
+       I915_WRITE(vtot_reg, vtot_reg_value[pipe]);
+       I915_WRITE(vblank_reg, vblank_reg_value[pipe]);
+       I915_WRITE(vsync_reg, vsync_reg_value[pipe]);
+       I915_WRITE(dspstride_reg, dspstride_reg_value[pipe]);
+       I915_WRITE(dspsize_reg, dspsize_reg_value[pipe]);
+       I915_WRITE(dsppos_reg, dsppos_reg_value[pipe]);
+       I915_WRITE(pipesrc_reg, pipesrc_reg_value[pipe]);
+       I915_WRITE(pipeconf_reg, pipeconf_reg_value[pipe]);
+       I915_READ(pipeconf_reg);
+       intel_wait_for_vblank(dev);
+       I915_WRITE(dspcntr_reg, dspcntr_reg_value[pipe]);
+       I915_WRITE(dspbase, dspbase_value[pipe]);
+       I915_READ(dspbase);
+       I915_WRITE(VGACNTRL, vgacntrl_reg_value[pipe]);
+       intel_wait_for_vblank(dev);
+       I915_WRITE(PFIT_CONTROL, pfit_control_reg_value[pipe]);
+       
+       intel_crtc_commit(crtc);
+       list_for_each_entry(output, &mode_config->output_list, head) {
+               if (output->crtc != crtc)
+                       continue;
+
+               output->funcs->commit(output);
+               //output->funcs->dpms(output, DPMSModeOff);
+               //printk("turn off the display first\n");
+       }
+       return;
+}
+
+void intel_crtc_mode_save(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int pipe = intel_crtc->pipe;
+       int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+       bool ok, is_sdvo = false, is_dvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_output *output;
+
+       list_for_each_entry(output, &mode_config->output_list, head) {
+               struct intel_output *intel_output = output->driver_private;
+
+               if (output->crtc != crtc)
+                       continue;
+
+               switch (intel_output->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = TRUE;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = TRUE;
+                       break;
+               case INTEL_OUTPUT_DVO:
+                       is_dvo = TRUE;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = TRUE;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = TRUE;
+                       break;
+               }
+       }
+       
+       fp_reg_value[pipe] = I915_READ(fp_reg);
+       dpll_reg_value[pipe] = I915_READ(dpll_reg);
+       dpll_md_reg_value[pipe] = I915_READ(dpll_md_reg);
+       dspcntr_reg_value[pipe] = I915_READ(dspcntr_reg);
+       pipeconf_reg_value[pipe] = I915_READ(pipeconf_reg);
+       htot_reg_value[pipe] = I915_READ(htot_reg);
+       hblank_reg_value[pipe] = I915_READ(hblank_reg);
+       hsync_reg_value[pipe] = I915_READ(hsync_reg);
+       vtot_reg_value[pipe] = I915_READ(vtot_reg);
+       vblank_reg_value[pipe] = I915_READ(vblank_reg);
+       vsync_reg_value[pipe] = I915_READ(vsync_reg);
+       dspsize_reg_value[pipe] = I915_READ(dspsize_reg);
+       dspstride_reg_value[pipe] = I915_READ(dspstride_reg);
+       dsppos_reg_value[pipe] = I915_READ(dsppos_reg);
+       pipesrc_reg_value[pipe] = I915_READ(pipesrc_reg);
+       dspbase_value[pipe] = I915_READ(dspbase);
+       if(is_lvds)
+               lvds_reg_value[pipe] = I915_READ(LVDS);
+       vgacntrl_reg_value[pipe] = I915_READ(VGACNTRL);
+       pfit_control_reg_value[pipe] = I915_READ(PFIT_CONTROL);
+}
+#endif
+
+static void intel_crtc_mode_set(struct drm_crtc *crtc,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode,
+                               int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int pipe = intel_crtc->pipe;
+       int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int refclk;
+       intel_clock_t clock;
+       u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+       bool ok, is_sdvo = false, is_dvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_output *output;
+
+       if (!crtc->fb) {
+               DRM_ERROR("Can't set mode without attached fb\n");
+               return;
+       }
+
+       list_for_each_entry(output, &mode_config->output_list, head) {
+               struct intel_output *intel_output = output->driver_private;
+
+               if (output->crtc != crtc)
+                       continue;
+
+               switch (intel_output->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = TRUE;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = TRUE;
+                       break;
+               case INTEL_OUTPUT_DVO:
+                       is_dvo = TRUE;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = TRUE;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = TRUE;
+                       break;
+               }
+       }
+       
+       if (IS_I9XX(dev)) {
+               refclk = 96000;
+       } else {
+               refclk = 48000;
+       }
+
+       ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
+       if (!ok) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return;
+       }
+
+       fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+       
+       dpll = DPLL_VGA_MODE_DIS;
+       if (IS_I9XX(dev)) {
+               if (is_lvds) {
+                       dpll |= DPLLB_MODE_LVDS;
+                       if (IS_POULSBO(dev))
+                               dpll |= DPLL_DVO_HIGH_SPEED;
+               } else
+                       dpll |= DPLLB_MODE_DAC_SERIAL;
+               if (is_sdvo) {
+                       dpll |= DPLL_DVO_HIGH_SPEED;
+                       if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
+                               int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+                               dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+                       }
+               }
+               
+               /* compute bitmask from p1 value */
+               dpll |= (1 << (clock.p1 - 1)) << 16;
+               switch (clock.p2) {
+               case 5:
+                       dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+                       break;
+               case 7:
+                       dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+                       break;
+               case 10:
+                       dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+                       break;
+               case 14:
+                       dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+                       break;
+               }
+               if (IS_I965G(dev))
+                       dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+       } else {
+               if (is_lvds) {
+                       dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+               } else {
+                       if (clock.p1 == 2)
+                               dpll |= PLL_P1_DIVIDE_BY_TWO;
+                       else
+                               dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+                       if (clock.p2 == 4)
+                               dpll |= PLL_P2_DIVIDE_BY_4;
+               }
+       }
+       
+       if (is_tv) {
+               /* XXX: just matching BIOS for now */
+               /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+               dpll |= 3;
+       }
+#if 0
+       else if (is_lvds)
+               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+#endif
+       else
+               dpll |= PLL_REF_INPUT_DREFCLK;
+       
+       /* setup pipeconf */
+       pipeconf = I915_READ(pipeconf_reg);
+
+       /* Set up the display plane register */
+       dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case 16:
+               if (crtc->fb->depth == 15)
+                       dspcntr |= DISPPLANE_15_16BPP;
+               else
+                       dspcntr |= DISPPLANE_16BPP;
+               break;
+       case 32:
+               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+               break;
+       default:
+               DRM_ERROR("Unknown color depth\n");
+               return;
+       }
+       
+
+       if (pipe == 0)
+               dspcntr |= DISPPLANE_SEL_PIPE_A;
+       else
+               dspcntr |= DISPPLANE_SEL_PIPE_B;
+       
+       if (pipe == 0 && !IS_I965G(dev)) {
+               /* Enable pixel doubling when the dot clock is > 90% of the (display)
+                * core speed.
+                *
+                * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+                * pipe == 0 check?
+                */
+               if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
+                       pipeconf |= PIPEACONF_DOUBLE_WIDE;
+               else
+                       pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+       }
+
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+       pipeconf |= PIPEACONF_ENABLE;
+       dpll |= DPLL_VCO_ENABLE;
+
+       
+       /* Disable the panel fitter if it was on our pipe */
+       if (intel_panel_fitter_pipe(dev) == pipe)
+               I915_WRITE(PFIT_CONTROL, 0);
+
+       DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+       drm_mode_debug_printmodeline(dev, mode);
+
+       /* psbPrintPll("chosen", &clock); */
+       DRM_DEBUG("clock regs: 0x%08x, 0x%08x, dspcntr is 0x%08x, pipeconf is 0x%08x\n",
+                 (int)dpll, (int)fp, (int)dspcntr, (int)pipeconf);
+#if 0
+       if (!xf86ModesEqual(mode, adjusted_mode)) {
+               xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+                          "Adjusted mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+               xf86PrintModeline(pScrn->scrnIndex, mode);
+       }
+       i830PrintPll("chosen", &clock);
+#endif
+
+       if (dpll & DPLL_VCO_ENABLE) {
+               I915_WRITE(fp_reg, fp);
+               I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+               I915_READ(dpll_reg);
+               udelay(150);
+       }
+       
+       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+        * This is an exception to the general rule that mode_set doesn't turn
+        * things on.
+        */
+       if (is_lvds) {
+               u32 lvds = I915_READ(LVDS);
+               
+               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+               /* Set the B0-B3 data pairs corresponding to whether we're going to
+                * set the DPLLs for dual-channel mode or not.
+                */
+               if (clock.p2 == 7)
+                       lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+               else
+                       lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+               
+               /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+                * appropriately here, but we need to look more thoroughly into how
+                * panels behave in the two modes.
+                */
+               
+               I915_WRITE(LVDS, lvds);
+               I915_READ(LVDS);
+       }
+       
+       I915_WRITE(fp_reg, fp);
+       I915_WRITE(dpll_reg, dpll);
+       I915_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+       
+       if (IS_I965G(dev)) {
+               int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+               I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+                          ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+       } else {
+               /* write it again -- the BIOS does, after all */
+               I915_WRITE(dpll_reg, dpll);
+       }
+       I915_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+       
+       I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+                  ((adjusted_mode->crtc_htotal - 1) << 16));
+       I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+                  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+                  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+                  ((adjusted_mode->crtc_vtotal - 1) << 16));
+       I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+                  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+                  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       I915_WRITE(dspstride_reg, crtc->fb->pitch);
+       /* pipesrc and dspsize control the size that is scaled from, which should
+        * always be the user's requested size.
+        */
+       I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       I915_WRITE(dsppos_reg, 0);
+       I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+       I915_WRITE(pipeconf_reg, pipeconf);
+       I915_READ(pipeconf_reg);
+       
+       intel_wait_for_vblank(dev);
+       
+       I915_WRITE(dspcntr_reg, dspcntr);
+       
+       /* Flush the plane changes */
+       intel_pipe_set_base(crtc, x, y);
+       
+#if 0
+       intel_set_vblank(dev);
+#endif
+
+       /* Disable the VGA plane that we never use */
+       I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+       intel_wait_for_vblank(dev);    
+}
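
All six timing writes above share one encoding: the hardware counts pixels and
lines from zero, so each register takes (start value - 1) in bits 15:0 and
(end/total value - 1) in bits 31:16. A minimal user-space sketch of that
packing (pack_timing is an invented name, shown only for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the HTOTAL/VTOTAL-style packing used in the mode-set
     * path: start count minus one in the low half, end count minus
     * one in the high half. */
    static uint32_t pack_timing(uint32_t start, uint32_t end)
    {
            return ((end - 1) << 16) | ((start - 1) & 0xffff);
    }

    int main(void)
    {
            /* 1024x768@60: hdisplay = 1024, htotal = 1344 */
            uint32_t htot = pack_timing(1024, 1344);

            printf("HTOTAL word: 0x%08x\n", htot); /* 0x053f03ff */
            return 0;
    }
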
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+       int i;
+
+       /* The clocks have to be on to load the palette. */
+       if (!crtc->enabled)
+               return;
+
+       for (i = 0; i < 256; i++) {
+               I915_WRITE(palreg + 4 * i,
+                          (intel_crtc->lut_r[i] << 16) |
+                          (intel_crtc->lut_g[i] << 8) |
+                          intel_crtc->lut_b[i]);
+       }
+}
+
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                                u16 blue, int regno)
+{
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       
+       intel_crtc->lut_r[regno] = red >> 8;
+       intel_crtc->lut_g[regno] = green >> 8;
+       intel_crtc->lut_b[regno] = blue >> 8;
+}
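
gamma_set only caches the ramp; intel_crtc_load_lut() above pushes it out to
PALETTE_A/PALETTE_B. Together they truncate RandR's 16-bit channels to 8 bits
and pack one 0x00RRGGBB word per entry, as in this illustrative sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Keep the top 8 bits of each 16-bit channel and pack one palette
     * word, matching the PALETTE_A/PALETTE_B write in load_lut. */
    static uint32_t palette_word(uint16_t red, uint16_t green, uint16_t blue)
    {
            return ((uint32_t)(red >> 8) << 16) |
                   ((uint32_t)(green >> 8) << 8) |
                   (uint32_t)(blue >> 8);
    }

    int main(void)
    {
            printf("0x%06x\n", palette_word(0xffff, 0x8000, 0x0000)); /* 0xff8000 */
            return 0;
    }
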
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int pipe = intel_crtc->pipe;
+       u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+       u32 fp;
+       intel_clock_t clock;
+
+       if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+               fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+       else
+               fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+
+       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+       clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+       if (IS_I9XX(dev)) {
+               clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+                              DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+               switch (dpll & DPLL_MODE_MASK) {
+               case DPLLB_MODE_DAC_SERIAL:
+                       clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+                               5 : 10;
+                       break;
+               case DPLLB_MODE_LVDS:
+                       clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+                               7 : 14;
+                       break;
+               default:
+                       DRM_DEBUG("Unknown DPLL mode %08x in programmed "
+                                 "mode\n", (int)(dpll & DPLL_MODE_MASK));
+                       return 0;
+               }
+
+               /* XXX: Handle the 100MHz refclk */
+               i9xx_clock(96000, &clock);
+       } else {
+               bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+               if (is_lvds) {
+                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+                                      DPLL_FPA01_P1_POST_DIV_SHIFT);
+                       clock.p2 = 14;
+
+                       if ((dpll & PLL_REF_INPUT_MASK) ==
+                           PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                               /* XXX: might not be 66MHz */
+                               i8xx_clock(66000, &clock);
+                       } else
+                               i8xx_clock(48000, &clock);              
+               } else {
+                       if (dpll & PLL_P1_DIVIDE_BY_TWO)
+                               clock.p1 = 2;
+                       else {
+                               clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+                                           DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+                       }
+                       if (dpll & PLL_P2_DIVIDE_BY_4)
+                               clock.p2 = 4;
+                       else
+                               clock.p2 = 2;
+
+                       i8xx_clock(48000, &clock);
+               }
+       }
+
+       /* XXX: It would be nice to validate the clocks, but we can't reuse
+        * i830PllIsValid() because it relies on the xf86_config output
+        * configuration being accurate, which it isn't necessarily.
+        */
+
+       return clock.dot;
+}
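
The readback above leans on i9xx_clock()/i8xx_clock(), defined earlier in this
file, to turn the recovered n/m1/m2/p1/p2 into a dot clock. A user-space sketch
of the usual i9xx-style derivation (the +2 offsets are the conventional
hardware encoding and are assumed here, not taken from this hunk):

    #include <stdio.h>

    struct clk { int n, m1, m2, p1, p2; };

    static int sketch_i9xx_dot_khz(int refclk_khz, const struct clk *c)
    {
            int m = 5 * (c->m1 + 2) + (c->m2 + 2); /* effective multiplier */
            int p = c->p1 * c->p2;                 /* post divider */
            int vco = refclk_khz * m / (c->n + 2);

            return vco / p;
    }

    int main(void)
    {
            /* Invented divider values, purely for illustration. */
            struct clk c = { .n = 4, .m1 = 10, .m2 = 8, .p1 = 2, .p2 = 5 };

            printf("%d kHz\n", sketch_i9xx_dot_khz(96000, &c)); /* 112000 */
            return 0;
    }
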
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       int pipe = intel_crtc->pipe;
+       struct drm_display_mode *mode;
+       int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+       int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+       int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+       int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+
+       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+       if (!mode)
+               return NULL;
+
+       mode->clock = intel_crtc_clock_get(dev, crtc);
+       mode->hdisplay = (htot & 0xffff) + 1;
+       mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+       mode->hsync_start = (hsync & 0xffff) + 1;
+       mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+       mode->vdisplay = (vtot & 0xffff) + 1;
+       mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+       mode->vsync_start = (vsync & 0xffff) + 1;
+       mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+       drm_mode_set_name(mode);
+       drm_mode_set_crtcinfo(mode, 0);
+
+       return mode;
+}
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+       .dpms = intel_crtc_dpms,
+       .lock = intel_crtc_lock,
+       .unlock = intel_crtc_unlock,
+       .mode_fixup = intel_crtc_mode_fixup,
+       .mode_set = intel_crtc_mode_set,
+       .gamma_set = intel_crtc_gamma_set,
+       .prepare = intel_crtc_prepare,
+       .commit = intel_crtc_commit,
+};
+
+
+void intel_crtc_init(struct drm_device *dev, int pipe)
+{
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       int i;
+
+       crtc = drm_crtc_create(dev, &intel_crtc_funcs);
+       if (crtc == NULL)
+               return;
+
+       intel_crtc = kzalloc(sizeof(struct intel_crtc), GFP_KERNEL);
+       if (intel_crtc == NULL) {
+               kfree(crtc);
+               return;
+       }
+
+       intel_crtc->pipe = pipe;
+       for (i = 0; i < 256; i++) {
+               intel_crtc->lut_r[i] = i;
+               intel_crtc->lut_g[i] = i;
+               intel_crtc->lut_b[i] = i;
+       }
+
+       crtc->driver_private = intel_crtc;
+}
+
+struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+       struct drm_crtc *crtc = NULL;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct intel_crtc *intel_crtc = crtc->driver_private;
+               if (intel_crtc->pipe == pipe)
+                       break;
+       }
+       return crtc;
+}
+
+int intel_output_clones(struct drm_device *dev, int type_mask)
+{
+       int index_mask = 0;
+       struct drm_output *output;
+       int entry = 0;
+
+        list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               struct intel_output *intel_output = output->driver_private;
+               if (type_mask & (1 << intel_output->type))
+                       index_mask |= (1 << entry);
+               entry++;
+       }
+       return index_mask;
+}
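
A worked example may help: with a hypothetical output list registered in the
order analog, SDVO, LVDS, asking for clones of analog and SDVO should yield
index mask 0x3. A user-space rendering of the same loop:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical output list in registration order, using the
             * INTEL_OUTPUT_* values from intel_drv.h below. */
            int types[] = { 1 /* ANALOG */, 3 /* SDVO */, 4 /* LVDS */ };
            int type_mask = (1 << 1) | (1 << 3); /* ANALOG | SDVO */
            int index_mask = 0;

            for (int i = 0; i < 3; i++)
                    if (type_mask & (1 << types[i]))
                            index_mask |= 1 << i;

            printf("possible_clones = 0x%x\n", index_mask); /* 0x3 */
            return 0;
    }
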
+
+
+static void intel_setup_outputs(struct drm_device *dev)
+{
+       struct drm_output *output;
+
+       if (!IS_POULSBO(dev))
+               intel_crt_init(dev);
+
+       /* Set up integrated LVDS */
+       if (IS_MOBILE(dev) && !IS_I830(dev))
+               intel_lvds_init(dev);
+
+       if (IS_I9XX(dev)) {
+               intel_sdvo_init(dev, SDVOB);
+               intel_sdvo_init(dev, SDVOC);
+       }
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               struct intel_output *intel_output = output->driver_private;
+               int crtc_mask = 0, clone_mask = 0;
+               
+               /* valid crtcs */
+               switch(intel_output->type) {
+               case INTEL_OUTPUT_DVO:
+               case INTEL_OUTPUT_SDVO:
+                       crtc_mask = ((1 << 0)|
+                                    (1 << 1));
+                       clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
+                                     (1 << INTEL_OUTPUT_DVO) |
+                                     (1 << INTEL_OUTPUT_SDVO));
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       crtc_mask = ((1 << 0)|
+                                    (1 << 1));
+                       clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
+                                     (1 << INTEL_OUTPUT_DVO) |
+                                     (1 << INTEL_OUTPUT_SDVO));
+                       break;
+               case INTEL_OUTPUT_LVDS:
+                       crtc_mask = (1 << 1);
+                       clone_mask = (1 << INTEL_OUTPUT_LVDS);
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       crtc_mask = ((1 << 0) |
+                                    (1 << 1));
+                       clone_mask = (1 << INTEL_OUTPUT_TVOUT);
+                       break;
+               }
+               output->possible_crtcs = crtc_mask;
+               output->possible_clones = intel_output_clones(dev, clone_mask);
+       }
+}
+
+void intel_modeset_init(struct drm_device *dev)
+{
+       int num_pipe;
+       int i;
+
+       drm_mode_config_init(dev);
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       dev->mode_config.max_width = 4096;
+       dev->mode_config.max_height = 4096;
+
+       /* set memory base */
+       if (IS_I9XX(dev))
+               dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+       else
+               dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+
+       if (IS_MOBILE(dev) || IS_I9XX(dev))
+               num_pipe = 2;
+       else
+               num_pipe = 1;
+       DRM_DEBUG("%d display pipe%s available.\n",
+                 num_pipe, num_pipe > 1 ? "s" : "");
+
+       for (i = 0; i < num_pipe; i++) {
+               intel_crtc_init(dev, i);
+       }
+
+       intel_setup_outputs(dev);
+
+       //drm_initial_config(dev, false);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+       drm_mode_config_cleanup(dev);
+}
diff --git a/psb-kernel-source-4.41.1/intel_drv.h b/psb-kernel-source-4.41.1/intel_drv.h
new file mode 100644 (file)
index 0000000..d3b7cb7
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drm_crtc.h"
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* These are the integrated outputs from the chip;
+   external chips are attached via a DVO or SDVO output. */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+struct intel_i2c_chan {
+       struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
+       u32 reg; /* GPIO reg */
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+        u8 slave_addr;
+};
+
+struct intel_output {
+       int type;
+       struct intel_i2c_chan *i2c_bus; /* for control functions */
+       struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
+       bool load_detect_tmp;
+       void *dev_priv;
+};
+
+struct intel_crtc {
+       int pipe;
+       u8 lut_r[256], lut_g[256], lut_b[256];
+};
+
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
+                                       const char *name);
+void intel_i2c_destroy(struct intel_i2c_chan *chan);
+int intel_ddc_get_modes(struct drm_output *output);
+extern bool intel_ddc_probe(struct drm_output *output);
+
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void intel_lvds_init(struct drm_device *dev);
+
+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void intel_output_prepare (struct drm_output *output);
+extern void intel_output_commit (struct drm_output *output);
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+                                                   struct drm_crtc *crtc);
+extern void intel_wait_for_vblank(struct drm_device *dev);
+extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+
+extern int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int intelfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
+
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+
+#define WA_NO_FB_GARBAGE_DISPLAY
+#ifdef WA_NO_FB_GARBAGE_DISPLAY
+extern void intel_crtc_mode_restore(struct drm_crtc *crtc);
+extern void intel_crtc_mode_save(struct drm_crtc *crtc);
+#endif
+
+#endif /* __INTEL_DRV_H__ */
diff --git a/psb-kernel-source-4.41.1/intel_fb.c b/psb-kernel-source-4.41.1/intel_fb.c
new file mode 100644 (file)
index 0000000..cb0296b
--- /dev/null
@@ -0,0 +1,653 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+    /*
+     *  Modularization
+     */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+struct intelfb_par {
+       struct drm_device *dev;
+       struct drm_crtc *crtc;
+        struct drm_display_mode *fb_mode;
+};
+
+static int
+var_to_refresh(const struct fb_var_screeninfo *var)
+{
+       int xtot = var->xres + var->left_margin + var->right_margin +
+                  var->hsync_len;
+       int ytot = var->yres + var->upper_margin + var->lower_margin +
+                  var->vsync_len;
+
+       return (1000000000 / var->pixclock * 1000 + 500) / xtot / ytot;
+}
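
The arithmetic is easier to see with numbers plugged in; fbdev's pixclock is
picoseconds per pixel, so a 65 MHz dot clock is roughly 15384 ps, and a
standard 1024x768 mode comes out at 60 Hz:

    #include <stdio.h>

    int main(void)
    {
            /* 1024x768@60: 65 MHz dot clock ~= 15384 ps per pixel. */
            int pixclock = 15384, xtot = 1344, ytot = 806;
            int refresh = (1000000000 / pixclock * 1000 + 500) / xtot / ytot;

            printf("%d Hz\n", refresh); /* 60 */
            return 0;
    }
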
+
+static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                          unsigned blue, unsigned transp,
+                          struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_crtc *crtc = par->crtc;
+
+       if (regno > 255)
+               return 1;
+
+       if (fb->depth == 8) {
+               if (crtc->funcs->gamma_set)
+                       crtc->funcs->gamma_set(crtc, red, green, blue, regno);
+               return 0;
+       }
+
+       if (regno < 16) {
+               switch (fb->depth) {
+               case 15:
+                       fb->pseudo_palette[regno] = ((red & 0xf800) >>  1) |
+                               ((green & 0xf800) >>  6) |
+                               ((blue & 0xf800) >> 11);
+                       break;
+               case 16:
+                       fb->pseudo_palette[regno] = (red & 0xf800) |
+                               ((green & 0xfc00) >>  5) |
+                               ((blue  & 0xf800) >> 11);
+                       break;
+               case 24:
+               case 32:
+                       fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
+                               (green & 0xff00) |
+                               ((blue  & 0xff00) >> 8);
+                       break;
+               }
+       }
+
+       return 0;
+}
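
For the truecolor depths the pseudo-palette entry is a ready-made pixel value;
a worked RGB565 example of the depth-16 branch above (user-space, illustrative
only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* fbdev hands in 16-bit intensities; keep the top 5/6/5 bits. */
            uint16_t red = 0xffff, green = 0x0000, blue = 0xffff;
            uint32_t pix = (red & 0xf800) |
                           ((green & 0xfc00) >> 5) |
                           ((blue & 0xf800) >> 11);

            printf("0x%04x\n", pix); /* 0xf81f: magenta in RGB565 */
            return 0;
    }
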
+
+static int intelfb_check_var(struct fb_var_screeninfo *var,
+                            struct fb_info *info)
+{
+        struct intelfb_par *par = info->par;
+        struct drm_device *dev = par->dev;
+       struct drm_framebuffer *fb = par->crtc->fb;
+        struct drm_output *output;
+        int depth, found = 0;
+
+        if (!var->pixclock)
+                return -EINVAL;
+
+        /* Need to resize the fb object !!! */
+        if (var->xres > fb->width || var->yres > fb->height) {
+                DRM_ERROR("Requested width/height is greater than current fb "
+                          "object: %dx%d > %dx%d\n",
+                          var->xres, var->yres, fb->width, fb->height);
+                DRM_ERROR("Need resizing code.\n");
+                return -EINVAL;
+        }
+
+        switch (var->bits_per_pixel) {
+        case 16:
+                depth = (var->green.length == 6) ? 16 : 15;
+                break;
+        case 32:
+                depth = (var->transp.length > 0) ? 32 : 24;
+                break;
+        default:
+                depth = var->bits_per_pixel;
+                break;
+        }
+                
+        switch (depth) {
+        case 8:
+                var->red.offset = 0;
+                var->green.offset = 0;
+                var->blue.offset = 0;
+                var->red.length = 8;
+                var->green.length = 8;
+                var->blue.length = 8;
+                var->transp.length = 0;
+                var->transp.offset = 0;
+                break;
+        case 15:
+                var->red.offset = 10;
+                var->green.offset = 5;
+                var->blue.offset = 0;
+                var->red.length = 5;
+                var->green.length = 5;
+                var->blue.length = 5;
+                var->transp.length = 1;
+                var->transp.offset = 15;
+                break;
+        case 16:
+                var->red.offset = 11;
+                var->green.offset = 6;
+                var->blue.offset = 0;
+                var->red.length = 5;
+                var->green.length = 6;
+                var->blue.length = 5;
+                var->transp.length = 0;
+                var->transp.offset = 0;
+                break;
+        case 24:
+                var->red.offset = 16;
+                var->green.offset = 8;
+                var->blue.offset = 0;
+                var->red.length = 8;
+                var->green.length = 8;
+                var->blue.length = 8;
+                var->transp.length = 0;
+                var->transp.offset = 0;
+                break;
+        case 32:
+                var->red.offset = 16;
+                var->green.offset = 8;
+                var->blue.offset = 0;
+                var->red.length = 8;
+                var->green.length = 8;
+                var->blue.length = 8;
+                var->transp.length = 8;
+                var->transp.offset = 24;
+                break;
+        default:
+                return -EINVAL; 
+        }
+
+#if 0
+        /* Here we walk the output mode list and look for modes. If we haven't
+         * got it, then bail. Not very nice, so this is disabled.
+         * In the set_par code, we create our mode based on the incoming
+         * parameters. Nicer, but may not be desired by some.
+         */
+        list_for_each_entry(output, &dev->mode_config.output_list, head) {
+                if (output->crtc == par->crtc)
+                        break;
+        }
+    
+        list_for_each_entry(drm_mode, &output->modes, head) {
+                if (drm_mode->hdisplay == var->xres &&
+                    drm_mode->vdisplay == var->yres &&
+                    (((PICOS2KHZ(var->pixclock))/1000) >= ((drm_mode->clock/1000)-1)) &&
+                    (((PICOS2KHZ(var->pixclock))/1000) <= ((drm_mode->clock/1000)+1))) {
+                       found = 1;
+                       break;
+               }
+       }
+        if (!found)
+                return -EINVAL;
+#endif
+
+       return 0;
+}
+
+/* this will let fbcon do the mode init */
+/* FIXME: take mode config lock? */
+static int intelfb_set_par(struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_device *dev = par->dev;
+        struct drm_display_mode *drm_mode, *search_mode;
+        struct drm_output *output;
+        struct fb_var_screeninfo *var = &info->var;
+       int found = 0;
+
+        switch (var->bits_per_pixel) {
+        case 16:
+                fb->depth = (var->green.length == 6) ? 16 : 15;
+                break;
+        case 32:
+                fb->depth = (var->transp.length > 0) ? 32 : 24;
+                break;
+        default:
+                fb->depth = var->bits_per_pixel;
+                break;
+        }
+
+        fb->bits_per_pixel = var->bits_per_pixel;
+
+        info->fix.line_length = fb->pitch;
+        info->fix.smem_len = info->fix.line_length * fb->height;
+        info->fix.visual = (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+
+        info->screen_size = info->fix.smem_len; /* ??? */
+
+       /* create a drm mode */
+        drm_mode = drm_mode_create(dev);
+        drm_mode->hdisplay = var->xres;
+        drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
+        drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
+        drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
+        drm_mode->vdisplay = var->yres;
+        drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
+        drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
+        drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
+        drm_mode->clock = PICOS2KHZ(var->pixclock);
+        drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
+        drm_mode_set_name(drm_mode);
+       drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
+
+        list_for_each_entry(output, &dev->mode_config.output_list, head) {
+                if (output->crtc == par->crtc)
+                        break;
+        }
+
+       drm_mode_debug_printmodeline(dev, drm_mode);    
+        list_for_each_entry(search_mode, &output->modes, head) {
+               DRM_ERROR("mode %s : %s\n", drm_mode->name, search_mode->name);
+               drm_mode_debug_printmodeline(dev, search_mode);
+               if (drm_mode_equal(drm_mode, search_mode)) {
+                       drm_mode_destroy(dev, drm_mode);
+                       drm_mode = search_mode;
+                       found = 1;
+                       break;
+               }
+       }
+       
+       if (!found) {
+               drm_mode_addmode(dev, drm_mode);
+               if (par->fb_mode) {
+                       drm_mode_detachmode_crtc(dev, par->fb_mode);
+                       drm_mode_rmmode(dev, par->fb_mode);
+               }
+       
+               par->fb_mode = drm_mode;
+               drm_mode_debug_printmodeline(dev, drm_mode);
+               /* attach mode */
+               drm_mode_attachmode_crtc(dev, par->crtc, par->fb_mode);
+       }
+
+        if (!drm_crtc_set_mode(par->crtc, drm_mode, 0, 0))
+                return -EINVAL;
+
+       return 0;
+}
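
The mode construction above is the standard fbdev-margin-to-DRM-timing
mapping; a worked example with the usual VESA 1024x768@60 horizontal numbers:

    #include <stdio.h>

    int main(void)
    {
            int xres = 1024, right_margin = 24, hsync_len = 136, left_margin = 160;

            int hdisplay = xres;                           /* 1024 */
            int hsync_start = hdisplay + right_margin;     /* 1048 */
            int hsync_end = hsync_start + hsync_len;       /* 1184 */
            int htotal = hsync_end + left_margin;          /* 1344 */

            printf("%d %d %d %d\n", hdisplay, hsync_start, hsync_end, htotal);
            return 0;
    }
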
+
+#if 0
+static void intelfb_copyarea(struct fb_info *info,
+                            const struct fb_copyarea *region)
+{
+        struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 src_x1, src_y1, dst_x1, dst_y1, dst_x2, dst_y2, offset;
+       u32 cmd, rop_depth_pitch, src_pitch;
+       RING_LOCALS;
+
+       cmd = XY_SRC_COPY_BLT_CMD;
+       src_x1 = region->sx;
+       src_y1 = region->sy;
+       dst_x1 = region->dx;
+       dst_y1 = region->dy;
+       dst_x2 = region->dx + region->width;
+       dst_y2 = region->dy + region->height;
+       offset = par->fb->offset;
+       rop_depth_pitch = BLT_ROP_GXCOPY | par->fb->pitch;
+       src_pitch = par->fb->pitch;
+
+       switch (par->fb->bits_per_pixel) {
+       case 16:
+               rop_depth_pitch |= BLT_DEPTH_16_565;
+               break;
+       case 32:
+               rop_depth_pitch |= BLT_DEPTH_32;
+               cmd |= XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB;
+               break;
+       }
+
+       BEGIN_LP_RING(8);
+       OUT_RING(cmd);
+       OUT_RING(rop_depth_pitch);
+       OUT_RING((dst_y1 << 16) | (dst_x1 & 0xffff));
+       OUT_RING((dst_y2 << 16) | (dst_x2 & 0xffff));
+       OUT_RING(offset);
+       OUT_RING((src_y1 << 16) | (src_x1 & 0xffff));
+       OUT_RING(src_pitch);
+       OUT_RING(offset);
+       ADVANCE_LP_RING();
+}
+
+#define ROUND_UP_TO(x, y)      (((x) + (y) - 1) / (y) * (y))
+#define ROUND_DOWN_TO(x, y)    ((x) / (y) * (y))
+
+void intelfb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+        struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 cmd, rop_pitch_depth, tmp;
+       int nbytes, ndwords, pad;
+       u32 dst_x1, dst_y1, dst_x2, dst_y2, offset, bg, fg;
+       int dat, ix, iy, iw;
+       int i, j;
+       RING_LOCALS;
+
+       /* size in bytes of a padded scanline */
+       nbytes = ROUND_UP_TO(image->width, 16) / 8;
+
+       /* Total bytes of padded scanline data to write out. */
+       nbytes *= image->height;
+
+       /*
+        * Check if the glyph data exceeds the immediate mode limit.
+        * It would take a large font (1K pixels) to hit this limit.
+        */
+       if (nbytes > 128 || image->depth != 1)
+               return cfb_imageblit(info, image);
+
+       /* Src data is packaged a dword (32-bit) at a time. */
+       ndwords = ROUND_UP_TO(nbytes, 4) / 4;
+
+       /*
+        * The ring has to be padded to a quad word, but because the command
+        * starts with 7 dwords, pad only if there is an even number of
+        * ndwords.
+        */
+       pad = !(ndwords % 2);
+
+       DRM_DEBUG("imageblit %dx%dx%d to (%d,%d)\n", image->width,
+                 image->height, image->depth, image->dx, image->dy);
+       DRM_DEBUG("nbytes: %d, ndwords: %d, pad: %d\n", nbytes, ndwords, pad);
+
+       tmp = (XY_MONO_SRC_COPY_IMM_BLT & 0xff) + ndwords;
+       cmd = (XY_MONO_SRC_COPY_IMM_BLT & ~0xff) | tmp;
+       offset = par->fb->offset;
+       dst_x1 = image->dx;
+       dst_y1 = image->dy;
+       dst_x2 = image->dx + image->width;
+       dst_y2 = image->dy + image->height;
+       rop_pitch_depth = BLT_ROP_GXCOPY | par->fb->pitch;
+
+       switch (par->fb->bits_per_pixel) {
+       case 8:
+               rop_pitch_depth |= BLT_DEPTH_8;
+               fg = image->fg_color;
+               bg = image->bg_color;
+               break;
+       case 16:
+               rop_pitch_depth |= BLT_DEPTH_16_565;
+               fg = par->fb->pseudo_palette[image->fg_color];
+               bg = par->fb->pseudo_palette[image->bg_color];
+               break;
+       case 32:
+               rop_pitch_depth |= BLT_DEPTH_32;
+               cmd |= XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB;
+               fg = par->fb->pseudo_palette[image->fg_color];
+               bg = par->fb->pseudo_palette[image->bg_color];
+               break;
+       default:
+               DRM_ERROR("unknown depth %d\n", par->fb->bits_per_pixel);
+               break;
+       }
+       
+       BEGIN_LP_RING(8 + ndwords);
+       OUT_RING(cmd);
+       OUT_RING(rop_pitch_depth);
+       OUT_RING((dst_y1 << 16) | (dst_x1 & 0xffff));
+       OUT_RING((dst_y2 << 16) | (dst_x2 & 0xffff));
+       OUT_RING(offset);
+       OUT_RING(bg);
+       OUT_RING(fg);
+       ix = iy = 0;
+       iw = ROUND_UP_TO(image->width, 8) / 8;
+       while (ndwords--) {
+               dat = 0;
+               for (j = 0; j < 2; ++j) {
+                       for (i = 0; i < 2; ++i) {
+                               if (ix != iw || i == 0)
+                                       dat |= image->data[iy*iw + ix++] << (i+j*2)*8;
+                       }
+                       if (ix == iw && iy != (image->height - 1)) {
+                               ix = 0;
+                               ++iy;
+                       }
+               }
+               OUT_RING(dat);
+       }
+       if (pad)
+               OUT_RING(MI_NOOP);
+       ADVANCE_LP_RING();
+}
+#endif
+
+static struct fb_ops intelfb_ops = {
+       .owner = THIS_MODULE,
+       //      .fb_open = intelfb_open,
+       //      .fb_read = intelfb_read,
+       //      .fb_write = intelfb_write,
+       //      .fb_release = intelfb_release,
+       //      .fb_ioctl = intelfb_ioctl,
+       .fb_check_var = intelfb_check_var,
+       .fb_set_par = intelfb_set_par,
+       .fb_setcolreg = intelfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea, //intelfb_copyarea,
+       .fb_imageblit = cfb_imageblit, //intelfb_imageblit,
+};
+
+int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct fb_info *info;
+       struct intelfb_par *par;
+       struct device *device = &dev->pdev->dev; 
+       struct drm_framebuffer *fb;
+       struct drm_display_mode *mode = crtc->desired_mode;
+       struct drm_buffer_object *fbo = NULL;
+       int ret;
+
+       info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+       if (!info){
+               return -EINVAL;
+       }
+
+       fb = drm_framebuffer_create(dev);
+       if (!fb) {
+               framebuffer_release(info);
+               DRM_ERROR("failed to allocate fb.\n");
+               return -EINVAL;
+       }
+       crtc->fb = fb;
+
+       fb->width = crtc->desired_mode->hdisplay;
+       fb->height = crtc->desired_mode->vdisplay;
+
+       fb->bits_per_pixel = 32;
+       fb->pitch = fb->width * ((fb->bits_per_pixel + 1) / 8);
+       fb->depth = 24;
+       ret = drm_buffer_object_create(dev, fb->width * fb->height * 4, 
+                                      drm_bo_type_kernel,
+                                      DRM_BO_FLAG_READ |
+                                      DRM_BO_FLAG_WRITE |
+                                      DRM_BO_FLAG_MEM_TT |
+                                      DRM_BO_FLAG_MEM_VRAM |
+                                      DRM_BO_FLAG_NO_EVICT,
+                                      DRM_BO_HINT_DONT_FENCE, 0, 0,
+                                      &fbo);
+       if (ret || !fbo) {
+               printk(KERN_ERR "failed to allocate framebuffer\n");
+               drm_framebuffer_destroy(fb);
+               framebuffer_release(info);
+               return -EINVAL;
+       }
+
+       fb->offset = fbo->offset;
+       fb->bo = fbo;
+       DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
+                      fb->height, fbo->offset, fbo);
+
+
+       fb->fbdev = info;
+               
+       par = info->par;
+
+       par->dev = dev;
+       par->crtc = crtc;
+
+       info->fbops = &intelfb_ops;
+
+       strcpy(info->fix.id, "intelfb");
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = FB_VISUAL_TRUECOLOR;
+       info->fix.type_aux = 0;
+       info->fix.xpanstep = 8;
+       info->fix.ypanstep = 1;
+       info->fix.ywrapstep = 0;
+       info->fix.accel = FB_ACCEL_I830;
+       info->fix.mmio_start = 0;
+       info->fix.mmio_len = 0;
+       info->fix.line_length = fb->pitch;
+       info->fix.smem_start = fb->offset + dev->mode_config.fb_base;
+       info->fix.smem_len = info->fix.line_length * fb->height;
+
+       info->flags = FBINFO_DEFAULT;
+
+       ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
+       if (ret)
+               DRM_ERROR("error mapping fb: %d\n", ret);
+
+       info->screen_base = fb->kmap.virtual;
+       info->screen_size = info->fix.smem_len; /* FIXME */
+       info->pseudo_palette = fb->pseudo_palette;
+       info->var.xres_virtual = fb->width;
+       info->var.yres_virtual = fb->height;
+       info->var.bits_per_pixel = fb->bits_per_pixel;
+       info->var.xoffset = 0;
+       info->var.yoffset = 0;
+       info->var.activate = FB_ACTIVATE_NOW;
+       info->var.height = -1;
+       info->var.width = -1;
+       info->var.vmode = FB_VMODE_NONINTERLACED;
+
+        info->var.xres = mode->hdisplay;
+        info->var.right_margin = mode->hsync_start - mode->hdisplay;
+        info->var.hsync_len = mode->hsync_end - mode->hsync_start;
+        info->var.left_margin = mode->htotal - mode->hsync_end;
+        info->var.yres = mode->vdisplay;
+        info->var.lower_margin = mode->vsync_start - mode->vdisplay;
+        info->var.vsync_len = mode->vsync_end - mode->vsync_start;
+       info->var.upper_margin = mode->vtotal - mode->vsync_end;
+        info->var.pixclock = 10000000 / mode->htotal * 1000 /
+               mode->vtotal * 100;
+       /* avoid overflow */
+       info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
+
+       info->pixmap.size = 64*1024;
+       info->pixmap.buf_align = 8;
+       info->pixmap.access_align = 32;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+       info->pixmap.scan_align = 1;
+
+       DRM_DEBUG("fb depth is %d\n", fb->depth);
+       DRM_DEBUG("   pitch is %d\n", fb->pitch);
+       switch(fb->depth) {
+       case 8:
+                info->var.red.offset = 0;
+                info->var.green.offset = 0;
+                info->var.blue.offset = 0;
+                info->var.red.length = 8; /* 8bit DAC */
+                info->var.green.length = 8;
+                info->var.blue.length = 8;
+                info->var.transp.offset = 0;
+                info->var.transp.length = 0;
+                break;
+       case 15:
+                info->var.red.offset = 10;
+                info->var.green.offset = 5;
+                info->var.blue.offset = 0;
+                info->var.red.length = info->var.green.length =
+                        info->var.blue.length = 5;
+                info->var.transp.offset = 15;
+                info->var.transp.length = 1;
+                break;
+       case 16:
+                info->var.red.offset = 11;
+                info->var.green.offset = 5;
+                info->var.blue.offset = 0;
+                info->var.red.length = 5;
+                info->var.green.length = 6;
+                info->var.blue.length = 5;
+                info->var.transp.offset = 0;
+               break;
+       case 24:
+                info->var.red.offset = 16;
+                info->var.green.offset = 8;
+                info->var.blue.offset = 0;
+                info->var.red.length = info->var.green.length =
+                        info->var.blue.length = 8;
+                info->var.transp.offset = 0;
+                info->var.transp.length = 0;
+                break;
+       case 32:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = info->var.green.length =
+                       info->var.blue.length = 8;
+               info->var.transp.offset = 24;
+               info->var.transp.length = 8;
+               break;
+       default:
+               break;
+       }
+
+       if (register_framebuffer(info) < 0)
+               return -EINVAL;
+
+       printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+              info->fix.id);
+       return 0;
+}
+EXPORT_SYMBOL(intelfb_probe);
+
+int intelfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct drm_framebuffer *fb = crtc->fb;
+       struct fb_info *info = fb->fbdev;
+       
+       if (info) {
+               unregister_framebuffer(info);
+                framebuffer_release(info);
+                drm_bo_kunmap(&fb->kmap);
+                drm_bo_usage_deref_unlocked(&fb->bo);
+                drm_framebuffer_destroy(fb);
+        }
+       return 0;
+}
+EXPORT_SYMBOL(intelfb_remove);
+MODULE_LICENSE("GPL");
diff --git a/psb-kernel-source-4.41.1/intel_i2c.c b/psb-kernel-source-4.41.1/intel_i2c.c
new file mode 100644 (file)
index 0000000..efcbf65
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 val;
+
+       val = I915_READ(chan->reg);
+       return ((val & GPIO_CLOCK_VAL_IN) != 0);
+}
+
+static int get_data(void *data)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 val;
+
+       val = I915_READ(chan->reg);
+       return ((val & GPIO_DATA_VAL_IN) != 0);
+}
+
+static void set_clock(void *data, int state_high)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 reserved = 0, clock_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       if (!IS_I830(dev) && !IS_845G(dev))
+               reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                                  GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+       else
+               clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+                       GPIO_CLOCK_VAL_MASK;
+       I915_WRITE(chan->reg, reserved | clock_bits);
+       udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 reserved = 0, data_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       if (!IS_I830(dev) && !IS_845G(dev))
+               reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                                  GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+       else
+               data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+                       GPIO_DATA_VAL_MASK;
+
+       I915_WRITE(chan->reg, reserved | data_bits);
+       udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
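
Both setters implement open-drain signalling: the line is never actively
driven high. As I read the two register writes, in truth-table form:

    /* state_high == 1: DIR_IN | DIR_MASK
     *     tristate the pin; the external pull-up floats the line high
     * state_high == 0: DIR_OUT | DIR_MASK | VAL_MASK
     *     actively drive the line to zero
     *
     * The *_MASK bits act as write enables: a field in the GPIO register
     * is only latched when its mask bit is set in the same write.
     */
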
+
+/**
+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @output: driver specific output device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ *   %GPIOA
+ *   %GPIOB
+ *   %GPIOC
+ *   %GPIOD
+ *   %GPIOE
+ *   %GPIOF
+ *   %GPIOG
+ *   %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
+                                       const char *name)
+{
+       struct intel_i2c_chan *chan;
+
+       chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
+       if (!chan)
+               goto out_free;
+
+       chan->drm_dev = dev;
+       chan->reg = reg;
+       snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+       chan->adapter.owner = THIS_MODULE;
+#ifndef I2C_HW_B_INTELFB
+#define I2C_HW_B_INTELFB I2C_HW_B_I810
+#endif
+       chan->adapter.id = I2C_HW_B_INTELFB;
+       chan->adapter.algo_data = &chan->algo;
+       chan->adapter.dev.parent = &dev->pdev->dev;
+       chan->algo.setsda = set_data;
+       chan->algo.setscl = set_clock;
+       chan->algo.getsda = get_data;
+       chan->algo.getscl = get_clock;
+       chan->algo.udelay = 20;
+       chan->algo.timeout = usecs_to_jiffies(2200);
+       chan->algo.data = chan;
+
+       i2c_set_adapdata(&chan->adapter, chan);
+
+       if(i2c_bit_add_bus(&chan->adapter))
+               goto out_free;
+
+       /* JJJ:  raise SCL and SDA? */
+       set_data(chan, 1);
+       set_clock(chan, 1);
+       udelay(20);
+
+       return chan;
+
+out_free:
+       kfree(chan);
+       return NULL;
+}
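
A representative call site (GPIOA and the "CRTDDC_A" label are the values a
CRT DDC probe would plausibly pass; they are shown purely for illustration):

    struct intel_i2c_chan *ddc;

    ddc = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
    if (!ddc)
            return;
    /* ... probe EDID over ddc->adapter ... */
    intel_i2c_destroy(ddc);
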
+
+/**
+ * intel_i2c_destroy - unregister and free i2c bus resources
+ * @output: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void intel_i2c_destroy(struct intel_i2c_chan *chan)
+{
+       if (!chan)
+               return;
+
+       i2c_del_adapter(&chan->adapter);
+       kfree(chan);
+}
+
diff --git a/psb-kernel-source-4.41.1/intel_lvds.c b/psb-kernel-source-4.41.1/intel_lvds.c
new file mode 100644 (file)
index 0000000..208f3ad
--- /dev/null
@@ -0,0 +1,941 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_lvds.h"
+
+#include <acpi/acpi_drivers.h>
+
+int drm_intel_ignore_acpi = 0;
+MODULE_PARM_DESC(ignore_acpi, "Ignore ACPI");
+module_param_named(ignore_acpi, drm_intel_ignore_acpi, int, 0600);
+
+uint8_t blc_type;
+uint8_t blc_pol;
+uint8_t blc_freq;
+uint8_t blc_minbrightness;
+uint8_t blc_i2caddr;
+uint8_t blc_brightnesscmd;
+int lvds_backlight;    /* restore backlight to this value */
+
+struct intel_i2c_chan *lvds_i2c_bus; 
+u32 CoreClock;
+u32 PWMControlRegFreq;
+
+unsigned char * dev_OpRegion = NULL;
+unsigned int dev_OpRegionSize;
+
+#define PCI_PORT5_REG80_FFUSE                          0xD0058000
+#define PCI_PORT5_REG80_MAXRES_INT_EN          0x0040
+#define MAX_HDISPLAY 800
+#define MAX_VDISPLAY 480
+bool sku_bMaxResEnableInt = false;
+
+/** Set BLC through I2C*/
+static int
+LVDSI2CSetBacklight(struct drm_device *dev, unsigned char ch)
+{
+       u8 out_buf[2];
+       struct i2c_msg msgs[] = {
+               { 
+                       .addr = lvds_i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = out_buf,
+               }
+       };
+
+       DRM_INFO("LVDSI2CSetBacklight: the slave_addr is 0x%x, "
+                "the backlight value is %d\n", lvds_i2c_bus->slave_addr, ch);
+
+       out_buf[0] = blc_brightnesscmd;
+       out_buf[1] = ch;
+
+       if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+       {
+               DRM_INFO("LVDSI2CSetBacklight: i2c_transfer done\n");
+               return true;
+       }
+
+       DRM_ERROR("msg: i2c_transfer error\n");
+       return false;
+}
+
+/**
+ * Calculate PWM control register value.
+ */
+static int 
+LVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
+{
+       unsigned long value = 0;
+
+       DRM_INFO("Enter LVDSCalculatePWMCtrlRegFreq.\n");
+       if (blc_freq == 0) {
+               DRM_ERROR("LVDSCalculatePWMCtrlRegFreq:  Frequency Requested is 0.\n");
+               return FALSE;
+       }
+       value = (CoreClock * MHz);
+       value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
+       value = (value * BLC_PWM_PRECISION_FACTOR);
+       value = (value / blc_freq);
+       value = (value / BLC_PWM_PRECISION_FACTOR);
+
+       if (value > (unsigned long)BLC_MAX_PWM_REG_FREQ ||
+                       value < (unsigned long)BLC_MIN_PWM_REG_FREQ) {
+               return FALSE;
+       } else {
+               PWMControlRegFreq = ((u32)value & ~BLC_PWM_LEGACY_MODE_ENABLE);
+               return TRUE;
+       }
+}
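
With numbers plugged in, the chain of divisions above reduces to
reg = CoreClock * MHz / (calc_constant * blc_freq). All values in this sketch
are invented; the real BLC_* constants live in intel_lvds.h, which this hunk
does not show, and CoreClock/blc_freq come from the BIOS:

    #include <stdio.h>

    int main(void)
    {
            unsigned long core_mhz = 200, mhz = 1000000;
            unsigned long calc_const = 32, precision = 100, blc_freq = 200;

            unsigned long v = core_mhz * mhz;
            v /= calc_const;
            v *= precision;
            v /= blc_freq;
            v /= precision;

            printf("PWM ctl freq field: %lu\n", v); /* 31250 */
            return 0;
    }
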
+
+/**
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32
+LVDSGetPWMMaxBacklight(struct drm_device *dev)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       u32 max_pwm_blc = 0;
+
+       max_pwm_blc = ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+                      BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+       if (!(max_pwm_blc & BLC_MAX_PWM_REG_FREQ)) {
+               if (LVDSCalculatePWMCtrlRegFreq(dev)) {
+                       max_pwm_blc = PWMControlRegFreq;
+               }
+       }
+
+       DRM_INFO("LVDSGetPWMMaxBacklight: the max_pwm_blc is %d.\n", max_pwm_blc);
+       return max_pwm_blc;
+}
+
+
+/**
+ * Sets the backlight level.
+ *
+ * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
+ */
+static void intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       //u32 blc_pwm_ctl;
+
+       /*           
+       blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+               (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+        */
+       u32 newbacklight = 0;
+
+       DRM_INFO("intel_lvds_set_backlight: the level is %d\n", level);
+
+       if (blc_type == BLC_I2C_TYPE) {
+               newbacklight = BRIGHTNESS_MASK & ((unsigned long)level *
+                               BRIGHTNESS_MASK / BRIGHTNESS_MAX_LEVEL);
+
+               if (blc_pol == BLC_POLARITY_INVERSE) {
+                       newbacklight = BRIGHTNESS_MASK - newbacklight;
+               }
+
+               LVDSI2CSetBacklight(dev, newbacklight);
+
+       } else if (blc_type == BLC_PWM_TYPE) {
+               u32 max_pwm_blc = LVDSGetPWMMaxBacklight(dev);
+
+               u32 blc_pwm_duty_cycle;
+
+               /* Prevent the LVDS from going completely black */
+               if (level < 20) {
+                       level = 20;
+               }
+               blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+               if (blc_pol == BLC_POLARITY_INVERSE) {
+                       blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+               }
+
+               blc_pwm_duty_cycle &= BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+
+               I915_WRITE(BLC_PWM_CTL,
+                               (max_pwm_blc << BACKLIGHT_PWM_CTL_SHIFT)| (blc_pwm_duty_cycle));
+       }
+}
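+
+/*
+ * Worked PWM example for the function above (values assumed): with
+ * max_pwm_blc = 31250 and level = 50,
+ *
+ *   blc_pwm_duty_cycle = 50 * 31250 / 100 (BRIGHTNESS_MAX_LEVEL) = 15625
+ *   15625 & BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xfffe)            = 15624
+ *
+ * so BLC_PWM_CTL is written with (31250 << 16) | 15624: the frequency field
+ * in the high half, the duty cycle (polarity bit cleared) in the low half.
+ */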
+
+/**
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+       return BRIGHTNESS_MAX_LEVEL;
+       /*
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+    
+       return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+               BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+       */
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       u32 pp_status;
+
+       DRM_INFO("intel_lvds_set_power: %d\n", on);
+       if (on) {
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+                               POWER_TARGET_ON);
+               do {
+                       pp_status = I915_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+
+               intel_lvds_set_backlight(dev, lvds_backlight);
+       } else {
+               intel_lvds_set_backlight(dev, 0);
+
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
+                               ~POWER_TARGET_ON);
+               do {
+                       pp_status = I915_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+}
+
+static void intel_lvds_dpms(struct drm_output *output, int mode)
+{
+       struct drm_device *dev = output->dev;
+
+       DRM_INFO("intel_lvds_dpms: the mode is %d\n", mode);
+       if (mode == DPMSModeOn)
+               intel_lvds_set_power(dev, true);
+       else
+               intel_lvds_set_power(dev, false);
+
+       /* XXX: We never power down the LVDS pairs. */
+}
+
+static void intel_lvds_save(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+       dev_priv->savePP_ON = I915_READ(LVDSPP_ON);
+       dev_priv->savePP_OFF = I915_READ(LVDSPP_OFF);
+       dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+       dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
+       dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+                                      BACKLIGHT_DUTY_CYCLE_MASK);
+
+       /*
+        * If the light is off at server startup, just make it full brightness
+        */
+       if (dev_priv->backlight_duty_cycle == 0)
+               lvds_backlight =
+                       intel_lvds_get_max_backlight(dev);
+}
+
+static void intel_lvds_restore(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+       I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+       I915_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
+       I915_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
+       I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
+       I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+       if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
+               intel_lvds_set_power(dev, true);
+       else
+               intel_lvds_set_power(dev, false);
+}
+
+static int intel_lvds_mode_valid(struct drm_output *output,
+                                struct drm_display_mode *mode)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+
+       if (fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+
+       if (IS_POULSBO(dev) && sku_bMaxResEnableInt) {
+               if (mode->hdisplay > MAX_HDISPLAY)
+                       return MODE_PANEL;
+               if (mode->vdisplay > MAX_VDISPLAY)
+                       return MODE_PANEL;
+       }
+
+       return MODE_OK;
+}
+
+static bool intel_lvds_mode_fixup(struct drm_output *output,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = output->crtc->driver_private;
+       struct drm_output *tmp_output;
+
+       /* Should never happen!! */
+       if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+               DRM_ERROR("Can't support LVDS on pipe A\n");
+               return false;
+       }
+
+       /* Should never happen!! */
+       list_for_each_entry(tmp_output, &dev->mode_config.output_list, head) {
+               if (tmp_output != output && tmp_output->crtc == output->crtc) {
+                       DRM_ERROR("Can't enable LVDS and another "
+                              "output on the same pipe\n");
+                       return false;
+               }
+       }
+
+       /*
+        * If we have timings from the BIOS for the panel, put them in
+        * to the adjusted mode.  The CRTC will be set up for this mode,
+        * with the panel scaling set up to source from the H/VDisplay
+        * of the original mode.
+        */
+       if (dev_priv->panel_fixed_mode != NULL) {
+               adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
+               adjusted_mode->hsync_start =
+                       dev_priv->panel_fixed_mode->hsync_start;
+               adjusted_mode->hsync_end =
+                       dev_priv->panel_fixed_mode->hsync_end;
+               adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
+               adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
+               adjusted_mode->vsync_start =
+                       dev_priv->panel_fixed_mode->vsync_start;
+               adjusted_mode->vsync_end =
+                       dev_priv->panel_fixed_mode->vsync_end;
+               adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
+               adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
+               drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+       }
+
+       /*
+        * XXX: It would be nice to support lower refresh rates on the
+        * panels to reduce power consumption, and perhaps match the
+        * user's requested refresh rate.
+        */
+
+       return true;
+}
+
+static void intel_lvds_prepare(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+       DRM_INFO("intel_lvds_prepare\n");
+       dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+                                      BACKLIGHT_DUTY_CYCLE_MASK);
+
+       intel_lvds_set_power(dev, false);
+}
+
+static void intel_lvds_commit( struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+       DRM_INFO("intel_lvds_commit\n");
+       if (dev_priv->backlight_duty_cycle == 0)
+               lvds_backlight =
+                       intel_lvds_get_max_backlight(dev);
+
+       intel_lvds_set_power(dev, true);
+}
+
+static void intel_lvds_mode_set(struct drm_output *output,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = output->crtc->driver_private;
+       u32 pfit_control;
+
+       /*
+        * The LVDS pin pair will already have been turned on in the
+        * intel_crtc_mode_set since it has a large impact on the DPLL
+        * settings.
+        */
+
+       /*
+        * Enable automatic panel scaling so that non-native modes fill the
+        * screen.  Should be enabled before the pipe is enabled, according to
+        * register description and PRM.
+        */
+       if (mode->hdisplay != adjusted_mode->hdisplay ||
+           mode->vdisplay != adjusted_mode->vdisplay)
+               pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+                               HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+                               HORIZ_INTERP_BILINEAR);
+       else
+               pfit_control = 0;
+
+       if (!IS_I965G(dev)) {
+               if (dev_priv->panel_wants_dither)
+                       pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+       }
+       else
+               pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+       I915_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * This always returns OUTPUT_STATUS_CONNECTED.  This output should only have
+ * been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_output_status intel_lvds_detect(struct drm_output *output)
+{
+       return output_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available.
+ */
+static int intel_lvds_get_modes(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       struct intel_output *intel_output = output->driver_private;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct edid *edid;
+
+       /* Try reading the EDID over DDC from the adapter */
+       edid = (struct edid *)drm_ddc_read(&intel_output->ddc_bus->adapter);
+
+       if (!edid) {
+               DRM_INFO("%s: no EDID data from device, reading ACPI _DDC data.\n",
+                        output->name);
+               edid = kzalloc(sizeof(struct edid), GFP_KERNEL);
+               if (edid)
+                       drm_get_acpi_edid(ACPI_EDID_LCD, (char *)edid, 128);
+       }
+
+       if (edid)
+               drm_add_edid_modes(output, edid);
+
+       /* Didn't get an EDID */
+       if (!output->monitor_info) {
+               struct drm_display_info *dspinfo;
+               dspinfo = kzalloc(sizeof(*output->monitor_info), GFP_KERNEL);
+               if (!dspinfo)
+                       goto out;
+
+               /* Set wide sync ranges so we get all modes
+                * handed to valid_mode for checking
+                */
+               dspinfo->min_vfreq = 0;
+               dspinfo->max_vfreq = 200;
+               dspinfo->min_hfreq = 0;
+               dspinfo->max_hfreq = 200;
+               output->monitor_info = dspinfo;
+       }
+
+out:
+       if (dev_priv->panel_fixed_mode != NULL) {
+               struct drm_display_mode *mode =
+                       drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+               drm_mode_probed_add(output, mode);
+               return 1;
+       }
+
+       return 0;
+}
+
+/* added by alek du to add /sys/class/backlight interface */
+static int update_bl_status(struct backlight_device *bd)
+{
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20)
+        int value = bd->props->brightness;
+#else
+        int value = bd->props.brightness;
+#endif
+       
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
+       struct drm_device *dev = class_get_devdata (&bd->class_dev);
+#else
+       struct drm_device *dev = bl_get_data(bd);
+#endif
+       lvds_backlight = value;
+       intel_lvds_set_backlight(dev, value);
+       /*value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
+       intel_lvds_set_power(dev,value);*/
+       return 0;
+}
+
+static int read_brightness(struct backlight_device *bd)
+{
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20)
+        return bd->props->brightness;
+#else
+        return bd->props.brightness;
+#endif
+}
+
+static struct backlight_device *psbbl_device = NULL;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20)
+static struct backlight_properties psbbl_ops = {
+        .get_brightness = read_brightness,
+        .update_status = update_bl_status,
+        .max_brightness = BRIGHTNESS_MAX_LEVEL, 
+};
+#else
+static struct backlight_ops psbbl_ops = {
+        .get_brightness = read_brightness,
+        .update_status = update_bl_status,
+};
+#endif
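+
+/*
+ * Once registered (see intel_lvds_init() below, which uses the name
+ * "psblvds"), the backlight can be driven from userspace through the
+ * standard sysfs backlight class interface, e.g.:
+ *
+ *   cat /sys/class/backlight/psblvds/max_brightness    # -> 100
+ *   echo 50 > /sys/class/backlight/psblvds/brightness  # 50% brightness
+ */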
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @output: output to free
+ *
+ * Unregister the DDC bus for this output then free the driver private
+ * structure.
+ */
+static void intel_lvds_destroy(struct drm_output *output)
+{
+       struct intel_output *intel_output = output->driver_private;
+
+       if (psbbl_device)
+               backlight_device_unregister(psbbl_device);
+       if (dev_OpRegion != NULL)
+               iounmap(dev_OpRegion);
+       intel_i2c_destroy(intel_output->ddc_bus);
+       intel_i2c_destroy(lvds_i2c_bus);
+       kfree(output->driver_private);
+}
+
+static const struct drm_output_funcs intel_lvds_output_funcs = {
+       .dpms = intel_lvds_dpms,
+       .save = intel_lvds_save,
+       .restore = intel_lvds_restore,
+       .mode_valid = intel_lvds_mode_valid,
+       .mode_fixup = intel_lvds_mode_fixup,
+       .prepare = intel_lvds_prepare,
+       .mode_set = intel_lvds_mode_set,
+       .commit = intel_lvds_commit,
+       .detect = intel_lvds_detect,
+       .get_modes = intel_lvds_get_modes,
+       .cleanup = intel_lvds_destroy
+};
+
+int intel_get_acpi_dod(char *method)
+{
+       int status;
+       int found = 0;
+       int i;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *dod = NULL;
+       union acpi_object *obj;
+
+       status = acpi_evaluate_object(NULL, method, NULL, &buffer);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       dod = buffer.pointer;
+       if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
+               status = -EFAULT;
+               goto out;
+       }
+
+       DRM_DEBUG("Found %d video heads in _DOD\n", dod->package.count);
+
+       for (i = 0; i < dod->package.count; i++) {
+               obj = &dod->package.elements[i];
+
+               if (obj->type != ACPI_TYPE_INTEGER) {
+                       DRM_DEBUG("Invalid _DOD data\n");
+               } else {
+                       DRM_DEBUG("dod element[%d] = 0x%x\n", i,
+                                 (int)obj->integer.value);
+
+                       /* look for an LVDS type */
+                       if (obj->integer.value & 0x00000400) 
+                               found = 1;
+               }
+       }
+      out:
+       kfree(buffer.pointer);
+       return found;
+}
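+
+/*
+ * Rough sketch of the VBT layout walked by intel_lvds_init() below
+ * (offsets per the structures in intel_lvds.h; sizes indicative only):
+ *
+ *   OpRegion ("IntelGraphicsMem" signature, found via PCI config reg 0xFC)
+ *     +0x400 (OFFSET_OPREGION_VBT): struct vbt_header
+ *       +vbt->bdb_offset: struct bdb_header ("BIOS_DATA_BLOCK ")
+ *         followed by blocks of [id: 1 byte][size: 2 bytes LE][payload]:
+ *           id 40: struct lvds_bdb_1   (panel type, capabilities)
+ *           id 43: struct lvds_bdb_blc (+ per-panel struct lvds_blc entries
+ *                                        with backlight type/polarity/freq)
+ */
+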
+/**
+ * intel_lvds_init - setup LVDS outputs on this device
+ * @dev: drm device
+ *
+ * Create the output, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void intel_lvds_init(struct drm_device *dev)
+{
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct drm_output *output;
+       struct intel_output *intel_output;
+       struct drm_display_mode *scan; /* *modes, *bios_mode; */
+       struct drm_crtc *crtc;
+       u32 lvds;
+       int pipe;
+
+       if (!drm_intel_ignore_acpi && !intel_get_acpi_dod(ACPI_DOD))
+               return;
+
+       output = drm_output_create(dev, &intel_lvds_output_funcs, "LVDS");
+       if (!output)
+               return;
+
+       intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
+       if (!intel_output) {
+               drm_output_destroy(output);
+               return;
+       }
+
+       intel_output->type = INTEL_OUTPUT_LVDS;
+       output->driver_private = intel_output;
+       output->subpixel_order = SubPixelHorizontalRGB;
+       output->interlace_allowed = FALSE;
+       output->doublescan_allowed = FALSE;
+
+       /* initialize the I2C bus and BLC data */
+       lvds_i2c_bus = intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+       if (!lvds_i2c_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev, "i2c bus registration "
+                          "failed.\n");
+               return;
+       }
+       lvds_i2c_bus->slave_addr = 0x2c;        /* 0x58 */
+       lvds_backlight = BRIGHTNESS_MAX_LEVEL;
+       blc_type = 0;
+       blc_pol = 0;
+
+       if (1) { /* get the BLC init data from the VBT */
+               u32 OpRegion_Phys;
+               unsigned int OpRegion_Size = 0x100;
+               OpRegionPtr OpRegion;
+               char *OpRegion_String = "IntelGraphicsMem";
+
+               struct vbt_header *vbt;
+               struct bdb_header *bdb;
+               int vbt_off, bdb_off, bdb_block_off, block_size;
+               int panel_type = -1;
+               unsigned char *bios;
+               unsigned char *vbt_buf;
+
+               pci_read_config_dword(dev->pdev, 0xFC, &OpRegion_Phys);
+
+               //dev_OpRegion =  phys_to_virt(OpRegion_Phys);
+               dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_Size);
+               dev_OpRegionSize = OpRegion_Size;
+
+               OpRegion = (OpRegionPtr) dev_OpRegion;
+
+               if (!memcmp(OpRegion->sign, OpRegion_String, 16)) {
+                       unsigned int OpRegion_NewSize;
+
+                       OpRegion_NewSize = OpRegion->size * 1024;
+
+                       dev_OpRegionSize = OpRegion_NewSize;
+                       
+                       iounmap(dev_OpRegion);
+                       dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_NewSize);
+               } else {
+                       iounmap(dev_OpRegion);
+                       dev_OpRegion = NULL;
+               }
+
+               if ((dev_OpRegion != NULL) && (dev_OpRegionSize >= OFFSET_OPREGION_VBT)) {
+                       DRM_INFO("intel_lvds_init: OpRegion has the VBT address\n");
+                       vbt_buf = dev_OpRegion + OFFSET_OPREGION_VBT;
+                       vbt = (struct vbt_header *)(dev_OpRegion + OFFSET_OPREGION_VBT);
+               } else {
+                       DRM_INFO("intel_lvds_init: No OpRegion, using the BIOS at fixed address 0xc0000\n");
+                       bios = phys_to_virt(0xC0000);
+                       if (*((u16 *)bios) != 0xAA55) {
+                               bios = NULL;
+                               DRM_ERROR("invalid BIOS signature\n");
+                               goto blc_out;
+                       }
+                       vbt_off = bios[0x1a] | (bios[0x1a + 1] << 8);
+                       DRM_INFO("intel_lvds_init: the VBT offset is %x\n", vbt_off);
+                       vbt_buf = bios + vbt_off;
+                       vbt = (struct vbt_header *)(bios + vbt_off);
+               }
+
+               bdb_off = vbt->bdb_offset;
+               bdb = (struct bdb_header *)(vbt_buf + bdb_off);
+
+               DRM_INFO("intel_lvds_init: The bdb->signature is %s, the bdb_off is %d\n",bdb->signature, bdb_off);
+
+               if (memcmp(bdb->signature, "BIOS_DATA_BLOCK ", 16) != 0) {
+                       DRM_ERROR("the VBT is invalid\n");
+                       goto blc_out;
+               }
+
+               for (bdb_block_off = bdb->header_size; bdb_block_off < bdb->bdb_size;
+                               bdb_block_off += block_size) {
+                       int start = bdb_off + bdb_block_off;
+                       int id, num_entries;
+                       struct lvds_bdb_1 *lvds1;
+                       struct lvds_blc *lvdsblc;
+                       struct lvds_bdb_blc *bdbblc;
+
+                       id = vbt_buf[start];
+                       block_size = (vbt_buf[start + 1] | (vbt_buf[start + 2] << 8)) + 3;
+                       switch (id) {
+                               case 40:
+                                       lvds1 = (struct lvds_bdb_1 *)(vbt_buf+ start);
+                                       panel_type = lvds1->panel_type;
+                                       //if (lvds1->caps & LVDS_CAP_DITHER)
+                                       //      *panelWantsDither = TRUE;
+                                       break;
+
+                               case 43:
+                                       bdbblc = (struct lvds_bdb_blc *)(vbt_buf + start);
+                                       num_entries = bdbblc->table_size ? (bdbblc->size -
+                                                       sizeof(bdbblc->table_size)) / bdbblc->table_size : 0;
+                                       if (num_entries && bdbblc->table_size == sizeof(struct lvds_blc)) {
+                                               lvdsblc = (struct lvds_blc *)(vbt_buf + start + sizeof(struct lvds_bdb_blc));
+                                               lvdsblc += panel_type;
+                                               blc_type = lvdsblc->type;
+                                               blc_pol = lvdsblc->pol;
+                                               blc_freq = lvdsblc->freq;
+                                               blc_minbrightness = lvdsblc->minbrightness;
+                                               blc_i2caddr = lvdsblc->i2caddr;
+                                               blc_brightnesscmd = lvdsblc->brightnesscmd;
+                                               DRM_INFO("intel_lvds_init: BLC Data in BIOS VBT tables: datasize=%d paneltype=%d \
+                                                               type=0x%02x pol=0x%02x freq=0x%04x minlevel=0x%02x    \
+                                                               i2caddr=0x%02x cmd=0x%02x \n",
+                                                               0,
+                                                               panel_type,
+                                                               lvdsblc->type,
+                                                               lvdsblc->pol,
+                                                               lvdsblc->freq,
+                                                               lvdsblc->minbrightness,
+                                                               lvdsblc->i2caddr,
+                                                               lvdsblc->brightnesscmd);
+                                       }
+                                       break;
+                       }
+               }
+
+       }
+
+       if (1) {
+               /* get the Core Clock for calculating the MAX PWM value and
+                * check whether MaxResEnableInt is set */
+               struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+               u32 clock;
+               u32 sku_value = 0;
+               unsigned int CoreClocks[] = {
+                       100,
+                       133,
+                       150,
+                       178,
+                       200,
+                       266,
+                       266,
+                       266
+               };
+               if (pci_root) {
+                       pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+                       pci_read_config_dword(pci_root, 0xD4, &clock);
+                       CoreClock = CoreClocks[clock & 0x07];
+                       DRM_INFO("intel_lvds_init: the CoreClock is %d\n", CoreClock);
+                       
+                       pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
+                       pci_read_config_dword(pci_root, 0xD4, &sku_value);
+                       sku_bMaxResEnableInt = (sku_value & PCI_PORT5_REG80_MAXRES_INT_EN) ? true : false;
+                       DRM_INFO("intel_lvds_init: sku_value is 0x%08x\n", sku_value);
+                       DRM_INFO("intel_lvds_init: sku_bMaxResEnableInt is %d\n", sku_bMaxResEnableInt);
+               }
+       }
+
+       if ((blc_type == BLC_I2C_TYPE) || (blc_type == BLC_PWM_TYPE)) {
+               /* add /sys/class/backlight interface as standard */
+               psbbl_device = backlight_device_register("psblvds", &dev->pdev->dev, dev, &psbbl_ops);
+               if (psbbl_device) {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20)
+                       down(&psbbl_device->sem);
+                       psbbl_device->props->max_brightness = BRIGHTNESS_MAX_LEVEL;
+                       psbbl_device->props->brightness = lvds_backlight;
+                       psbbl_device->props->power = FB_BLANK_UNBLANK;
+                       psbbl_device->props->update_status(psbbl_device);
+                       up(&psbbl_device->sem);
+#else
+                       psbbl_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+                       psbbl_device->props.brightness = lvds_backlight;
+                       psbbl_device->props.power = FB_BLANK_UNBLANK;
+                       backlight_update_status(psbbl_device);
+#endif
+               }
+       }
+
+blc_out:
+
+       /* Set up the DDC bus. */
+       intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+       if (!intel_output->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                          "failed.\n");
+               intel_i2c_destroy(lvds_i2c_bus);
+               return;
+       }
+
+       /*
+        * Attempt to get the fixed panel mode from DDC.  Assume that the
+        * preferred mode is the right one.
+        */
+       intel_lvds_get_modes(output);
+
+       list_for_each_entry(scan, &output->probed_modes, head) {
+               if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+                       dev_priv->panel_fixed_mode = 
+                               drm_mode_duplicate(dev, scan);
+                       goto out; /* FIXME: check for quirks */
+               }
+       }
+
+       /*
+        * If we didn't get EDID, try checking if the panel is already turned
+        * on.  If so, assume that whatever is currently programmed is the
+        * correct mode.
+        */
+       lvds = I915_READ(LVDS);
+       pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+       crtc = intel_get_crtc_from_pipe(dev, pipe);
+               
+       if (crtc && (lvds & LVDS_PORT_EN)) {
+               dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
+               if (dev_priv->panel_fixed_mode) {
+                       dev_priv->panel_fixed_mode->type |=
+                               DRM_MODE_TYPE_PREFERRED;
+                       goto out; /* FIXME: check for quirks */
+               }
+       }
+
+       /* If we still don't have a mode after all that, give up. */
+       if (!dev_priv->panel_fixed_mode)
+               goto failed;
+
+       /* FIXME: probe the BIOS for modes and check for LVDS quirks */
+#if 0
+       /* Get the LVDS fixed mode out of the BIOS.  We should support LVDS
+        * with the BIOS being unavailable or broken, but lack the
+        * configuration options for now.
+        */
+       bios_mode = intel_bios_get_panel_mode(pScrn);
+       if (bios_mode != NULL) {
+               if (dev_priv->panel_fixed_mode != NULL) {
+                       if (dev_priv->debug_modes &&
+                           !xf86ModesEqual(dev_priv->panel_fixed_mode,
+                                           bios_mode))
+                       {
+                               xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+                                          "BIOS panel mode data doesn't match probed data, "
+                                          "continuing with probed.\n");
+                               xf86DrvMsg(pScrn->scrnIndex, X_INFO, "BIOS mode:\n");
+                               xf86PrintModeline(pScrn->scrnIndex, bios_mode);
+                               xf86DrvMsg(pScrn->scrnIndex, X_INFO, "probed mode:\n");
+                               xf86PrintModeline(pScrn->scrnIndex, dev_priv->panel_fixed_mode);
+                               xfree(bios_mode->name);
+                               xfree(bios_mode);
+                       }
+               }  else {
+                       dev_priv->panel_fixed_mode = bios_mode;
+               }
+       } else {
+               xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+                          "Couldn't detect panel mode.  Disabling panel\n");
+               goto disable_exit;
+       }
+
+       /*
+        * Blacklist machines with BIOSes that list an LVDS panel without
+        * actually having one.
+        */
+       if (dev_priv->PciInfo->chipType == PCI_CHIP_I945_GM) {
+               /* aopen mini pc */
+               if (dev_priv->PciInfo->subsysVendor == 0xa0a0)
+                       goto disable_exit;
+
+               if ((dev_priv->PciInfo->subsysVendor == 0x8086) &&
+                   (dev_priv->PciInfo->subsysCard == 0x7270)) {
+                       /* It's a Mac Mini or Macbook Pro.
+                        *
+                        * Apple hardware is out to get us.  The macbook pro
+                        * has a real LVDS panel, but the mac mini does not,
+                        * and they have the same device IDs.  We'll
+                        * distinguish by panel size, on the assumption
+                        * that Apple isn't about to make any machines with an
+                        * 800x600 display.
+                        */
+
+                       if (dev_priv->panel_fixed_mode != NULL &&
+                           dev_priv->panel_fixed_mode->HDisplay == 800 &&
+                           dev_priv->panel_fixed_mode->VDisplay == 600)
+                       {
+                               xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+                                          "Suspected Mac Mini, ignoring the LVDS\n");
+                               goto disable_exit;
+                       }
+               }
+       }
+
+#endif
+
+out:
+       return;
+
+failed:
+        DRM_DEBUG("No LVDS modes found, disabling.\n");
+       drm_output_destroy(output); /* calls intel_lvds_destroy above */
+}
diff --git a/psb-kernel-source-4.41.1/intel_lvds.h b/psb-kernel-source-4.41.1/intel_lvds.h
new file mode 100644 (file)
index 0000000..bc7220f
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * @file LVDS definitions and structures.
+ */
+
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPE 0x02
+#define BRIGHTNESS_MASK 0xff
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+#define BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xfffe)
+#define BACKLIGHT_PWM_CTL_SHIFT (16)
+#define BLC_MAX_PWM_REG_FREQ 0xfffe
+#define BLC_MIN_PWM_REG_FREQ 0x2
+#define BLC_PWM_LEGACY_MODE_ENABLE 0x0001
+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define OFFSET_OPREGION_VBT    0x400
+
+typedef struct OpRegion_Header
+{
+       char sign[16];
+       u32 size;
+       u32 over;
+       char sver[32];
+       char vver[16];
+       char gver[16];
+       u32 mbox;
+       char rhd1[164];
+} OpRegionRec, *OpRegionPtr;
+
+struct vbt_header
+{
+       char signature[20];             /**< Always starts with "$VBT" */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 vbt_size;                   /**< in bytes */
+       u8 vbt_checksum;
+       u8 reserved0;
+       u32 bdb_offset;                 /**< from beginning of VBT */
+       u32 aim1_offset;                /**< from beginning of VBT */
+       u32 aim2_offset;                /**< from beginning of VBT */
+       u32 aim3_offset;                /**< from beginning of VBT */
+       u32 aim4_offset;                /**< from beginning of VBT */
+} __attribute__ ((packed));
+
+struct bdb_header
+{
+       char signature[16];             /**< Always 'BIOS_DATA_BLOCK' */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 bdb_size;                   /**< in bytes */
+} __attribute__ ((packed));    
+
+#define LVDS_CAP_EDID                  (1 << 6)
+#define LVDS_CAP_DITHER                        (1 << 5)
+#define LVDS_CAP_PFIT_AUTO_RATIO       (1 << 4)
+#define LVDS_CAP_PFIT_GRAPHICS_MODE    (1 << 3)
+#define LVDS_CAP_PFIT_TEXT_MODE                (1 << 2)
+#define LVDS_CAP_PFIT_GRAPHICS         (1 << 1)
+#define LVDS_CAP_PFIT_TEXT             (1 << 0)
+struct lvds_bdb_1
+{
+       u8 id;                          /**< 40 */
+       u16 size;
+       u8 panel_type;
+       u8 reserved0;
+       u16 caps;
+} __attribute__ ((packed));
+
+struct lvds_bdb_2_fp_params
+{
+       u16 x_res;
+       u16 y_res;
+       u32 lvds_reg;
+       u32 lvds_reg_val;
+       u32 pp_on_reg;
+       u32 pp_on_reg_val;
+       u32 pp_off_reg;
+       u32 pp_off_reg_val;
+       u32 pp_cycle_reg;
+       u32 pp_cycle_reg_val;
+       u32 pfit_reg;
+       u32 pfit_reg_val;
+       u16 terminator;
+} __attribute__ ((packed));
+
+struct lvds_bdb_2_fp_edid_dtd
+{
+       u16 dclk;               /**< In 10khz */
+       u8 hactive;
+       u8 hblank;
+       u8 high_h;              /**< 7:4 = hactive 11:8, 3:0 = hblank 11:8 */
+       u8 vactive;
+       u8 vblank;
+       u8 high_v;              /**< 7:4 = vactive 11:8, 3:0 = vblank 11:8 */
+       u8 hsync_off;
+       u8 hsync_pulse_width;
+       u8 vsync_off;
+       u8 high_hsync_off;      /**< 7:6 = hsync off 9:8 */
+       u8 h_image;
+       u8 v_image;
+       u8 max_hv;
+       u8 h_border;
+       u8 v_border;
+       u8 flags;
+#define FP_EDID_FLAG_VSYNC_POSITIVE    (1 << 2)
+#define FP_EDID_FLAG_HSYNC_POSITIVE    (1 << 1)
+} __attribute__ ((packed));
+
+struct lvds_bdb_2_entry
+{
+       u16 fp_params_offset;           /**< From beginning of BDB */
+       u8 fp_params_size;
+       u16 fp_edid_dtd_offset;
+       u8 fp_edid_dtd_size;
+       u16 fp_edid_pid_offset;
+       u8 fp_edid_pid_size;
+} __attribute__ ((packed));
+
+struct lvds_bdb_2
+{
+       u8 id;                          /**< 41 */
+       u16 size;
+       u8 table_size;                 /* not sure on this one */
+       struct lvds_bdb_2_entry panels[16];
+} __attribute__ ((packed));
+
+
+struct lvds_bdb_blc
+{
+       u8 id;                          /**< 43 */
+       u16 size;
+       u8 table_size;
+} __attribute__ ((packed));
+
+struct lvds_blc
+{
+       u8 type:2;
+       u8 pol:1;
+       u8 gpio:3;
+       u8 gmbus:2;
+       u16 freq;
+       u8 minbrightness;
+       u8 i2caddr;
+       u8 brightnesscmd;
+       /* more... */
+} __attribute__ ((packed));
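+
+/*
+ * Example decoding of the first lvds_blc byte (assuming LSB-first bitfield
+ * layout, as GCC produces on little-endian x86; values assumed): 0x02 gives
+ * type = 2 (BLC_PWM_TYPE) with pol = 0 (BLC_POLARITY_NORMAL), while 0x05
+ * would give type = 1 (BLC_I2C_TYPE) with pol = 1 (BLC_POLARITY_INVERSE).
+ */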
+
diff --git a/psb-kernel-source-4.41.1/intel_modes.c b/psb-kernel-source-4.41.1/intel_modes.c
new file mode 100644 (file)
index 0000000..84eea98
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+
+/**
+ * intel_ddc_probe
+ *
+ */
+bool intel_ddc_probe(struct drm_output *output)
+{
+       struct intel_output *intel_output = output->driver_private;
+       u8 out_buf[] = { 0x0, 0x0};
+       u8 buf[2];
+       int ret;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = 0x50,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = 0x50,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = buf,
+               }
+       };
+
+       ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
+       if (ret == 2)
+               return true;
+
+       return false;
+}
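+
+/*
+ * 0x50 is the fixed I2C address of the EDID EEPROM defined by the DDC
+ * specification, so a successful one-byte write/read pair at that address
+ * is taken as "a monitor is attached". No EDID contents are parsed here;
+ * that is done by intel_ddc_get_modes() below.
+ */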
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @output: DRM output device to use
+ *
+ * Fetch the EDID information from @output using the DDC bus.
+ */
+int intel_ddc_get_modes(struct drm_output *output)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct edid *edid;
+       int ret = 0;
+
+       edid = drm_get_edid(output, &intel_output->ddc_bus->adapter);
+       if (edid) {
+               ret = drm_add_edid_modes(output, edid);
+               kfree(edid);
+       }
+       return ret;
+}
diff --git a/psb-kernel-source-4.41.1/intel_sdvo.c b/psb-kernel-source-4.41.1/intel_sdvo.c
new file mode 100644 (file)
index 0000000..3351759
--- /dev/null
@@ -0,0 +1,4043 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include "drm_crtc.h"
+#include "intel_sdvo_regs.h"
+
+#include <linux/proc_fs.h>
+#include <linux/wait.h>
+
+struct proc_dir_entry *proc_sdvo_dir = NULL;
+wait_queue_head_t hotplug_queue;
+
+#define MAX_VAL 1000
+#define DPLL_CLOCK_PHASE_9 (1<<9 | 1<<12)
+
+#define PCI_PORT5_REG80_FFUSE                          0xD0058000
+#define PCI_PORT5_REG80_SDVO_DISABLE           0x0020
+
+#define SII_1392_WA
+#ifdef SII_1392_WA
+int SII_1392=0;
+extern int drm_psb_no_fb;
+#endif
+
+typedef struct _EXTVDATA
+{
+    u32 Value;
+    u32 Default;
+    u32 Min;
+    u32 Max;
+    u32 Step;                 // arbitrary unit (e.g. pixel, percent) returned during VP_COMMAND_GET
+} EXTVDATA, *PEXTVDATA;
+
+typedef struct _sdvo_display_params
+{
+    EXTVDATA FlickerFilter;            /* Flicker Filter : for TV only */
+    EXTVDATA AdaptiveFF;               /* Adaptive Flicker Filter : for TV only */
+    EXTVDATA TwoD_FlickerFilter;       /* 2D Flicker Filter : for TV only */
+    EXTVDATA Brightness;               /* Brightness : for TV & CRT only */
+    EXTVDATA Contrast;                 /* Contrast : for TV & CRT only */
+    EXTVDATA PositionX;                /* Horizontal Position : for all devices */
+    EXTVDATA PositionY;                /* Vertical Position : for all devices */
+    /* EXTVDATA OverScanX;                Horizontal Overscan : for TV only */
+    EXTVDATA DotCrawl;                 /* Dot crawl value : for TV only */
+    EXTVDATA ChromaFilter;             /* Chroma Filter : for TV only */
+    /* EXTVDATA OverScanY;                Vertical Overscan : for TV only */
+    EXTVDATA LumaFilter;               /* Luma Filter : for TV only */
+    EXTVDATA Sharpness;                /* Sharpness : for TV & CRT only */
+    EXTVDATA Saturation;               /* Saturation : for TV & CRT only */
+    EXTVDATA Hue;                      /* Hue : for TV & CRT only */
+    EXTVDATA Dither;                   /* Dither : for LVDS only */
+} sdvo_display_params;
+
+typedef enum _SDVO_PICTURE_ASPECT_RATIO_T
+{
+    UAIM_PAR_NO_DATA = 0x00000000,
+    UAIM_PAR_4_3 = 0x00000100,
+    UAIM_PAR_16_9 = 0x00000200,
+    UAIM_PAR_FUTURE = 0x00000300,
+    UAIM_PAR_MASK = 0x00000300,
+} SDVO_PICTURE_ASPECT_RATIO_T;
+
+typedef enum _SDVO_FORMAT_ASPECT_RATIO_T
+{
+    UAIM_FAR_NO_DATA = 0x00000000,
+    UAIM_FAR_SAME_AS_PAR = 0x00002000,
+    UAIM_FAR_4_BY_3_CENTER = 0x00002400,
+    UAIM_FAR_16_BY_9_CENTER = 0x00002800,
+    UAIM_FAR_14_BY_9_CENTER = 0x00002C00,
+    UAIM_FAR_16_BY_9_LETTERBOX_TOP = 0x00000800,
+    UAIM_FAR_14_BY_9_LETTERBOX_TOP = 0x00000C00,
+    UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER = 0x00002000,
+    UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER = 0x00003400,   /* With shoot and protect 14:9 center */
+    UAIM_FAR_16_BY_9_SNP_14_BY_9_CENTER = 0x00003800,  /* With shoot and protect 14:9 center */
+    UAIM_FAR_16_BY_9_SNP_4_BY_3_CENTER = 0x00003C00,   /* With shoot and protect 4:3 center */
+    UAIM_FAR_MASK = 0x00003C00,
+} SDVO_FORMAT_ASPECT_RATIO_T;
+
+// TV image aspect ratio
+typedef enum _CP_IMAGE_ASPECT_RATIO
+{
+    CP_ASPECT_RATIO_FF_4_BY_3 = 0,
+    CP_ASPECT_RATIO_14_BY_9_CENTER = 1,
+    CP_ASPECT_RATIO_14_BY_9_TOP = 2,
+    CP_ASPECT_RATIO_16_BY_9_CENTER = 3,
+    CP_ASPECT_RATIO_16_BY_9_TOP = 4,
+    CP_ASPECT_RATIO_GT_16_BY_9_CENTER = 5,
+    CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER = 6,
+    CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC = 7,
+} CP_IMAGE_ASPECT_RATIO;
+
+typedef struct _SDVO_ANCILLARY_INFO_T
+{
+    CP_IMAGE_ASPECT_RATIO AspectRatio;
+    u32 RedistCtrlFlag;               /* Redistribution control flag (get and set */
+} SDVO_ANCILLARY_INFO_T, *PSDVO_ANCILLARY_INFO_T;
+
+struct intel_sdvo_priv {
+       struct intel_i2c_chan *i2c_bus;
+       int slaveaddr;
+       int output_device;
+
+       u16 active_outputs;
+
+       struct intel_sdvo_caps caps;
+       int pixel_clock_min, pixel_clock_max;
+
+       int save_sdvo_mult;
+       u16 save_active_outputs;
+       struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
+       struct intel_sdvo_dtd save_output_dtd[16];
+       u32 save_SDVOX;
+       /**
+        * SDVO TV encoder support
+        */
+    u32 ActiveDevice;                  /* CRT, TV, LVDS, TMDS */
+    u32 TVStandard;                    /* PAL, NTSC */
+    int TVOutput;                      /* S-Video, CVBS, YPbPr, RGB */
+    int TVMode;                        /* SDTV/HDTV/SECAM mode */
+    u32 TVStdBitmask;
+    u32 dwSDVOHDTVBitMask;
+    u32 dwSDVOSDTVBitMask;
+    u8 byInputWiring;
+    bool bGetClk;
+    u32 dwMaxDotClk;
+    u32 dwMinDotClk;
+
+    u32 dwMaxInDotClk;
+    u32 dwMinInDotClk;
+
+    u32 dwMaxOutDotClk;
+    u32 dwMinOutDotClk;
+    u32 dwSupportedEnhancements;
+    EXTVDATA OverScanY;                        /* Vertical Overscan : for TV only */
+    EXTVDATA OverScanX;                        /* Horizontal Overscan : for TV only */
+    sdvo_display_params dispParams;
+    SDVO_ANCILLARY_INFO_T AncillaryInfo;       
+};
+
+/* Define TV mode type */
+/* The full set is defined in xf86str.h */
+#define M_T_TV 0x80
+
+typedef struct _tv_mode_t
+{
+    /* the following data is detailed mode information as it would be passed to the hardware: */
+    struct drm_display_mode mode_entry;
+    u32 dwSupportedSDTVvss;
+    u32 dwSupportedHDTVvss;
+    bool m_preferred;
+    bool isTVMode;
+} tv_mode_t;
+
+static tv_mode_t tv_modes[] = {
+    {
+     .mode_entry =
+     {DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x2625a00 / 1000, 800, 840, 968, 1056, 0,
+      600, 601,
+      604, 628, 0, V_PHSYNC | V_PVSYNC)},
+     .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
+     .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
+     .m_preferred = TRUE,
+     .isTVMode = TRUE,
+     },
+    {
+     .mode_entry =
+     {DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x3dfd240 / 1000, 1024, 0x418, 0x49f, 0x540,
+      0, 768,
+      0x303, 0x308, 0x325, 0, V_PHSYNC | V_PVSYNC)},
+     .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
+     .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
+     .m_preferred = FALSE,
+     .isTVMode = TRUE,
+     },
+    {
+     .mode_entry =
+     {DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1978ff0 / 1000, 720, 0x2e1, 0x326, 0x380, 0,
+      480,
+      0x1f0, 0x1e1, 0x1f1, 0, V_PHSYNC | V_PVSYNC)},
+     .dwSupportedSDTVvss =
+     TVSTANDARD_NTSC_M | TVSTANDARD_NTSC_M_J | TVSTANDARD_NTSC_433,
+     .dwSupportedHDTVvss = 0x0,
+     .m_preferred = FALSE,
+     .isTVMode = TRUE,
+     },
+    {
+     /*Modeline "720x576_SDVO"   0.96 720 756 788 864  576 616 618 700 +vsync  */
+     .mode_entry =
+     {DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1f25a20 / 1000, 720, 756, 788, 864, 0, 576,
+      616,
+      618, 700, 0, V_PHSYNC | V_PVSYNC)},
+     .dwSupportedSDTVvss =
+     (TVSTANDARD_PAL_B | TVSTANDARD_PAL_D | TVSTANDARD_PAL_H |
+      TVSTANDARD_PAL_I | TVSTANDARD_PAL_N | TVSTANDARD_SECAM_B |
+      TVSTANDARD_SECAM_D | TVSTANDARD_SECAM_G | TVSTANDARD_SECAM_H |
+      TVSTANDARD_SECAM_K | TVSTANDARD_SECAM_K1 | TVSTANDARD_SECAM_L |
+      TVSTANDARD_PAL_G | TVSTANDARD_SECAM_L1),
+     .dwSupportedHDTVvss = 0x0,
+     .m_preferred = FALSE,
+     .isTVMode = TRUE,
+     },
+    {
+     .mode_entry =
+     {DRM_MODE("1280x720@60",DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1390, 1430, 1650, 0,
+      720,
+      725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
+     .dwSupportedSDTVvss = 0x0,
+     .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p60,
+     .m_preferred = FALSE,
+     .isTVMode = TRUE,
+     },
+    {
+     .mode_entry =
+     {DRM_MODE("1280x720@50", DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1720, 1759, 1980, 0,
+      720,
+      725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
+     .dwSupportedSDTVvss = 0x0,
+     .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p50,
+     .m_preferred = FALSE,
+     .isTVMode = TRUE,
+     },
+    {
+     .mode_entry =
+     {DRM_MODE("1920x1080@60", DRM_MODE_TYPE_DRIVER | M_T_TV, 148500000 / 1000, 1920, 2008, 2051, 2200, 0,
+      1080,
+      1084, 1088, 1124, 0, V_PHSYNC | V_PVSYNC)},
+     .dwSupportedSDTVvss = 0x0,
+     .dwSupportedHDTVvss = HDTV_SMPTE_274M_1080i60,
+     .m_preferred = FALSE,
+     .isTVMode = TRUE,
+     },
+};
+
+#define NUM_TV_MODES (sizeof(tv_modes) / sizeof(tv_modes[0]))
+
+typedef struct {
+    /* given values */
+    int n;
+    int m1, m2;
+    int p1, p2;
+    /* derived values */
+    int dot;
+    int vco;
+    int m;
+    int p;
+} ex_intel_clock_t;
+
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct drm_output *output, u32 val)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv   *sdvo_priv = intel_output->dev_priv;
+       u32 bval = val, cval = val;
+       int i;
+
+       if (sdvo_priv->output_device == SDVOB)
+               cval = I915_READ(SDVOC);
+       else
+               bval = I915_READ(SDVOB);
+       /*
+        * Write the registers twice for luck. Sometimes,
+        * writing them only once doesn't appear to 'stick'.
+        * The BIOS does this too. Yay, magic
+        */
+       for (i = 0; i < 2; i++)
+       {
+               I915_WRITE(SDVOB, bval);
+               I915_READ(SDVOB);
+               I915_WRITE(SDVOC, cval);
+               I915_READ(SDVOC);
+       }
+}
+
+static bool intel_sdvo_read_byte(struct drm_output *output, u8 addr,
+                                u8 *ch)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       u8 out_buf[2];
+       u8 buf[2];
+       int ret;
+
+       struct i2c_msg msgs[] = {
+               { 
+                       .addr = sdvo_priv->i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               }, 
+               {
+                       .addr = sdvo_priv->i2c_bus->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = 0;
+
+       if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
+       {
+//             DRM_DEBUG("got back from addr %02X = %02x\n", out_buf[0], buf[0]); 
+               *ch = buf[0];
+               return true;
+       }
+
+       DRM_DEBUG("i2c transfer returned %d\n", ret);
+       return false;
+}
+
+
+#if 0
+static bool intel_sdvo_read_byte_quiet(struct drm_output *output, int addr,
+                                      u8 *ch)
+{
+       return true;
+
+}
+#endif
+
+static bool intel_sdvo_write_byte(struct drm_output *output, int addr,
+                                 u8 ch)
+{
+       struct intel_output *intel_output = output->driver_private;
+       u8 out_buf[2];
+       struct i2c_msg msgs[] = {
+               { 
+                       .addr = intel_output->i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = out_buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = ch;
+
+       if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
+       {
+               return true;
+       }
+       return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+    u8 cmd;
+    char *name;
+} sdvo_cmd_names[] = {
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+};
+
+#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
+
+static void intel_sdvo_write_cmd(struct drm_output *output, u8 cmd,
+                                void *args, int args_len)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int i;
+
+        if (drm_debug) {
+                DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
+                for (i = 0; i < args_len; i++)
+                        printk("%02X ", ((u8 *)args)[i]);
+                for (; i < 8; i++)
+                        printk("   ");
+                for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
+                        if (cmd == sdvo_cmd_names[i].cmd) {
+                                printk("(%s)", sdvo_cmd_names[i].name);
+                                break;
+                        }
+                }
+                if (i == sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
+                        printk("(%02X)", cmd);
+                printk("\n");
+        }
+                        
+       for (i = 0; i < args_len; i++) {
+               intel_sdvo_write_byte(output, SDVO_I2C_ARG_0 - i, ((u8*)args)[i]);
+       }
+
+       intel_sdvo_write_byte(output, SDVO_I2C_OPCODE, cmd);
+}
+
+static const char *cmd_status_names[] = {
+       "Power on",
+       "Success",
+       "Not supported",
+       "Invalid arg",
+       "Pending",
+       "Target not specified",
+       "Scaling not supported"
+};
+
+static u8 intel_sdvo_read_response(struct drm_output *output, void *response,
+                                  int response_len)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int i;
+       u8 status;
+       u8 retry = 50;
+
+       while (retry--) {
+               /* Read the command response */
+               for (i = 0; i < response_len; i++) {
+                       intel_sdvo_read_byte(output, SDVO_I2C_RETURN_0 + i,
+                                    &((u8 *)response)[i]);
+               }
+
+               /* read the return status */
+               intel_sdvo_read_byte(output, SDVO_I2C_CMD_STATUS, &status);
+
+               if (drm_debug) {
+                       DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
+                               for (i = 0; i < response_len; i++)
+                               printk("%02X ", ((u8 *)response)[i]);
+                       for (; i < 8; i++)
+                               printk("   ");
+                       if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+                               printk("(%s)", cmd_status_names[status]);
+                       else
+                               printk("(??? %d)", status);
+                       printk("\n");
+               }
+
+               if (status != SDVO_CMD_STATUS_PENDING)
+                       return status;
+
+               mdelay(50);
+       }
+
+       return status;
+}
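+
+/*
+ * Typical command/response round trip built from the two helpers above
+ * (a hedged sketch; this is exactly what intel_sdvo_get_active_outputs()
+ * further below does):
+ *
+ *   u16 outputs;
+ *   intel_sdvo_write_cmd(output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
+ *   status = intel_sdvo_read_response(output, &outputs, sizeof(outputs));
+ *   if (status == SDVO_CMD_STATUS_SUCCESS)
+ *           ... outputs now holds the active-output bitmask ...
+ */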
+
+int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+       if (mode->clock >= 100000)
+               return 1;
+       else if (mode->clock >= 50000)
+               return 2;
+       else
+               return 4;
+}
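+
+/*
+ * mode->clock is in kHz, so the thresholds above are 100 MHz and 50 MHz:
+ * e.g. a 148500 kHz 1080p mode runs at a 1x multiplier, a 65000 kHz XGA
+ * mode at 2x, and a 25175 kHz VGA mode at 4x.
+ */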
+
+/**
+ * Don't check the status code from this command, as reading the status
+ * switches the bus back to the SDVO chip, which defeats the purpose of
+ * doing a bus switch in the first place.
+ */
+void intel_sdvo_set_control_bus_switch(struct drm_output *output, u8 target)
+{
+       intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
+}
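+
+/*
+ * Usage sketch (SDVO_CONTROL_BUS_DDC1 is assumed here to be one of the bus
+ * target constants from the SDVO register definitions):
+ *
+ *     intel_sdvo_set_control_bus_switch(output, SDVO_CONTROL_BUS_DDC1);
+ *     ... run the I2C EDID transfer ...
+ *
+ * The next command issued through intel_sdvo_write_cmd() switches the bus
+ * back to the SDVO device, which is why no status is read here.
+ */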
+
+static bool intel_sdvo_set_target_input(struct drm_output *output, bool target_0, bool target_1)
+{
+       struct intel_sdvo_set_target_input_args targets = {0};
+       u8 status;
+
+       /* Targeting both inputs at once is not supported; report failure. */
+       if (target_0 && target_1)
+               return false;
+
+       if (target_1)
+               targets.target_1 = 1;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_INPUT, &targets,
+                            sizeof(targets));
+
+       status = intel_sdvo_read_response(output, NULL, 0);
+
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct drm_output *output, bool *input_1, bool *input_2)
+{
+       struct intel_sdvo_get_trained_inputs_response response;
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
+       status = intel_sdvo_read_response(output, &response, sizeof(response));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       *input_1 = response.input0_trained;
+       *input_2 = response.input1_trained;
+       return true;
+}
+
+static bool intel_sdvo_get_active_outputs(struct drm_output *output,
+                                         u16 *outputs)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
+       status = intel_sdvo_read_response(output, outputs, sizeof(*outputs));
+
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_set_active_outputs(struct drm_output *output,
+                                         u16 outputs)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
+                            sizeof(outputs));
+       status = intel_sdvo_read_response(output, NULL, 0);
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct drm_output *output,
+                                              int mode)
+{
+       u8 status, state = SDVO_ENCODER_STATE_ON;
+
+       switch (mode) {
+       case DPMSModeOn:
+               state = SDVO_ENCODER_STATE_ON;
+               break;
+       case DPMSModeStandby:
+               state = SDVO_ENCODER_STATE_STANDBY;
+               break;
+       case DPMSModeSuspend:
+               state = SDVO_ENCODER_STATE_SUSPEND;
+               break;
+       case DPMSModeOff:
+               state = SDVO_ENCODER_STATE_OFF;
+               break;
+       }
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+                            sizeof(state));
+       status = intel_sdvo_read_response(output, NULL, 0);
+
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct drm_output *output,
+                                                  int *clock_min,
+                                                  int *clock_max)
+{
+       struct intel_sdvo_pixel_clock_range clocks;
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+                            NULL, 0);
+
+       status = intel_sdvo_read_response(output, &clocks, sizeof(clocks));
+
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       /* Convert the values from units of 10 kHz to kHz. */
+       *clock_min = clocks.min * 10;
+       *clock_max = clocks.max * 10;
+
+       return true;
+}
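+
+/*
+ * Worked example of the conversion above (illustrative values): a device
+ * reporting clocks.min == 2500 and clocks.max == 20000 (10 kHz units)
+ * yields a usable input range of 25,000..200,000 kHz, i.e. 25..200 MHz.
+ */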
+
+static bool intel_sdvo_set_target_output(struct drm_output *output,
+                                        u16 outputs)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+                            sizeof(outputs));
+
+       status = intel_sdvo_read_response(output, NULL, 0);
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_get_timing(struct drm_output *output, u8 cmd,
+                                 struct intel_sdvo_dtd *dtd)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(output, cmd, NULL, 0);
+       status = intel_sdvo_read_response(output, &dtd->part1,
+                                         sizeof(dtd->part1));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       intel_sdvo_write_cmd(output, cmd + 1, NULL, 0);
+       status = intel_sdvo_read_response(output, &dtd->part2,
+                                         sizeof(dtd->part2));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+static bool intel_sdvo_get_input_timing(struct drm_output *output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_get_timing(output,
+                                    SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_get_output_timing(struct drm_output *output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_get_timing(output,
+                                    SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_timing(struct drm_output *output, u8 cmd,
+                                 struct intel_sdvo_dtd *dtd)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(output, cmd, &dtd->part1, sizeof(dtd->part1));
+       status = intel_sdvo_read_response(output, NULL, 0);
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       intel_sdvo_write_cmd(output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+       status = intel_sdvo_read_response(output, NULL, 0);
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+static bool intel_sdvo_set_input_timing(struct drm_output *output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_set_timing(output,
+                                    SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct drm_output *output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_set_timing(output,
+                                    SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+#if 0
+static bool intel_sdvo_get_preferred_input_timing(struct drm_output *output,
+                                                 struct intel_sdvo_dtd *dtd)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+                            NULL, 0);
+
+       status = intel_sdvo_read_response(output, &dtd->part1,
+                                         sizeof(dtd->part1));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+                            NULL, 0);
+       status = intel_sdvo_read_response(output, &dtd->part2,
+                                         sizeof(dtd->part2));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+#endif
+
+static int intel_sdvo_get_clock_rate_mult(struct drm_output *output)
+{
+       u8 response, status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
+       status = intel_sdvo_read_response(output, &response, 1);
+
+       if (status != SDVO_CMD_STATUS_SUCCESS) {
+               DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
+               return SDVO_CLOCK_RATE_MULT_1X;
+       } else {
+               DRM_DEBUG("Current clock rate multiplier: %d\n", response);
+       }
+
+       return response;
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct drm_output *output, u8 val)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+       status = intel_sdvo_read_response(output, NULL, 0);
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+static bool intel_sdvo_mode_fixup(struct drm_output *output,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       /* Make the CRTC code factor in the SDVO pixel multiplier.  The SDVO
+        * device will be told of the multiplier during mode_set.
+        */
+       DRM_DEBUG("xxintel_sdvo_fixup\n");
+       adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+       return true;
+}
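+
+/*
+ * Example of the fixup above (illustrative): a 40 MHz mode (mode->clock ==
+ * 40000) gets a pixel multiplier of 4, so the CRTC is programmed with
+ * adjusted_mode->clock == 160000, while the SDVO device itself is told the
+ * multiplier during mode_set, as the comment above notes.
+ */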
+
+#if 0
+static void i830_sdvo_map_hdtvstd_bitmask(struct drm_output * output)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+    switch (sdvo_priv->TVStandard) {
+    case HDTV_SMPTE_274M_1080i50:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i50;
+       break;
+
+    case HDTV_SMPTE_274M_1080i59:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i59;
+       break;
+
+    case HDTV_SMPTE_274M_1080i60:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i60;
+       break;
+    case HDTV_SMPTE_274M_1080p60:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080p60;
+       break;
+    case HDTV_SMPTE_296M_720p59:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p59;
+       break;
+
+    case HDTV_SMPTE_296M_720p60:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p60;
+       break;
+
+    case HDTV_SMPTE_296M_720p50:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p50;
+       break;
+
+    case HDTV_SMPTE_293M_480p59:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_293M_480p59;
+       break;
+
+    case HDTV_SMPTE_293M_480p60:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_EIA_7702A_480p60;
+       break;
+
+    case HDTV_SMPTE_170M_480i59:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_170M_480i59;
+       break;
+
+    case HDTV_ITURBT601_576i50:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576i50;
+       break;
+
+    case HDTV_ITURBT601_576p50:
+       sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576p50;
+       break;
+    default:
+       DRM_DEBUG("ERROR: Unknown TV Standard!!!\n");
+       /*Invalid return 0 */
+       sdvo_priv->TVStdBitmask = 0;
+    }
+
+}
+
+static void i830_sdvo_map_sdtvstd_bitmask(struct drm_output * output)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+    switch (sdvo_priv->TVStandard) {
+    case TVSTANDARD_NTSC_M:
+       sdvo_priv->TVStdBitmask = SDVO_NTSC_M;
+       break;
+
+    case TVSTANDARD_NTSC_M_J:
+       sdvo_priv->TVStdBitmask = SDVO_NTSC_M_J;
+       break;
+
+    case TVSTANDARD_NTSC_433:
+       sdvo_priv->TVStdBitmask = SDVO_NTSC_433;
+       break;
+
+    case TVSTANDARD_PAL_B:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_B;
+       break;
+
+    case TVSTANDARD_PAL_D:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_D;
+       break;
+
+    case TVSTANDARD_PAL_G:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_G;
+       break;
+
+    case TVSTANDARD_PAL_H:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_H;
+       break;
+
+    case TVSTANDARD_PAL_I:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_I;
+       break;
+
+    case TVSTANDARD_PAL_M:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_M;
+       break;
+
+    case TVSTANDARD_PAL_N:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_N;
+       break;
+
+    case TVSTANDARD_PAL_60:
+       sdvo_priv->TVStdBitmask = SDVO_PAL_60;
+       break;
+
+    case TVSTANDARD_SECAM_B:
+       sdvo_priv->TVStdBitmask = SDVO_SECAM_B;
+       break;
+
+    case TVSTANDARD_SECAM_D:
+       sdvo_priv->TVStdBitmask = SDVO_SECAM_D;
+       break;
+
+    case TVSTANDARD_SECAM_G:
+       sdvo_priv->TVStdBitmask = SDVO_SECAM_G;
+       break;
+
+    case TVSTANDARD_SECAM_K:
+       sdvo_priv->TVStdBitmask = SDVO_SECAM_K;
+       break;
+
+    case TVSTANDARD_SECAM_K1:
+       sdvo_priv->TVStdBitmask = SDVO_SECAM_K1;
+       break;
+
+    case TVSTANDARD_SECAM_L:
+       sdvo_priv->TVStdBitmask = SDVO_SECAM_L;
+       break;
+
+    case TVSTANDARD_SECAM_L1:
+       DRM_DEBUG("TVSTANDARD_SECAM_L1 not supported by encoder\n");
+       break;
+
+    case TVSTANDARD_SECAM_H:
+       DRM_DEBUG("TVSTANDARD_SECAM_H not supported by encoder\n");
+       break;
+
+    default:
+       DRM_DEBUG("ERROR: Unknown TV Standard\n");
+       /* Invalid; return 0 */
+       sdvo_priv->TVStdBitmask = 0;
+       break;
+    }
+}
+#endif
+
+static bool i830_sdvo_set_tvoutputs_formats(struct drm_output * output)
+{
+    u8 byArgs[6];
+    u8 status;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    if (sdvo_priv->TVMode & (TVMODE_SDTV)) {
+       /* Fill up the argument value */
+       byArgs[0] = (u8) (sdvo_priv->TVStdBitmask & 0xFF);
+       byArgs[1] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
+       byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
+    } else {
+       /* Fill up the argument value */
+       byArgs[0] = 0;
+       byArgs[1] = 0;
+       byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask & 0xFF));
+       byArgs[3] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
+       byArgs[4] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
+       byArgs[5] = (u8) ((sdvo_priv->TVStdBitmask >> 24) & 0xFF);
+    }
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMATS, byArgs, 6);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+
+}
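+
+/*
+ * Argument layout sketch for SET_TV_FORMATS as packed above (taken from
+ * this implementation, not verified against the SDVO spec): in SDTV mode
+ * the 24-bit TVStdBitmask goes little-endian into byArgs[0..2]; otherwise
+ * the mask is shifted up into byArgs[2..5] with the two SDTV bytes left
+ * zero, so e.g. a mask of 0x00000080 is sent as 00 00 80 00 00 00.
+ */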
+
+static bool i830_sdvo_create_preferred_input_timing(struct drm_output * output,
+                                       struct drm_display_mode * mode)
+{
+    u8 byArgs[7];
+    u8 status;
+    u32 dwClk;
+    u32 dwHActive, dwVActive;
+    bool bIsInterlaced, bIsScaled;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument values */
+    dwHActive = mode->crtc_hdisplay;
+    dwVActive = mode->crtc_vdisplay;
+
+    dwClk = mode->clock * 1000 / 10000;
+    byArgs[0] = (u8) (dwClk & 0xFF);
+    byArgs[1] = (u8) ((dwClk >> 8) & 0xFF);
+
+    /* HActive & VActive should not exceed 12 bits each. So check it */
+    if ((dwHActive > 0xFFF) || (dwVActive > 0xFFF))
+       return FALSE;
+
+    byArgs[2] = (u8) (dwHActive & 0xFF);
+    byArgs[3] = (u8) ((dwHActive >> 8) & 0xF);
+    byArgs[4] = (u8) (dwVActive & 0xFF);
+    byArgs[5] = (u8) ((dwVActive >> 8) & 0xF);
+
+    bIsInterlaced = 1;
+    bIsScaled = 0;
+
+    byArgs[6] = bIsInterlaced ? 1 : 0;
+    byArgs[6] |= bIsScaled ? 2 : 0;
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS,
+                       byArgs, 7);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+
+}
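+
+/*
+ * Packing example for the command above (illustrative): a 1024x768 mode
+ * with a 65 MHz dot clock gives dwClk = 65000 * 1000 / 10000 = 6500
+ * (10 kHz units), so byArgs[0..1] = 0x64 0x19; dwHActive = 1024 packs as
+ * byArgs[2] = 0x00, byArgs[3] = 0x4, and dwVActive = 768 as byArgs[4] =
+ * 0x00, byArgs[5] = 0x3, both within their 12-bit limits.
+ */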
+
+static bool i830_sdvo_get_preferred_input_timing(struct drm_output * output,
+                                    struct intel_sdvo_dtd *output_dtd)
+{
+    return intel_sdvo_get_timing(output,
+                               SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+                               output_dtd);
+}
+
+static bool i830_sdvo_set_current_inoutmap(struct drm_output * output, u32 in0outputmask,
+                              u32 in1outputmask)
+{
+    u8 byArgs[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument values */
+    byArgs[0] = (u8) (in0outputmask & 0xFF);
+    byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
+    byArgs[2] = (u8) (in1outputmask & 0xFF);
+    byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+
+}
+
+void i830_sdvo_set_iomap(struct drm_output * output)
+{
+    u32 dwCurrentSDVOIn0 = 0;
+    u32 dwCurrentSDVOIn1 = 0;
+    u32 dwDevMask = 0;
+
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    /* Please DO NOT change the following code. */
+    /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
+    /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
+    if (sdvo_priv->byInputWiring & (SDVOB_IN0 | SDVOC_IN0)) {
+       switch (sdvo_priv->ActiveDevice) {
+       case SDVO_DEVICE_LVDS:
+           dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
+           break;
+
+       case SDVO_DEVICE_TMDS:
+           dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
+           break;
+
+       case SDVO_DEVICE_TV:
+           dwDevMask =
+               SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
+               SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
+               SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
+           break;
+
+       case SDVO_DEVICE_CRT:
+           dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
+           break;
+       }
+       dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
+    } else if (sdvo_priv->byInputWiring & (SDVOB_IN1 | SDVOC_IN1)) {
+       switch (sdvo_priv->ActiveDevice) {
+       case SDVO_DEVICE_LVDS:
+           dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
+           break;
+
+       case SDVO_DEVICE_TMDS:
+           dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
+           break;
+
+       case SDVO_DEVICE_TV:
+           dwDevMask =
+               SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
+               SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
+               SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
+           break;
+
+       case SDVO_DEVICE_CRT:
+           dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
+           break;
+       }
+       dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
+    }
+
+    i830_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
+                                  dwCurrentSDVOIn1);
+}
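+
+/*
+ * Example of the routing above (illustrative): with byInputWiring set to
+ * SDVOB_IN0 and a TMDS device active on SDVO_OUTPUT_TMDS0, dwDevMask
+ * covers both TMDS outputs, dwCurrentSDVOIn0 reduces to SDVO_OUTPUT_TMDS0,
+ * and the IN_OUT_MAP command routes input 0 to that output while input 1
+ * is left unmapped.
+ */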
+
+static bool i830_sdvo_get_input_output_pixelclock_range(struct drm_output * output,
+                                           bool direction)
+{
+    u8 byRets[4];
+    u8 status;
+
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+    if (direction)                    /* output pixel clock */
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE,
+                           NULL, 0);
+    else
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+                           NULL, 0);
+    status = intel_sdvo_read_response(output, byRets, 4);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    if (direction) {
+       /* Fill up the return values. */
+       sdvo_priv->dwMinOutDotClk =
+           (u32) byRets[0] | ((u32) byRets[1] << 8);
+       sdvo_priv->dwMaxOutDotClk =
+           (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+       /* Multiply the clocks obtained by 10000 (10 kHz units to Hz) */
+       sdvo_priv->dwMinOutDotClk = (sdvo_priv->dwMinOutDotClk) * 10000;
+       sdvo_priv->dwMaxOutDotClk = (sdvo_priv->dwMaxOutDotClk) * 10000;
+
+    } else {
+       /* Fill up the return values. */
+       sdvo_priv->dwMinInDotClk = (u32) byRets[0] | ((u32) byRets[1] << 8);
+       sdvo_priv->dwMaxInDotClk = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+       /* Multiply the clocks obtained by 10000 (10 kHz units to Hz) */
+       sdvo_priv->dwMinInDotClk = (sdvo_priv->dwMinInDotClk) * 10000;
+       sdvo_priv->dwMaxInDotClk = (sdvo_priv->dwMaxInDotClk) * 10000;
+    }
+    DRM_DEBUG("MinDotClk = 0x%x\n", sdvo_priv->dwMinInDotClk);
+    DRM_DEBUG("MaxDotClk = 0x%x\n", sdvo_priv->dwMaxInDotClk);
+
+    return TRUE;
+
+}
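+
+/*
+ * Worked example of the unit handling above (illustrative): raw reply
+ * values of min = 2500 and max = 20000 are in 10 kHz units, so after the
+ * multiply the stored range is dwMinInDotClk = 25,000,000 and
+ * dwMaxInDotClk = 200,000,000, i.e. the dot clocks are kept in Hz.
+ */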
+
+static bool i830_sdvo_get_supported_tvoutput_formats(struct drm_output * output,
+                                        u32 * pTVStdMask,
+                                        u32 * pHDTVStdMask, u32 *pTVStdFormat)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;     
+
+    u8 byRets[6];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 6);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values */
+    *pTVStdMask = (((u32) byRets[0]) |
+                  ((u32) byRets[1] << 8) |
+                  ((u32) (byRets[2] & 0x7) << 16));
+
+    *pHDTVStdMask = (((u32) byRets[2] & 0xF8) |
+                    ((u32) byRets[3] << 8) |
+                    ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMATS, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 6);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values */
+    if (sdvo_priv->TVMode == TVMODE_SDTV)
+       *pTVStdFormat = (((u32) byRets[0]) |
+                        ((u32) byRets[1] << 8) |
+                        ((u32) (byRets[2] & 0x7) << 16));
+    else
+       *pTVStdFormat = (((u32) byRets[2] & 0xF8) |
+                        ((u32) byRets[3] << 8) |
+                        ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
+       DRM_DEBUG("BIOS TV format is %d\n", *pTVStdFormat);
+    return TRUE;
+
+}
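+
+/*
+ * Reply split sketch, as decoded above (from this implementation, not
+ * checked against the spec): the six reply bytes hold a 19-bit SDTV mask
+ * in byRets[0], byRets[1] and the low three bits of byRets[2], with the
+ * HDTV mask in the remaining bits. A reply of FF 07 0B 00 00 00 decodes
+ * to *pTVStdMask = 0x307FF and *pHDTVStdMask = 0x08.
+ */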
+
+static bool i830_sdvo_get_supported_enhancements(struct drm_output * output,
+                                    u32 * psupported_enhancements)
+{
+
+    u8 status;
+    u8 byRets[2];
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 2);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    sdvo_priv->dwSupportedEnhancements = *psupported_enhancements =
+       ((u32) byRets[0] | ((u32) byRets[1] << 8));
+    return TRUE;
+
+}
+
+static bool i830_sdvo_get_max_horizontal_overscan(struct drm_output * output, u32 * pMaxVal,
+                                     u32 * pDefaultVal)
+{
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN, NULL,
+                       0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+    return TRUE;
+}
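+
+/*
+ * All of the GET_MAX_* getters in this block share the reply layout used
+ * above: two little-endian u16 values, the maximum in byRets[0..1] and the
+ * default in byRets[2..3]. E.g. a reply of 14 00 0A 00 decodes to max = 20
+ * and default = 10 (illustrative values).
+ */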
+
+static bool i830_sdvo_get_max_vertical_overscan(struct drm_output * output, u32 * pMaxVal,
+                                   u32 * pDefaultVal)
+{
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_horizontal_position(struct drm_output * output, u32 * pMaxVal,
+                                     u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_POSITION, NULL,
+                       0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_vertical_position(struct drm_output * output,
+                                   u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_POSITION, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_flickerfilter(struct drm_output * output,
+                               u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_FLICKER_FILTER, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_brightness(struct drm_output * output,
+                            u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_contrast(struct drm_output * output,
+                          u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_sharpness(struct drm_output * output,
+                           u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SHARPNESS, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_hue(struct drm_output * output,
+                     u32 * pMaxVal, u32 * pDefaultVal)
+{
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HUE, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_saturation(struct drm_output * output,
+                            u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
+
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_adaptive_flickerfilter(struct drm_output * output,
+                                        u32 * pMaxVal,
+                                        u32 * pDefaultVal)
+{
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER,
+                       NULL, 0);
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_lumafilter(struct drm_output * output,
+                            u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_LUMA_FILTER, NULL, 0);
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_max_chromafilter(struct drm_output * output,
+                              u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_CHROMA_FILTER, NULL, 0);
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_get_dotcrawl(struct drm_output * output,
+                      u32 * pCurrentVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_DOT_CRAWL, NULL, 0);
+    status = intel_sdvo_read_response(output, byRets, 2);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Tibet issue 1603772: dot crawl does not persist after reboot/hibernate */
+    /* Details : Bit0 was treated as the dot crawl max value, but according to */
+    /*           the EDS, Bit0 represents the current dot crawl value. */
+    /* Fix     : The current value is updated from Bit0. */
+
+    /* Fill up the return values. */
+    *pCurrentVal = (u32) (byRets[0] & 0x1);
+    *pDefaultVal = (u32) ((byRets[0] >> 1) & 0x1);
+    return TRUE;
+}
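+
+/*
+ * Bit layout example for the reply above (illustrative): byRets[0] == 0x03
+ * has bit 0 (current dot crawl) and bit 1 (default) both set, giving
+ * *pCurrentVal = 1 and *pDefaultVal = 1; byRets[0] == 0x02 gives current 0
+ * and default 1.
+ */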
+
+static bool i830_sdvo_get_max_2D_flickerfilter(struct drm_output * output,
+                                  u32 * pMaxVal, u32 * pDefaultVal)
+{
+
+    u8 byRets[4];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byRets, 0, sizeof(byRets));
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_2D_FLICKER_FILTER, NULL, 0);
+    status = intel_sdvo_read_response(output, byRets, 4);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    /* Fill up the return values. */
+    *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
+    *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_horizontal_overscan(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_OVERSCAN, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+    return TRUE;
+}
+
+static bool i830_sdvo_set_vertical_overscan(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_OVERSCAN, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+    return TRUE;
+}
+
+static bool i830_sdvo_set_horizontal_position(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_POSITION, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_vertical_position(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_POSITION, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+
+}
+
+static bool i830_sdvo_set_flickerilter(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_FLICKER_FILTER, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_brightness(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_BRIGHTNESS, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_contrast(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTRAST, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_sharpness(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_SHARPNESS, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_hue(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_HUE, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_saturation(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_SATURATION, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_adaptive_flickerfilter(struct drm_output * output, u32 dwVal)
+{
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER, byArgs,
+                       2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+
+}
+
+static bool i830_sdvo_set_lumafilter(struct drm_output * output, u32 dwVal)
+{
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_LUMA_FILTER, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_chromafilter(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_CHROMA_FILTER, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_dotcrawl(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_DOT_CRAWL, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+static bool i830_sdvo_set_2D_flickerfilter(struct drm_output * output, u32 dwVal)
+{
+
+    u8 byArgs[2];
+    u8 status;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) (dwVal & 0xFF);
+    byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_2D_FLICKER_FILTER, byArgs, 2);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+}
+
+#if 0
+static bool i830_sdvo_set_ancillary_video_information(struct drm_output * output)
+{
+
+    u8 status;
+    u8 byArgs[4];
+    u32 dwAncillaryBits = 0;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    PSDVO_ANCILLARY_INFO_T pAncillaryInfo = &sdvo_priv->AncillaryInfo;
+
+    /* Zero out all fields of the args/ret */
+    memset(byArgs, 0, sizeof(byArgs));
+
+    /* Handle picture aspect ratio (bits 8, 9) and */
+    /* active format aspect ratio (bits 10, 13) */
+    switch (pAncillaryInfo->AspectRatio) {
+    case CP_ASPECT_RATIO_FF_4_BY_3:
+       dwAncillaryBits |= UAIM_PAR_4_3;
+       dwAncillaryBits |= UAIM_FAR_4_BY_3_CENTER;
+       break;
+    case CP_ASPECT_RATIO_14_BY_9_CENTER:
+       dwAncillaryBits |= UAIM_FAR_14_BY_9_CENTER;
+       break;
+    case CP_ASPECT_RATIO_14_BY_9_TOP:
+       dwAncillaryBits |= UAIM_FAR_14_BY_9_LETTERBOX_TOP;
+       break;
+    case CP_ASPECT_RATIO_16_BY_9_CENTER:
+       dwAncillaryBits |= UAIM_PAR_16_9;
+       dwAncillaryBits |= UAIM_FAR_16_BY_9_CENTER;
+       break;
+    case CP_ASPECT_RATIO_16_BY_9_TOP:
+       dwAncillaryBits |= UAIM_PAR_16_9;
+       dwAncillaryBits |= UAIM_FAR_16_BY_9_LETTERBOX_TOP;
+       break;
+    case CP_ASPECT_RATIO_GT_16_BY_9_CENTER:
+       dwAncillaryBits |= UAIM_PAR_16_9;
+       dwAncillaryBits |= UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER;
+       break;
+    case CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER:
+       dwAncillaryBits |= UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER;
+       break;
+    case CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC:
+       dwAncillaryBits |= UAIM_PAR_16_9;
+       break;
+    default:
+       DRM_DEBUG("fail to set ancillary video info\n");
+       return FALSE;
+
+    }
+
+    /* Fill up the argument value */
+    byArgs[0] = (u8) ((dwAncillaryBits >> 0) & 0xFF);
+    byArgs[1] = (u8) ((dwAncillaryBits >> 8) & 0xFF);
+    byArgs[2] = (u8) ((dwAncillaryBits >> 16) & 0xFF);
+    byArgs[3] = (u8) ((dwAncillaryBits >> 24) & 0xFF);
+
+    /* Send the arguments & SDVO opcode to the h/w */
+
+    intel_sdvo_write_cmd(output, SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION,
+                       byArgs, 4);
+    status = intel_sdvo_read_response(output, NULL, 0);
+
+    if (status != SDVO_CMD_STATUS_SUCCESS)
+       return FALSE;
+
+    return TRUE;
+
+}
+#endif
+
+static bool i830_tv_program_display_params(struct drm_output * output)
+{
+    u8 status;
+    u32 dwMaxVal = 0;
+    u32 dwDefaultVal = 0;
+    u32 dwCurrentVal = 0;
+
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    /* X & Y Positions */
+
+    /* Horizontal position */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_HORIZONTAL_POSITION) {
+       status =
+           i830_sdvo_get_max_horizontal_position(output, &dwMaxVal,
+                                                 &dwDefaultVal);
+
+       if (status) {
+           /* Tibet issue 1596943: after changing mode from 8x6 to 10x7, opening CUI */
+           /* and pressing Restore Defaults changes the position. */
+
+           /* Tibet 1629992: previous TV settings (screen position & size) made via */
+           /* CUI are not kept across a reboot. */
+           /* Fix: compare whether the current position is greater than the max value */
+           /*      and only then assign the default. Earlier the check compared */
+           /*      pAim->PositionX.Max against dwMaxVal; at boot PositionX.Max is 0, */
+           /*      so the position was reset to the default on every reboot. */
+
+           if (sdvo_priv->dispParams.PositionX.Value > dwMaxVal)
+               sdvo_priv->dispParams.PositionX.Value = dwDefaultVal;
+
+           status =
+               i830_sdvo_set_horizontal_position(output,
+                                                 sdvo_priv->dispParams.PositionX.
+                                                 Value);
+
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.PositionX.Max = dwMaxVal;
+           sdvo_priv->dispParams.PositionX.Min = 0;
+           sdvo_priv->dispParams.PositionX.Default = dwDefaultVal;
+           sdvo_priv->dispParams.PositionX.Step = 1;
+       } else {
+           return status;
+       }
+    }
+
+    /* Vertical position */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_VERTICAL_POSITION) {
+       status =
+           i830_sdvo_get_max_vertical_position(output, &dwMaxVal,
+                                               &dwDefaultVal);
+
+       if (status) {
+
+           /* Tibet issue 1596943: after changing mode from 8x6 to 10x7, opening CUI */
+           /* and pressing Restore Defaults changes the position. */
+           /* Currently, if we are out of range, fall back to the default. */
+
+           /* Tibet 1629992: previous TV settings (screen position & size) made via */
+           /* CUI are not kept across a reboot. */
+           /* Fix: compare whether the current position is greater than the max value */
+           /*      and only then assign the default. Earlier the check compared */
+           /*      pAim->PositionY.Max against dwMaxVal; at boot PositionY.Max is 0, */
+           /*      so the position was reset to the default on every reboot. */
+
+           if (sdvo_priv->dispParams.PositionY.Value > dwMaxVal)
+               sdvo_priv->dispParams.PositionY.Value = dwDefaultVal;
+
+           status =
+               i830_sdvo_set_vertical_position(output,
+                                               sdvo_priv->dispParams.PositionY.
+                                               Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.PositionY.Max = dwMaxVal;
+           sdvo_priv->dispParams.PositionY.Min = 0;
+           sdvo_priv->dispParams.PositionY.Default = dwDefaultVal;
+           sdvo_priv->dispParams.PositionY.Step = 1;
+       } else {
+           return status;
+       }
+    }
+
+    /* Flicker Filter */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_FLICKER_FILTER) {
+       status =
+           i830_sdvo_get_max_flickerfilter(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Currently, if we are out of range, fall back to the default */
+           if (sdvo_priv->dispParams.FlickerFilter.Value > dwMaxVal)
+               sdvo_priv->dispParams.FlickerFilter.Value = dwDefaultVal;
+
+           status =
+               i830_sdvo_set_flickerilter(output,
+                                          sdvo_priv->dispParams.FlickerFilter.
+                                          Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.FlickerFilter.Max = dwMaxVal;
+           sdvo_priv->dispParams.FlickerFilter.Min = 0;
+           sdvo_priv->dispParams.FlickerFilter.Default = dwDefaultVal;
+           sdvo_priv->dispParams.FlickerFilter.Step = 1;
+       } else {
+           return status;
+       }
+    }
+
+    /* Brightness */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_BRIGHTNESS) {
+
+       status =
+           i830_sdvo_get_max_brightness(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value is beyond the max value; the min value */
+           /* as per the EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.Brightness.Value > dwMaxVal)
+               sdvo_priv->dispParams.Brightness.Value = dwDefaultVal;
+
+           /* Program the device */
+           status =
+               i830_sdvo_set_brightness(output,
+                                        sdvo_priv->dispParams.Brightness.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.Brightness.Max = dwMaxVal;
+           sdvo_priv->dispParams.Brightness.Min = 0;
+           sdvo_priv->dispParams.Brightness.Default = dwDefaultVal;
+           sdvo_priv->dispParams.Brightness.Step = 1;
+       } else {
+           return status;
+       }
+
+    }
+
+    /* Contrast */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_CONTRAST) {
+
+       status = i830_sdvo_get_max_contrast(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value is beyond the max value; the min value */
+           /* as per the EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.Contrast.Value > dwMaxVal)
+               sdvo_priv->dispParams.Contrast.Value = dwDefaultVal;
+
+           /* Program the device */
+           status =
+               i830_sdvo_set_contrast(output,
+                                      sdvo_priv->dispParams.Contrast.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.Contrast.Max = dwMaxVal;
+           sdvo_priv->dispParams.Contrast.Min = 0;
+           sdvo_priv->dispParams.Contrast.Default = dwDefaultVal;
+
+           sdvo_priv->dispParams.Contrast.Step = 1;
+
+       } else {
+           return status;
+       }
+    }
+
+    /* Sharpness */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_SHARPNESS) {
+
+       status =
+           i830_sdvo_get_max_sharpness(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value is beyond the max value; the min value */
+           /* as per the EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.Sharpness.Value > dwMaxVal)
+               sdvo_priv->dispParams.Sharpness.Value = dwDefaultVal;
+
+           /* Program the device */
+           status =
+               i830_sdvo_set_sharpness(output,
+                                       sdvo_priv->dispParams.Sharpness.Value);
+           if (!status)
+               return status;
+           sdvo_priv->dispParams.Sharpness.Max = dwMaxVal;
+           sdvo_priv->dispParams.Sharpness.Min = 0;
+           sdvo_priv->dispParams.Sharpness.Default = dwDefaultVal;
+
+           sdvo_priv->dispParams.Sharpness.Step = 1;
+       } else {
+           return status;
+       }
+    }
+
+    /* Hue */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_HUE) {
+
+       status = i830_sdvo_get_max_hue(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value is beyond the max value; the min value */
+           /* as per the EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.Hue.Value > dwMaxVal)
+               sdvo_priv->dispParams.Hue.Value = dwDefaultVal;
+
+           /* Program the device */
+           status = i830_sdvo_set_hue(output, sdvo_priv->dispParams.Hue.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.Hue.Max = dwMaxVal;
+           sdvo_priv->dispParams.Hue.Min = 0;
+           sdvo_priv->dispParams.Hue.Default = dwDefaultVal;
+
+           sdvo_priv->dispParams.Hue.Step = 1;
+
+       } else {
+           return status;
+       }
+    }
+
+    /* Saturation */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_SATURATION) {
+       status =
+           i830_sdvo_get_max_saturation(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value is beyond the max value; the min value */
+           /* as per the EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.Saturation.Value > dwMaxVal)
+               sdvo_priv->dispParams.Saturation.Value = dwDefaultVal;
+
+           /* Program the device */
+           status =
+               i830_sdvo_set_saturation(output,
+                                        sdvo_priv->dispParams.Saturation.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.Saturation.Max = dwMaxVal;
+           sdvo_priv->dispParams.Saturation.Min = 0;
+           sdvo_priv->dispParams.Saturation.Default = dwDefaultVal;
+           sdvo_priv->dispParams.Saturation.Step = 1;
+       } else {
+           return status;
+       }
+
+    }
+
+    /* Adaptive Flicker filter */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_ADAPTIVE_FLICKER_FILTER) {
+       status =
+           i830_sdvo_get_max_adaptive_flickerfilter(output, &dwMaxVal,
+                                                    &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value is beyond the max value; the min value */
+           /* as per the EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.AdaptiveFF.Value > dwMaxVal)
+               sdvo_priv->dispParams.AdaptiveFF.Value = dwDefaultVal;
+
+           status =
+               i830_sdvo_set_adaptive_flickerfilter(output,
+                                                    sdvo_priv->dispParams.
+                                                    AdaptiveFF.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.AdaptiveFF.Max = dwMaxVal;
+           sdvo_priv->dispParams.AdaptiveFF.Min = 0;
+           sdvo_priv->dispParams.AdaptiveFF.Default = dwDefaultVal;
+           sdvo_priv->dispParams.AdaptiveFF.Step = 1;
+       } else {
+           return status;
+       }
+    }
+
+    /* 2D Flicker filter */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_2D_FLICKER_FILTER) {
+
+       status =
+           i830_sdvo_get_max_2D_flickerfilter(output, &dwMaxVal,
+                                              &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value exceeds the max; the min value per the
+            * EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.TwoD_FlickerFilter.Value > dwMaxVal)
+               sdvo_priv->dispParams.TwoD_FlickerFilter.Value = dwDefaultVal;
+
+           status =
+               i830_sdvo_set_2D_flickerfilter(output,
+                                              sdvo_priv->dispParams.
+                                              TwoD_FlickerFilter.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.TwoD_FlickerFilter.Max = dwMaxVal;
+           sdvo_priv->dispParams.TwoD_FlickerFilter.Min = 0;
+           sdvo_priv->dispParams.TwoD_FlickerFilter.Default = dwDefaultVal;
+           sdvo_priv->dispParams.TwoD_FlickerFilter.Step = 1;
+       } else {
+           return status;
+       }
+    }
+
+    /* Luma Filter */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_TV_MAX_LUMA_FILTER) {
+       status =
+           i830_sdvo_get_max_lumafilter(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value exceeds the max; the min value per the
+            * EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.LumaFilter.Value > dwMaxVal)
+               sdvo_priv->dispParams.LumaFilter.Value = dwDefaultVal;
+
+           /* Program the device */
+           status =
+               i830_sdvo_set_lumafilter(output,
+                                        sdvo_priv->dispParams.LumaFilter.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.LumaFilter.Max = dwMaxVal;
+           sdvo_priv->dispParams.LumaFilter.Min = 0;
+           sdvo_priv->dispParams.LumaFilter.Default = dwDefaultVal;
+           sdvo_priv->dispParams.LumaFilter.Step = 1;
+
+       } else {
+           return status;
+       }
+
+    }
+
+    /* Chroma Filter */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_MAX_TV_CHROMA_FILTER) {
+
+       status =
+           i830_sdvo_get_max_chromafilter(output, &dwMaxVal, &dwDefaultVal);
+
+       if (status) {
+           /* Check whether the value exceeds the max; the min value per the
+            * EDS is always 0, so there is no need to check it. */
+           if (sdvo_priv->dispParams.ChromaFilter.Value > dwMaxVal)
+               sdvo_priv->dispParams.ChromaFilter.Value = dwDefaultVal;
+
+           /* Program the device */
+           status =
+               i830_sdvo_set_chromafilter(output,
+                                          sdvo_priv->dispParams.ChromaFilter.
+                                          Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.ChromaFilter.Max = dwMaxVal;
+           sdvo_priv->dispParams.ChromaFilter.Min = 0;
+           sdvo_priv->dispParams.ChromaFilter.Default = dwDefaultVal;
+           sdvo_priv->dispParams.ChromaFilter.Step = 1;
+       } else {
+           return status;
+       }
+
+    }
+
+    /* Dot Crawl */
+    if (sdvo_priv->dwSupportedEnhancements & SDVO_DOT_CRAWL) {
+       status = i830_sdvo_get_dotcrawl(output, &dwCurrentVal, &dwDefaultVal);
+
+       if (status) {
+
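+           /* Dot crawl is a boolean control, so the max is hard-coded to 1. */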
+           dwMaxVal = 1;
+           /* Check whether the value exceeds the max; the min value per the
+            * EDS is always 0, so there is no need to check it. */
+
+           /* Tibet issue 1603772: Dot crawl does not persist after reboot/hibernate. */
+           /* Details : "DotCrawl.Value" is compared with "dwDefaultVal". Since */
+           /*           dwDefaultVal is always 0, the dot crawl value is always set to 0. */
+           /* Fix     : Compare the current dot crawl value with dwMaxVal instead. */
+
+           if (sdvo_priv->dispParams.DotCrawl.Value > dwMaxVal)
+               sdvo_priv->dispParams.DotCrawl.Value = dwMaxVal;
+
+           status =
+               i830_sdvo_set_dotcrawl(output,
+                                      sdvo_priv->dispParams.DotCrawl.Value);
+           if (!status)
+               return status;
+
+           sdvo_priv->dispParams.DotCrawl.Max = dwMaxVal;
+           sdvo_priv->dispParams.DotCrawl.Min = 0;
+           sdvo_priv->dispParams.DotCrawl.Default = dwMaxVal;
+           sdvo_priv->dispParams.DotCrawl.Step = 1;
+       } else {
+           return status;
+       }
+    }
+
+    return TRUE;
+}
+
+static bool i830_tv_set_overscan_parameters(struct drm_output * output)
+{
+    u8 status;
+
+    u32 dwDefaultVal = 0;
+    u32 dwMaxVal = 0;
+    u32 dwPercentageValue = 0;
+    u32 dwDefOverscanXValue = 0;
+    u32 dwDefOverscanYValue = 0;
+    u32 dwOverscanValue = 0;
+    u32 dwSupportedEnhancements;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    /* Get supported picture enhancements */
+    status =
+       i830_sdvo_get_supported_enhancements(output,
+                                            &dwSupportedEnhancements);
+    if (!status)
+       return status;
+
+    /* Horizontal Overscan */
+    if (dwSupportedEnhancements & SDVO_HORIZONTAL_OVERSCAN) {
+       status =
+           i830_sdvo_get_max_horizontal_overscan(output, &dwMaxVal,
+                                                 &dwDefaultVal);
+       if (!status)
+           return status;
+
+       /*Calculate the default value in terms of percentage */
+       dwDefOverscanXValue = ((dwDefaultVal * 100) / dwMaxVal);
+
+       /*Calculate the default value in 0-1000 range */
+       dwDefOverscanXValue = (dwDefOverscanXValue * 10);
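+
+       /* Illustrative numbers (not from any EDS): with dwMaxVal = 50 and
+        * dwDefaultVal = 10 the default becomes (10*100/50)*10 = 200 on the
+        * driver's 0-1000 scale, and a user value of 500 maps back through
+        * 50% to 25 device units. */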
+
+       /*Overscan is in the range of 0 to 10000 as per MS spec */
+       if (sdvo_priv->OverScanX.Value > MAX_VAL)
+           sdvo_priv->OverScanX.Value = dwDefOverscanXValue;
+
+       /*Calculate the percentage(0-100%) of the overscan value */
+       dwPercentageValue = (sdvo_priv->OverScanX.Value * 100) / 1000;
+
+       /* Now map the % value to the absolute value to be programmed to the encoder */
+       dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
+
+       status = i830_sdvo_set_horizontal_overscan(output, dwOverscanValue);
+       if (!status)
+           return status;
+
+       sdvo_priv->OverScanX.Max = 1000;
+       sdvo_priv->OverScanX.Min = 0;
+       sdvo_priv->OverScanX.Default = dwDefOverscanXValue;
+       sdvo_priv->OverScanX.Step = 20;
+    }
+
+    /* Vertical Overscan */
+    if (dwSupportedEnhancements & SDVO_VERTICAL_OVERSCAN) {
+       status =
+           i830_sdvo_get_max_vertical_overscan(output, &dwMaxVal,
+                                               &dwDefaultVal);
+       if (!status)
+           return status;
+
+       /*Calculate the default value in terms of percentage */
+       dwDefOverscanYValue = ((dwDefaultVal * 100) / dwMaxVal);
+
+       /*Calculate the default value in 0-1000 range */
+       dwDefOverscanYValue = (dwDefOverscanYValue * 10);
+
+       /*Overscan is in the range of 0 to 10000 as per MS spec */
+       if (sdvo_priv->OverScanY.Value > MAX_VAL)
+           sdvo_priv->OverScanY.Value = dwDefOverscanYValue;
+
+       /*Calculate the percentage(0-100%) of the overscan value */
+       dwPercentageValue = (sdvo_priv->OverScanY.Value * 100) / 1000;
+
+       /* Now map the % value to the absolute value to be programmed to the encoder */
+       dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
+
+       status = i830_sdvo_set_vertical_overscan(output, dwOverscanValue);
+       if (!status)
+           return status;
+
+       sdvo_priv->OverScanY.Max = 1000;
+       sdvo_priv->OverScanY.Min = 0;
+       sdvo_priv->OverScanY.Default = dwDefOverscanYValue;
+       sdvo_priv->OverScanY.Step = 20;
+
+    }
+    /* end Vertical Overscan */
+    return TRUE;
+}
+
+static bool i830_translate_dtd2timing(struct drm_display_mode * pTimingInfo,
+                         struct intel_sdvo_dtd *pDTD)
+{
+
+    u32 dwHBLHigh = 0;
+    u32 dwVBLHigh = 0;
+    u32 dwHSHigh1 = 0;
+    u32 dwHSHigh2 = 0;
+    u32 dwVSHigh1 = 0;
+    u32 dwVSHigh2 = 0;
+    u32 dwVPWLow = 0;
+    bool status = FALSE;
+
+    if ((pDTD == NULL) || (pTimingInfo == NULL)) {
+       return status;
+    }
+
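+    /* SDVO DTDs pack each timing value as 8 low bits in its own byte plus
+     * extra high bits in a shared "high" byte, in the style of EDID
+     * detailed timing descriptors; the shifts below reassemble them. */
+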
+    pTimingInfo->clock = pDTD->part1.clock * 10000 / 1000;    /* DTD clock is in 10 kHz units; convert to kHz */
+
+    pTimingInfo->hdisplay = pTimingInfo->crtc_hdisplay =
+       (u32) pDTD->part1.
+       h_active | ((u32) (pDTD->part1.h_high & 0xF0) << 4);
+
+    pTimingInfo->vdisplay = pTimingInfo->crtc_vdisplay =
+       (u32) pDTD->part1.
+       v_active | ((u32) (pDTD->part1.v_high & 0xF0) << 4);
+
+    pTimingInfo->crtc_hblank_start = pTimingInfo->crtc_hdisplay;
+
+    /* Horizontal Total = Horizontal Active + Horizontal Blanking */
+    dwHBLHigh = (u32) (pDTD->part1.h_high & 0x0F);
+    pTimingInfo->htotal = pTimingInfo->crtc_htotal =
+       pTimingInfo->crtc_hdisplay + (u32) pDTD->part1.h_blank +
+       (dwHBLHigh << 8);
+
+    pTimingInfo->crtc_hblank_end = pTimingInfo->crtc_htotal - 1;
+
+    /* Vertical Total = Vertical Active + Vertical Blanking */
+    dwVBLHigh = (u32) (pDTD->part1.v_high & 0x0F);
+    pTimingInfo->vtotal = pTimingInfo->crtc_vtotal =
+       pTimingInfo->crtc_vdisplay + (u32) pDTD->part1.v_blank +
+       (dwVBLHigh << 8);
+    pTimingInfo->crtc_vblank_start = pTimingInfo->crtc_vdisplay;
+    pTimingInfo->crtc_vblank_end = pTimingInfo->crtc_vtotal - 1;
+
+    /* Horz Sync Start = Horz Blank Start + Horz Sync Offset */
+    dwHSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0xC0);
+    pTimingInfo->hsync_start = pTimingInfo->crtc_hsync_start =
+       pTimingInfo->crtc_hblank_start + (u32) pDTD->part2.h_sync_off +
+       (dwHSHigh1 << 2);
+
+    /* Horz Sync End = Horz Sync Start + Horz Sync Pulse Width */
+    dwHSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x30);
+    pTimingInfo->hsync_end = pTimingInfo->crtc_hsync_end =
+       pTimingInfo->crtc_hsync_start + (u32) pDTD->part2.h_sync_width +
+       (dwHSHigh2 << 4) - 1;
+
+    /* Vert Sync Start = Vert Blank Start + Vert Sync Offset */
+    dwVSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0x0C);
+    dwVPWLow = (u32) (pDTD->part2.v_sync_off_width & 0xF0);
+
+    pTimingInfo->vsync_start = pTimingInfo->crtc_vsync_start =
+       pTimingInfo->crtc_vblank_start + (dwVPWLow >> 4) + (dwVSHigh1 << 2);
+
+    /* Vert Sync End = Vert Sync Start + Vert Sync Pulse Width */
+    dwVSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x03);
+    pTimingInfo->vsync_end = pTimingInfo->crtc_vsync_end =
+       pTimingInfo->crtc_vsync_start +
+       (u32) (pDTD->part2.v_sync_off_width & 0x0F) + (dwVSHigh2 << 4) - 1;
+
+    /* Note: the mode's sync-polarity flags are not translated back here */
+    status = TRUE;
+
+    return status;
+}
+
+static void i830_translate_timing2dtd(struct drm_display_mode * mode, struct intel_sdvo_dtd *dtd)
+{
+    u16 width, height;
+    u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+    u16 h_sync_offset, v_sync_offset;
+
+    width = mode->crtc_hdisplay;
+    height = mode->crtc_vdisplay;
+
+    /* do some mode translations */
+    h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+    h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+    v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+    v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+    h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+    v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+    dtd->part1.clock = mode->clock * 1000 / 10000;     /* mode->clock is in kHz; the DTD clock field is in 10 kHz units */
+    dtd->part1.h_active = width & 0xff;
+    dtd->part1.h_blank = h_blank_len & 0xff;
+    dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+       ((h_blank_len >> 8) & 0xf);
+    dtd->part1.v_active = height & 0xff;
+    dtd->part1.v_blank = v_blank_len & 0xff;
+    dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+       ((v_blank_len >> 8) & 0xf);
+
+    dtd->part2.h_sync_off = h_sync_offset;
+    dtd->part2.h_sync_width = h_sync_len & 0xff;
+    dtd->part2.v_sync_off_width = ((v_sync_offset & 0xf) << 4 |
+                                  (v_sync_len & 0xf)) + 1;
+    dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+       ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+       ((v_sync_len & 0x30) >> 4);
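+    /* sync_off_width_high packs the overflow bits: h_sync_offset[9:8] into
+     * bits 7:6, h_sync_width[9:8] into bits 5:4, v_sync_offset[5:4] into
+     * bits 3:2, and v_sync_width[5:4] into bits 1:0. */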
+
+    dtd->part2.dtd_flags = 0x18;
+    if (mode->flags & V_PHSYNC)
+       dtd->part2.dtd_flags |= 0x2;
+    if (mode->flags & V_PVSYNC)
+       dtd->part2.dtd_flags |= 0x4;
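+    /* Assumption (EDID-style DTD flags): 0x18 selects digital separate
+     * sync, with bit 1 marking positive hsync and bit 2 positive vsync. */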
+
+    dtd->part2.sdvo_flags = 0;
+    dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+    dtd->part2.reserved = 0;
+
+}
+
+static bool i830_tv_set_target_io(struct drm_output* output)
+{
+    bool status;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+    status = intel_sdvo_set_target_input(output, TRUE, FALSE);
+    if (status)
+       status = intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
+
+    return status;
+}
+
+static bool i830_tv_get_max_min_dotclock(struct drm_output* output)
+{
+    u32 dwMaxClkRateMul = 1;
+    u32 dwMinClkRateMul = 1;
+    u8 status;
+
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    /* Set Target Input/Outputs */
+    status = i830_tv_set_target_io(output);
+    if (!status) {
+       DRM_DEBUG("SetTargetIO function FAILED!!! \n");
+       return status;
+    }
+
+    /* Get the clock rate multiplies supported by the encoder */
+    dwMinClkRateMul = 1;
+#if 0
+    /* Why is this needed? Sometimes the TV fails to come up because of a
+     * stale clock-rate setting left over from the previous run. */
+    dwClkRateMulMask = i830_sdvo_get_clock_rate_mult(output);
+
+    /* Find the minimum clock rate multiplier supported */
+
+    if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_1X)
+       dwMinClkRateMul = 1;
+    else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_2X)
+       dwMinClkRateMul = 2;
+    else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_3X)
+       dwMinClkRateMul = 3;
+    else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_4X)
+       dwMinClkRateMul = 4;
+    else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_5X)
+       dwMinClkRateMul = 5;
+    else
+       return FALSE;
+#endif
+    /* Get the min and max input Dot Clock supported by the encoder */
+    status = i830_sdvo_get_input_output_pixelclock_range(output, FALSE);       /* input */
+
+    if (!status) {
+       DRM_DEBUG("SDVOGetInputPixelClockRange() FAILED!!! \n");
+       return status;
+    }
+
+    /* Get the min and max output Dot Clock supported by the encoder */
+    status = i830_sdvo_get_input_output_pixelclock_range(output, TRUE);        /* output */
+
+    if (!status) {
+       DRM_DEBUG("SDVOGetOutputPixelClockRange() FAILED!!! \n");
+       return status;
+    }
+
+    /* Maximum Dot Clock supported should be the minimum of the maximum */
+    /* dot clock supported by the encoder & the SDVO bus clock rate */
+    sdvo_priv->dwMaxDotClk =
+       ((sdvo_priv->dwMaxInDotClk * dwMaxClkRateMul) <
+        (sdvo_priv->dwMaxOutDotClk)) ? (sdvo_priv->dwMaxInDotClk *
+                                    dwMaxClkRateMul) : (sdvo_priv->dwMaxOutDotClk);
+
+    /* Minimum Dot Clock supported should be the maximum of the minimum */
+    /* dot clocks supported by the input & output */
+    sdvo_priv->dwMinDotClk =
+       ((sdvo_priv->dwMinInDotClk * dwMinClkRateMul) >
+        (sdvo_priv->dwMinOutDotClk)) ? (sdvo_priv->dwMinInDotClk *
+                                    dwMinClkRateMul) : (sdvo_priv->dwMinOutDotClk);
+
+    DRM_DEBUG("leave, i830_tv_get_max_min_dotclock() !!! \n");
+
+    return TRUE;
+
+}
+
+bool i830_tv_mode_check_support(struct drm_output* output, struct drm_display_mode* pMode)
+{
+    u32 dwDotClk = 0;
+    bool status;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+
+    dwDotClk = pMode->clock * 1000;
+
+    /* TODO: Need to fix this from the SoftBios side. */
+    if (sdvo_priv->TVMode == TVMODE_HDTV) {
+       if (((pMode->hdisplay == 1920) && (pMode->vdisplay== 1080)) ||
+           ((pMode->hdisplay== 1864) && (pMode->vdisplay== 1050)) ||
+           ((pMode->hdisplay== 1704) && (pMode->vdisplay== 960)) ||
+           ((pMode->hdisplay== 640) && (pMode->vdisplay== 448)))
+           return true;
+    }
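+    /* These HDTV resolutions are force-allowed regardless of the dot-clock
+     * range check below (see the SoftBios TODO above). */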
+
+    if (sdvo_priv->bGetClk) {
+       status = i830_tv_get_max_min_dotclock(output);
+       if (!status) {
+           DRM_DEBUG("get max min dotclok failed\n");
+           return status;
+       }
+       sdvo_priv->bGetClk = false;
+    }
+
+    /* Check the dot clock first: the requested dot clock must fall within */
+    /* the supported range for the mode to be supported. */
+    if ((dwDotClk <= sdvo_priv->dwMinDotClk) || (dwDotClk >= sdvo_priv->dwMaxDotClk)) {
+       DRM_DEBUG("dwDotClk value is out of range\n");
+       /*TODO: now consider VBT add and Remove mode. */
+       /* This mode can't be supported */
+       return false;
+    }
+    DRM_DEBUG("i830_tv_mode_check_support leave\n");
+    return true;
+
+}
+
+void print_Pll(char *prefix, ex_intel_clock_t * clock)
+{
+    DRM_DEBUG("%s: dotclock %d vco %d ((m %d, m1 %d, m2 %d), n %d, (p %d, p1 %d, p2 %d))\n",
+             prefix, clock->dot, clock->vco, clock->m, clock->m1, clock->m2,
+             clock->n, clock->p, clock->p1, clock->p2);
+}
+
+extern int intel_panel_fitter_pipe (struct drm_device *dev);
+extern int intel_get_core_clock_speed(struct drm_device *dev);
+
+void i830_sdvo_tv_settiming(struct drm_crtc *crtc, struct drm_display_mode * mode,
+                      struct drm_display_mode * adjusted_mode)
+{
+
+       struct drm_device *dev = crtc->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+    int pipe = 0;
+    int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+    int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+    int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+    int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+    int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+    int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+    int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+    int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+    int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+    int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+    int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+    int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+    int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+    ex_intel_clock_t clock;
+    u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+    bool ok, is_sdvo = FALSE;
+    int centerX = 0, centerY = 0;
+    u32 ulPortMultiplier, ulTemp, ulDotClock;
+    int sdvo_pixel_multiply;
+       u32 dotclock;
+
+    /* Set up some convenient bools for what outputs are connected to
+     * our pipe, used in DPLL setup.
+     */
+       if (!crtc->fb) {
+               DRM_ERROR("Can't set mode without attached fb\n");
+               return;
+       }     
+    is_sdvo = TRUE;
+    ok = TRUE;
+    ulDotClock = mode->clock * 1000 / 1000;    /* mode->clock is already in kHz; the *1000/1000 is a no-op */
+    for (ulPortMultiplier = 1; ulPortMultiplier <= 5; ulPortMultiplier++) {
+       ulTemp = ulDotClock * ulPortMultiplier;
+       if ((ulTemp >= 100000) && (ulTemp <= 200000)) {
+           if ((ulPortMultiplier == 3) || (ulPortMultiplier == 5))
+               continue;
+           else
+               break;
+       }
+    }
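+    /* The loop above picks the smallest clock-rate multiplier that lifts the
+     * dot clock into the 100-200 MHz SDVO bus range (values are in kHz),
+     * skipping the unsupported 3x and 5x multipliers. */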
+    /* e.g. ulPortMultiplier == 2 with a dot clock of 0x1babc falls into the first case below */
+    dotclock = ulPortMultiplier * (mode->clock * 1000) / 1000;
+       DRM_DEBUG("mode->clock is %x, dotclock is %x,!\n", mode->clock,dotclock);
+
+    if ((dotclock >= 100000) && (dotclock < 140500)) {
+       DRM_DEBUG("dotclock is between 10000 and 140500!\n");
+       clock.p1 = 0x2;
+       clock.p2 = 0x00;
+       clock.n = 0x3;
+       clock.m1 = 0x10;
+       clock.m2 = 0x8;
+    } else if ((dotclock >= 140500) && (dotclock <= 200000)) {
+       DRM_DEBUG("dotclock is between 140500 and 200000!\n");
+       clock.p1 = 0x1;
+       /* CG was using 0x10 from the spreadsheet; it should be 0 */
+       /* pClock_Data->Clk_P2 = 0x10; */
+       clock.p2 = 0x00;
+       clock.n = 0x6;
+       clock.m1 = 0xC;
+       clock.m2 = 0x8;
+    } else
+       ok = FALSE;
+
+    if (!ok)
+       DRM_DEBUG("Couldn't find PLL settings for mode!\n");
+
+    fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+    dpll = DPLL_VGA_MODE_DIS | DPLL_CLOCK_PHASE_9;
+
+    dpll |= DPLLB_MODE_DAC_SERIAL;
+
+    sdvo_pixel_multiply = ulPortMultiplier;
+    dpll |= DPLL_DVO_HIGH_SPEED;
+    dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+
+    /* compute bitmask from p1 value */
+    dpll |= (clock.p1 << 16);
+    dpll |= (clock.p2 << 24);
+
+    dpll |= PLL_REF_INPUT_TVCLKINBC;
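+
+    /* At this point dpll carries the SDVO pixel multiplier in the hi-res
+     * multiplier field, the p1/p2 dividers at bits 16 and 24, and the TV
+     * clock (TVCLKINBC) as the reference input; fp holds n/m1/m2. */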
+
+    /* Set up the display plane register */
+    dspcntr = DISPPLANE_GAMMA_ENABLE;
+    switch (crtc->fb->bits_per_pixel) {
+    case 8:
+       dspcntr |= DISPPLANE_8BPP;
+       break;
+    case 16:
+       if (crtc->fb->depth == 15)
+           dspcntr |= DISPPLANE_15_16BPP;
+       else
+           dspcntr |= DISPPLANE_16BPP;
+       break;
+    case 32:
+       dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+       break;
+    default:
+       DRM_DEBUG("unknown display bpp\n");
+    }
+
+    if (pipe == 0)
+       dspcntr |= DISPPLANE_SEL_PIPE_A;
+    else
+       dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+    pipeconf = I915_READ(pipeconf_reg);
+    if (pipe == 0) {
+       /* Enable pixel doubling when the dot clock is > 90% of the (display)
+        * core speed.
+        *
+        * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+        * pipe == 0 check?
+        */
+       /* FIXME: check the units -- mode->clock is in kHz */
+       if (mode->clock * 1000 > (intel_get_core_clock_speed(dev)) * 9 / 10) {
+           pipeconf |= PIPEACONF_DOUBLE_WIDE;
+           DRM_DEBUG("PIPEACONF_DOUBLE_WIDE\n");
+       } else {
+           pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+           DRM_DEBUG("non PIPEACONF_DOUBLE_WIDE\n");
+       }
+    }
+       
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+    pipeconf |= PIPEACONF_ENABLE;
+    dpll |= DPLL_VCO_ENABLE;
+
+    /* Disable the panel fitter if it was on our pipe */
+    if (intel_panel_fitter_pipe(dev) == pipe)
+       I915_WRITE(PFIT_CONTROL, 0);
+
+    print_Pll("chosen", &clock);
+       DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+       drm_mode_debug_printmodeline(dev, mode);        
+       DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d\n",
+                 mode->mode_id, mode->name, mode->crtc_htotal, mode->crtc_hdisplay,
+                 mode->crtc_hblank_end, mode->crtc_hblank_start,
+                 mode->crtc_vtotal, mode->crtc_vdisplay,
+                 mode->crtc_vblank_end, mode->crtc_vblank_start);      
+    DRM_DEBUG("clock regs: 0x%08x, 0x%08x,dspntr is 0x%8x, pipeconf is 0x%8x\n", (int)dpll,
+             (int)fp,(int)dspcntr,(int)pipeconf);
+
+    if (dpll & DPLL_VCO_ENABLE) {
+       I915_WRITE(fp_reg, fp);
+       I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+       (void)I915_READ(dpll_reg);
+       udelay(150);
+    }
+    I915_WRITE(fp_reg, fp);
+    I915_WRITE(dpll_reg, dpll);
+    (void)I915_READ(dpll_reg);
+    /* Wait for the clocks to stabilize. */
+    udelay(150);
+
+       /* write it again -- the BIOS does, after all */
+       I915_WRITE(dpll_reg, dpll);
+       I915_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+    I915_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+               ((mode->crtc_htotal - 1) << 16));
+    I915_WRITE(hblank_reg, (mode->crtc_hblank_start - 1) |
+               ((mode->crtc_hblank_end - 1) << 16));
+    I915_WRITE(hsync_reg, (mode->crtc_hsync_start - 1) |
+               ((mode->crtc_hsync_end - 1) << 16));
+    I915_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+               ((mode->crtc_vtotal - 1) << 16));
+    I915_WRITE(vblank_reg, (mode->crtc_vblank_start - 1) |
+               ((mode->crtc_vblank_end - 1) << 16));
+    I915_WRITE(vsync_reg, (mode->crtc_vsync_start - 1) |
+               ((mode->crtc_vsync_end - 1) << 16));
+       I915_WRITE(dspstride_reg, crtc->fb->pitch);
+
+    if (0) {
+
+       centerX = (adjusted_mode->crtc_hdisplay - mode->hdisplay) / 2;
+       centerY = (adjusted_mode->crtc_vdisplay - mode->vdisplay) / 2;
+       I915_WRITE(dspsize_reg,
+                   ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+
+       I915_WRITE(dsppos_reg, centerY << 16 | centerX);
+       I915_WRITE(pipesrc_reg,
+                   ((adjusted_mode->crtc_hdisplay -
+                     1) << 16) | (adjusted_mode->crtc_vdisplay - 1));
+    } else {
+       /* pipesrc and dspsize control the size that is scaled from, which should
+        * always be the user's requested size.
+        */
+       I915_WRITE(dspsize_reg,
+                   ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       I915_WRITE(dsppos_reg, 0);
+       I915_WRITE(pipesrc_reg,
+                   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+
+    }
+    I915_WRITE(pipeconf_reg, pipeconf);
+    I915_READ(pipeconf_reg);
+       
+    intel_wait_for_vblank(dev);
+
+    I915_WRITE(dspcntr_reg, dspcntr);
+       /* Flush the plane changes */
+       //intel_pipe_set_base(crtc, 0, 0);
+       /* Disable the VGA plane that we never use */
+       //I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);       
+    //intel_wait_for_vblank(dev);
+
+}
+
+static void intel_sdvo_mode_set(struct drm_output *output,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = output->crtc;
+       struct intel_crtc *intel_crtc = crtc->driver_private;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+       u32 sdvox;
+       struct intel_sdvo_dtd output_dtd;
+       int sdvo_pixel_multiply;
+       bool success;
+       struct drm_display_mode * save_mode;
+       DRM_DEBUG("xxintel_sdvo_mode_set\n");
+
+       if (!mode)
+               return;
+
+    if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
+       if (!i830_tv_mode_check_support(output, mode)) {
+           DRM_DEBUG("mode setting failed, use the forced mode\n");
+           mode = &tv_modes[0].mode_entry;
+           drm_mode_set_crtcinfo(mode, 0);
+       }
+    }  
+    save_mode = mode;
+#if 0
+       width = mode->crtc_hdisplay;
+       height = mode->crtc_vdisplay;
+
+       /* do some mode translations */
+       h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+       h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+       v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+       v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+       h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+       v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+       output_dtd.part1.clock = mode->clock / 10;
+       output_dtd.part1.h_active = width & 0xff;
+       output_dtd.part1.h_blank = h_blank_len & 0xff;
+       output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
+               ((h_blank_len >> 8) & 0xf);
+       output_dtd.part1.v_active = height & 0xff;
+       output_dtd.part1.v_blank = v_blank_len & 0xff;
+       output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
+               ((v_blank_len >> 8) & 0xf);
+       
+       output_dtd.part2.h_sync_off = h_sync_offset;
+       output_dtd.part2.h_sync_width = h_sync_len & 0xff;
+       output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+               (v_sync_len & 0xf);
+       output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+               ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+               ((v_sync_len & 0x30) >> 4);
+       
+       output_dtd.part2.dtd_flags = 0x18;
+       if (mode->flags & V_PHSYNC)
+               output_dtd.part2.dtd_flags |= 0x2;
+       if (mode->flags & V_PVSYNC)
+               output_dtd.part2.dtd_flags |= 0x4;
+
+       output_dtd.part2.sdvo_flags = 0;
+       output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
+       output_dtd.part2.reserved = 0;
+#else
+    /* disable and enable the display output */
+    intel_sdvo_set_target_output(output, 0);
+
+    //intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
+    memset(&output_dtd, 0, sizeof(struct intel_sdvo_dtd));
+    /* check if this mode can be supported or not */
+       
+    i830_translate_timing2dtd(mode, &output_dtd);
+#endif 
+    intel_sdvo_set_target_output(output, 0);
+    /* set the target input & output first */
+       /* Set the input timing to the screen. Assume always input 0. */
+       intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
+       intel_sdvo_set_output_timing(output, &output_dtd);
+       intel_sdvo_set_target_input(output, true, false);
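+       /* Programming order matters: target the output, hand it the output
+        * DTD, then switch targeting back to input 0 before the input
+        * timing is written below. */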
+
+    if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
+       i830_tv_set_overscan_parameters(output);
+       /* Set TV standard */
+       #if 0
+       if (sdvo_priv->TVMode == TVMODE_HDTV)
+           i830_sdvo_map_hdtvstd_bitmask(output);
+       else
+           i830_sdvo_map_sdtvstd_bitmask(output);
+       #endif
+       /* Set TV format */
+       i830_sdvo_set_tvoutputs_formats(output);
+       /* We would like to use i830_sdvo_create_preferred_input_timing() to
+        * provide the device with a timing it can support, if it supports that
+        * feature.  However, presumably we would need to adjust the CRTC to output
+        * the preferred timing, and we don't support that currently.
+        */
+       success = i830_sdvo_create_preferred_input_timing(output, mode);
+       if (success) {
+           i830_sdvo_get_preferred_input_timing(output, &output_dtd);
+       }
+       /* (Overscan was set above, before the input timing, because the
+        * input timing depends on the overscan values.) */
+
+    }
+       
+
+#if 0
+       success = intel_sdvo_create_preferred_input_timing(output, clock,
+                                                          width, height);
+       if (success) {
+               struct intel_sdvo_dtd *input_dtd;
+               
+               intel_sdvo_get_preferred_input_timing(output, &input_dtd);
+               intel_sdvo_set_input_timing(output, &input_dtd);
+       }
+#else
+    /* Set input timing (in DTD) */
+       intel_sdvo_set_input_timing(output, &output_dtd);
+#endif 
+    if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
+       
+       DRM_DEBUG("xxintel_sdvo_mode_set tv path\n");
+       i830_tv_program_display_params(output);
+       /* translate dtd 2 timing */
+       i830_translate_dtd2timing(mode, &output_dtd);
+       /* Program clock rate multiplier, 2x,clock is = 0x360b730 */
+       if ((mode->clock * 1000 >= 24000000)
+           && (mode->clock * 1000 < 50000000)) {
+           intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_4X);
+       } else if ((mode->clock * 1000 >= 50000000)
+                  && (mode->clock * 1000 < 100000000)) {
+           intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_2X);
+       } else if ((mode->clock * 1000 >= 100000000)
+                  && (mode->clock * 1000 < 200000000)) {
+           intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_1X);
+       } else
+           DRM_DEBUG("i830_sdvo_set_clock_rate is failed\n");
+
+       i830_sdvo_tv_settiming(output->crtc, mode, adjusted_mode);
+       //intel_crtc_mode_set(output->crtc, mode,adjusted_mode,0,0);
+       mode = save_mode;
+    } else {
+       DRM_DEBUG("xxintel_sdvo_mode_set - non tv path\n");
+       switch (intel_sdvo_get_pixel_multiplier(mode)) {
+       case 1:
+               intel_sdvo_set_clock_rate_mult(output,
+                                              SDVO_CLOCK_RATE_MULT_1X);
+               break;
+       case 2:
+               intel_sdvo_set_clock_rate_mult(output,
+                                              SDVO_CLOCK_RATE_MULT_2X);
+               break;
+       case 4:
+               intel_sdvo_set_clock_rate_mult(output,
+                                              SDVO_CLOCK_RATE_MULT_4X);
+               break;
+       }       
+    }
+       /* Set the SDVO control regs. */
+        if (0/*IS_I965GM(dev)*/) {
+                sdvox = SDVO_BORDER_ENABLE;
+        } else {
+                sdvox = I915_READ(sdvo_priv->output_device);
+                switch (sdvo_priv->output_device) {
+                case SDVOB:
+                        sdvox &= SDVOB_PRESERVE_MASK;
+                        break;
+                case SDVOC:
+                        sdvox &= SDVOC_PRESERVE_MASK;
+                        break;
+                }
+                sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+        }
+       if (intel_crtc->pipe == 1)
+               sdvox |= SDVO_PIPE_B_SELECT;
+
+       sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+       if (IS_I965G(dev)) {
+               /* done in crtc_mode_set as the dpll_md reg must be written 
+                  early */
+       } else if (IS_POULSBO(dev) || IS_I945G(dev) || IS_I945GM(dev)) {
+               /* done in crtc_mode_set as it lives inside the 
+                  dpll register */
+       } else {
+               sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+       }
+
+       intel_sdvo_write_sdvox(output, sdvox);
+       i830_sdvo_set_iomap(output);    
+}
+
+static void intel_sdvo_dpms(struct drm_output *output, int mode)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       u32 temp;
+
+       DRM_DEBUG("xxintel_sdvo_dpms, dpms mode is %d, active output is %d\n",mode,sdvo_priv->active_outputs);
+
+#ifdef SII_1392_WA
+       if((SII_1392==1) && (drm_psb_no_fb ==1)) {
+               DRM_DEBUG("don't touch 1392 card when no_fb=1\n");
+               return;
+       }
+#endif
+
+       if (mode != DPMSModeOn) {
+               intel_sdvo_set_active_outputs(output, sdvo_priv->output_device);
+               if (0)
+                       intel_sdvo_set_encoder_power_state(output, mode);
+
+               if (mode == DPMSModeOff) {
+                       temp = I915_READ(sdvo_priv->output_device);
+                       if ((temp & SDVO_ENABLE) != 0) {
+                               intel_sdvo_write_sdvox(output, temp & ~SDVO_ENABLE);
+                       }
+               }
+       } else {
+               bool input1, input2;
+               int i;
+               u8 status;
+               
+               temp = I915_READ(sdvo_priv->output_device);
+               if ((temp & SDVO_ENABLE) == 0)
+                       intel_sdvo_write_sdvox(output, temp | SDVO_ENABLE);
+               for (i = 0; i < 2; i++)
+                 intel_wait_for_vblank(dev);
+               
+               status = intel_sdvo_get_trained_inputs(output, &input1,
+                                                      &input2);
+
+               
+               /* Warn if the device reported failure to sync.
+                * A lot of SDVO devices fail to notify of sync, but
+                * given that the status is a success, we succeeded.
+                */
+               if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+                       DRM_DEBUG("First %s output reported failure to sync\n",
+                                  SDVO_NAME(sdvo_priv));
+               }
+               
+               if (0)
+                       intel_sdvo_set_encoder_power_state(output, mode);
+               
+               DRM_DEBUG("xiaolin active output is %d\n",sdvo_priv->active_outputs);
+               intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
+       }       
+       return;
+}
+
+static void intel_sdvo_save(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+       DRM_DEBUG("xxintel_sdvo_save\n");
+
+       sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(output);
+       intel_sdvo_get_active_outputs(output, &sdvo_priv->save_active_outputs);
+
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+               intel_sdvo_set_target_input(output, true, false);
+               intel_sdvo_get_input_timing(output,
+                                           &sdvo_priv->save_input_dtd_1);
+       }
+
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+               intel_sdvo_set_target_input(output, false, true);
+               intel_sdvo_get_input_timing(output,
+                                           &sdvo_priv->save_input_dtd_2);
+       }
+
+    intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
+    intel_sdvo_get_output_timing(output,
+                               &sdvo_priv->save_output_dtd[sdvo_priv->active_outputs]);        
+       sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
+}
+
+static void intel_sdvo_restore(struct drm_output *output)
+{
+       struct drm_device *dev = output->dev;
+       DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int i;
+       bool input1, input2;
+       u8 status;
+       DRM_DEBUG("xxintel_sdvo_restore\n");
+
+       intel_sdvo_set_active_outputs(output, 0);
+
+    intel_sdvo_set_target_output(output, sdvo_priv->save_active_outputs);
+    intel_sdvo_set_output_timing(output,
+                               &sdvo_priv->save_output_dtd[sdvo_priv->save_active_outputs]);
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+               intel_sdvo_set_target_input(output, true, false);
+               intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_1);
+       }
+
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+               intel_sdvo_set_target_input(output, false, true);
+               intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_2);
+       }
+       
+       intel_sdvo_set_clock_rate_mult(output, sdvo_priv->save_sdvo_mult);
+       
+       I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
+       
+       if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
+       {
+               for (i = 0; i < 2; i++)
+                       intel_wait_for_vblank(dev);
+               status = intel_sdvo_get_trained_inputs(output, &input1, &input2);
+               if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
+                       DRM_DEBUG("First %s output reported failure to sync\n",
+                                  SDVO_NAME(sdvo_priv));
+       }
+       
+    i830_sdvo_set_iomap(output);       
+       intel_sdvo_set_active_outputs(output, sdvo_priv->save_active_outputs);
+}
+
+static bool i830_tv_mode_find(struct drm_output * output,struct drm_display_mode * pMode)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+    bool find = FALSE;
+    int i;
+       
+    DRM_DEBUG("i830_tv_mode_find,0x%x\n", sdvo_priv->TVStandard);
+       
+    for (i = 0; i < NUM_TV_MODES; i++) 
+    {
+       const tv_mode_t *tv_mode = &tv_modes[i];
+       if (strcmp (tv_mode->mode_entry.name, pMode->name) == 0
+                           && (pMode->type & M_T_TV)) {
+           find = TRUE;
+           break;
+       }
+    }
+    return find;
+}
+
+
+static int intel_sdvo_mode_valid(struct drm_output *output,
+                                struct drm_display_mode *mode)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       
+       bool status = TRUE;
+       DRM_DEBUG("xxintel_sdvo_mode_valid\n");
+
+       if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
+          status = i830_tv_mode_check_support(output, mode);
+          if (status) {
+                  if(i830_tv_mode_find(output,mode)) {
+                          DRM_DEBUG("%s is ok\n", mode->name);
+                          return MODE_OK;
+                  }
+                  else
+                          return MODE_CLOCK_RANGE;
+          } else {
+                  DRM_DEBUG("%s is failed\n",
+                                mode->name);
+                  return MODE_CLOCK_RANGE;
+          }
+       }
+
+       if (mode->flags & V_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       if (sdvo_priv->pixel_clock_min > mode->clock)
+               return MODE_CLOCK_LOW;
+
+       if (sdvo_priv->pixel_clock_max < mode->clock)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct drm_output *output, struct intel_sdvo_caps *caps)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+       status = intel_sdvo_read_response(output, caps, sizeof(*caps));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+void i830_tv_get_default_params(struct drm_output * output)
+{
+    u32 dwSupportedSDTVBitMask = 0;
+    u32 dwSupportedHDTVBitMask = 0;
+       u32 dwTVStdBitmask = 0;
+
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;     
+
+
+    /* Get supported TV Standard */
+    i830_sdvo_get_supported_tvoutput_formats(output, &dwSupportedSDTVBitMask,
+                                            &dwSupportedHDTVBitMask,&dwTVStdBitmask);
+
+    sdvo_priv->dwSDVOSDTVBitMask = dwSupportedSDTVBitMask;
+    sdvo_priv->dwSDVOHDTVBitMask = dwSupportedHDTVBitMask;
+       sdvo_priv->TVStdBitmask = dwTVStdBitmask;
+
+}
+
+static enum drm_output_status intel_sdvo_detect(struct drm_output *output)
+{
+       u8 response[2];
+       u8 status;
+       u8 count = 5;
+
+    char deviceName[256];
+    char *name_suffix;
+       char *name_prefix;
+       unsigned char bytes[2];
+        
+       struct drm_device *dev = output->dev;
+       
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;     
+       
+       DRM_DEBUG("xxintel_sdvo_detect\n");
+    intel_sdvo_dpms(output, DPMSModeOn);
+    if (!intel_sdvo_get_capabilities(output, &sdvo_priv->caps)) {
+        /*No SDVO support, power down the pipe */
+        intel_sdvo_dpms(output, DPMSModeOff);
+        return output_status_disconnected;
+    }
+
+#ifdef SII_1392_WA
+       if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id==0xAE)){
+           /*Leave the control of 1392 to X server*/
+               SII_1392=1;
+               printk("%s: detect 1392 card, leave the setting to up level\n", __FUNCTION__);
+               if (drm_psb_no_fb == 0)
+                       intel_sdvo_dpms(output, DPMSModeOff);
+               return output_status_disconnected;
+       }
+#endif
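+    /* Poll GET_ATTACHED_DISPLAYS up to five times; a PENDING response on
+     * the first attempt triggers an SDVO reset and retry, other failures
+     * back off for 500 us before retrying. */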
+    while (count--) {
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+       status = intel_sdvo_read_response(output, &response, 2);
+
+       if(count >3 && status == SDVO_CMD_STATUS_PENDING) {
+               intel_sdvo_write_cmd(output,SDVO_CMD_RESET,NULL,0);
+               intel_sdvo_read_response(output, &response, 2);
+               continue;
+       }
+
+       if ((status != SDVO_CMD_STATUS_SUCCESS) || (response[0] == 0 && response[1] == 0)) {
+           udelay(500);
+           continue;           
+       } else
+           break;
+    }  
+    if (response[0] != 0 || response[1] != 0) {
+       /* Check what device types are connected to the hardware: CRT/HDTV/S-Video/Composite. */
+       /* If a CRT and multiple TVs are attached, give preference in the order below: */
+       /* 1. RGB */
+       /* 2. HDTV */
+       /* 3. S-Video */
+       /* 4. composite */
+       if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "TMDS";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
+       } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "TMDS";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
+       } else if (response[0] & SDVO_OUTPUT_RGB0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "RGB0";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "RGB1";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
+       } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
+       }
+       /* SCART is given second preference */
+       else if (response[0] & SDVO_OUTPUT_SCART0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
+
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
+       }
+       /* If an S-Video TV is connected along with a Composite TV, give preference to S-Video */
+       else if (response[0] & SDVO_OUTPUT_SVID0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
+
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
+       }
+       /* Composite is given least preference */
+       else if (response[0] & SDVO_OUTPUT_CVBS0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
+       } else {
+           DRM_DEBUG("no display attached\n");
+
+           memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+           DRM_DEBUG("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
+                      SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
+                      sdvo_priv->caps.output_flags);
+           name_prefix = "Unknown";
+       }
+
+       /* init para for TV connector */
+       if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
+               DRM_INFO("TV is attaced\n");
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "TV0";
+           /* Init TV mode setting para */
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
+           sdvo_priv->bGetClk = TRUE;
+            if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
+                                         sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
+            /*sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;*/
+                sdvo_priv->TVMode = TVMODE_HDTV;
+            } else {
+            /*sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;*/
+                sdvo_priv->TVMode = TVMODE_SDTV;
+            }
+                       
+           /*intel_output->pDevice->TVEnabled = TRUE;*/
+               
+               i830_tv_get_default_params(output);
+           /*Init Display parameter for TV */
+           sdvo_priv->OverScanX.Value = 0xffffffff;
+           sdvo_priv->OverScanY.Value = 0xffffffff;
+           sdvo_priv->dispParams.Brightness.Value = 0x80;
+           sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
+           sdvo_priv->dispParams.AdaptiveFF.Value = 7;
+           sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
+           sdvo_priv->dispParams.Contrast.Value = 0x40;
+           sdvo_priv->dispParams.PositionX.Value = 0x200;
+           sdvo_priv->dispParams.PositionY.Value = 0x200;
+           sdvo_priv->dispParams.DotCrawl.Value = 1;
+           sdvo_priv->dispParams.ChromaFilter.Value = 1;
+           sdvo_priv->dispParams.LumaFilter.Value = 2;
+           sdvo_priv->dispParams.Sharpness.Value = 4;
+           sdvo_priv->dispParams.Saturation.Value = 0x45;
+           sdvo_priv->dispParams.Hue.Value = 0x40;
+           sdvo_priv->dispParams.Dither.Value = 0;
+               
+       }
+       else {
+            name_prefix = "RGB0";
+           DRM_INFO("non TV is attaced\n");
+       }
+        if (sdvo_priv->output_device == SDVOB) {
+           name_suffix = "-1";
+       } else {
+           name_suffix = "-2";
+       }
+
+       strcpy(deviceName, name_prefix);
+       strcat(deviceName, name_suffix);
+
+       if(output->name && (strcmp(output->name,deviceName) != 0)){
+           DRM_DEBUG("change the output name to %s\n", deviceName);
+           if (!drm_output_rename(output, deviceName)) {
+               drm_output_destroy(output);
+               return output_status_disconnected;
+           }
+
+       }
+       i830_sdvo_set_iomap(output);
+
+       DRM_INFO("get attached displays=0x%x,0x%x,connectedouputs=0x%x\n",
+                 response[0], response[1], sdvo_priv->active_outputs);
+               return output_status_connected;
+    } else {
+        /*No SDVO display device attached */
+        intel_sdvo_dpms(output, DPMSModeOff);
+               sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
+               return output_status_disconnected;
+    }  
+}
+
+static int i830_sdvo_get_tvmode_from_table(struct drm_output *output)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       struct drm_device *dev = output->dev;
+
+       int i, modes = 0;
+
+       for (i = 0; i < NUM_TV_MODES; i++)
+               if (((sdvo_priv->TVMode == TVMODE_HDTV) && /*hdtv mode list */
+               (tv_modes[i].dwSupportedHDTVvss & TVSTANDARD_HDTV_ALL)) ||
+               ((sdvo_priv->TVMode == TVMODE_SDTV) && /*sdtv mode list */
+               (tv_modes[i].dwSupportedSDTVvss & TVSTANDARD_SDTV_ALL))) {
+                       struct drm_display_mode *newmode;
+                       newmode = drm_mode_duplicate(dev, &tv_modes[i].mode_entry);             
+                       drm_mode_set_crtcinfo(newmode,0);
+                       drm_mode_probed_add(output, newmode);
+                       modes++;
+               }
+
+       return modes;   
+
+}
+
+static int intel_sdvo_get_modes(struct drm_output *output)
+{
+       struct intel_output *intel_output = output->driver_private;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+       DRM_DEBUG("xxintel_sdvo_get_modes\n");
+
+       if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
+               DRM_DEBUG("SDVO_DEVICE_TV\n");
+               i830_sdvo_get_tvmode_from_table(output);
+               if (list_empty(&output->probed_modes))
+                       return 0;
+               return 1;
+
+       } else {
+       /* set the bus switch and get the modes */
+       intel_sdvo_set_control_bus_switch(output, SDVO_CONTROL_BUS_DDC2);
+       intel_ddc_get_modes(output);
+
+       if (list_empty(&output->probed_modes))
+               return 0;
+       return 1;
+       }
+#if 0
+       /* Mac mini hack.  On this device, I get DDC through the analog, which
+        * load-detects as disconnected.  I fail to DDC through the SDVO DDC,
+        * but it does load-detect as connected.  So, just steal the DDC bits 
+        * from analog when we fail at finding it the right way.
+        */
+       /* TODO */
+       return NULL;
+
+       return NULL;
+#endif
+}
+
+static void intel_sdvo_destroy(struct drm_output *output)
+{
+       struct intel_output *intel_output = output->driver_private;
+       DRM_DEBUG("xxintel_sdvo_destroy\n");
+
+       if (intel_output->i2c_bus)
+               intel_i2c_destroy(intel_output->i2c_bus);
+
+       if (intel_output) {
+               kfree(intel_output);
+               output->driver_private = NULL;
+       }
+}
+
+static const struct drm_output_funcs intel_sdvo_output_funcs = {
+       .dpms = intel_sdvo_dpms,
+       .save = intel_sdvo_save,
+       .restore = intel_sdvo_restore,
+       .mode_valid = intel_sdvo_mode_valid,
+       .mode_fixup = intel_sdvo_mode_fixup,
+       .prepare = intel_output_prepare,
+       .mode_set = intel_sdvo_mode_set,
+       .commit = intel_output_commit,
+       .detect = intel_sdvo_detect,
+       .get_modes = intel_sdvo_get_modes,
+       .cleanup = intel_sdvo_destroy
+};
+
+extern char hotplug_env;
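+
+/* Minimal /proc hot-plug interface: a read on /proc/dri/sdvo/hotplug blocks
+ * until hotplug_env becomes '1' (presumably set by the hot-plug interrupt
+ * path), and a write from user space re-arms it by clearing it to '0'. */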
+static int intel_sdvo_proc_read_hotplug(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+{
+        memset(buf, 0, count);
+        
+        if (count < 1)
+           return 0;
+   
+        wait_event_interruptible(hotplug_queue, hotplug_env == '1');
+        buf[0] = hotplug_env;
+
+        return count; 
+}
+
+static int intel_sdvo_proc_write_hotplug(struct file *file, const char * user_buffer, unsigned long count, void *data)
+{
+        hotplug_env = '0';
+       return count;
+}
+
+void intel_sdvo_init(struct drm_device *dev, int output_device)
+{
+       struct drm_output *output;
+       struct intel_output *intel_output;
+       struct intel_sdvo_priv *sdvo_priv;
+       struct intel_i2c_chan *i2cbus = NULL;
+       u8 ch[0x40];
+       int i;
+       char name[DRM_OUTPUT_LEN];
+       char *name_prefix;
+       char *name_suffix;
+
+       int count = 3;
+       u8 response[2];
+       u8 status;      
+       unsigned char bytes[2];
+
+       struct proc_dir_entry *ent;
+       char name_hotplug[64] = "dri/sdvo";
+       char name_file[64] = "hotplug";
+
+       DRM_DEBUG("xxintel_sdvo_init\n");
+       
+       init_waitqueue_head(&hotplug_queue);
+
+       if (IS_POULSBO(dev)) {
+               struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
+               u32 sku_value = 0;
+               bool sku_bSDVOEnable = true;
+               if(pci_root)
+               {
+                       pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
+                       pci_read_config_dword(pci_root, 0xD4, &sku_value);
+                       sku_bSDVOEnable = (sku_value & PCI_PORT5_REG80_SDVO_DISABLE)?false : true;
+                       DRM_INFO("intel_sdvo_init: sku_value is 0x%08x\n", sku_value);
+                       DRM_INFO("intel_sdvo_init: sku_bSDVOEnable is %d\n", sku_bSDVOEnable);
+                       if (sku_bSDVOEnable == false)
+                               return;
+               }
+       }
+
+       output = drm_output_create(dev, &intel_sdvo_output_funcs, NULL);
+       if (!output)
+               return;
+
+       intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
+       if (!intel_output) {
+               drm_output_destroy(output);
+               return;
+       }
+
+       sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
+       intel_output->type = INTEL_OUTPUT_SDVO;
+       output->driver_private = intel_output;
+       output->interlace_allowed = 0;
+       output->doublescan_allowed = 0;
+
+       /* setup the DDC bus. */
+       if (output_device == SDVOB)
+               i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+       else
+               i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+
+       if (i2cbus == NULL) {
+               drm_output_destroy(output);
+               return;
+       }
+
+       sdvo_priv->i2c_bus = i2cbus;
+
+       if (output_device == SDVOB) {
+               name_suffix = "-1";
+               sdvo_priv->i2c_bus->slave_addr = 0x38;
+               sdvo_priv->byInputWiring = SDVOB_IN0;
+       } else {
+               name_suffix = "-2";
+               sdvo_priv->i2c_bus->slave_addr = 0x39;
+       }
+
+       sdvo_priv->output_device = output_device;
+       intel_output->i2c_bus = i2cbus;
+       intel_output->dev_priv = sdvo_priv;
+
+       /* Read the regs to test if we can talk to the device */
+       for (i = 0; i < 0x40; i++) {
+               if (!intel_sdvo_read_byte(output, i, &ch[i])) {
+                       DRM_DEBUG("No SDVO device found on SDVO%c\n",
+                                 output_device == SDVOB ? 'B' : 'C');
+                       drm_output_destroy(output);
+                       return;
+               }
+       } 
+
+       proc_sdvo_dir = proc_mkdir(name_hotplug, NULL);
+       if (!proc_sdvo_dir)
+               printk(KERN_ERR "failed to create /proc/dri/sdvo\n");
+
+       ent = create_proc_entry(name_file,
+                       S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH, proc_sdvo_dir);
+       if (ent) {
+               ent->read_proc = intel_sdvo_proc_read_hotplug;
+               ent->write_proc = intel_sdvo_proc_write_hotplug;
+               ent->data = dev;
+       } else {
+               printk(KERN_ERR "failed to create /proc/dri/sdvo/hotplug\n");
+       }
+
+       intel_sdvo_get_capabilities(output, &sdvo_priv->caps);
+
+       /* Enable the active hot-plug notification opcode. */
+       {
+               uint8_t  state_orig;
+               uint8_t  state_set;
+               uint8_t  byArgs_orig[2];
+               uint8_t  byArgs_set[2];
+               uint32_t value;
+
+               intel_sdvo_write_cmd(output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+               state_orig = intel_sdvo_read_response(output, byArgs_orig, 2);
+
+               /* Assemble the little-endian 16-bit response and set bit 0. */
+               value = ((uint32_t)byArgs_orig[1] << 8) | (uint32_t)byArgs_orig[0];
+               value |= 0x1;
+
+               byArgs_orig[0] = (uint8_t)(value & 0xFF);
+               byArgs_orig[1] = (uint8_t)((value >> 8) & 0xFF);
+               intel_sdvo_write_cmd(output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, byArgs_orig, 2);
+
+               /* Read back to confirm the new hot-plug state. */
+               intel_sdvo_write_cmd(output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+               state_set = intel_sdvo_read_response(output, byArgs_set, 2);
+       }
+
+#ifdef SII_1392_WA
+       if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id == 0xAE)) {
+               /* Leave control of the SiI1392 to the X server. */
+               SII_1392 = 1;
+               printk("%s: detected SiI1392 encoder; leaving its setup to userspace\n", __func__);
+               if (drm_psb_no_fb == 0)
+                       intel_sdvo_dpms(output, DPMSModeOff);
+               sdvo_priv->active_outputs = 0;
+               output->subpixel_order = SubPixelHorizontalRGB;
+               name_prefix = "SDVO";
+               sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
+               strcpy(name, name_prefix);
+               strcat(name, name_suffix);
+               if (!drm_output_rename(output, name)) {
+                       drm_output_destroy(output);
+                       return;
+               }
+               return;
+       }
+#endif
+       memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
+
+    while (count--) {
+       intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+       status = intel_sdvo_read_response(output, response, 2);
+
+       if (status == SDVO_CMD_STATUS_SUCCESS)
+               break;
+       udelay(1000);
+    }
+    if (response[0] != 0 || response[1] != 0) {
+       /*
+        * Check which device types are attached (CRT/HDTV/S-Video/Composite).
+        * When a CRT and multiple TVs are attached, prefer them in this order:
+        *  1. RGB
+        *  2. HDTV
+        *  3. S-Video
+        *  4. Composite
+        */
+       if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "TMDS";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
+       } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "TMDS";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
+       } else if (response[0] & SDVO_OUTPUT_RGB0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "RGB0";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
+           output->subpixel_order = SubPixelHorizontalRGB;
+           name_prefix = "RGB1";
+           sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
+       } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
+       }
+       /* SCART is given Second preference */
+       else if (response[0] & SDVO_OUTPUT_SCART0) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
+
+       } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
+           sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
+       }
+        /* if S-Video type TV is connected along with Composite type TV give preference to S-Video */
+        else if (response[0] & SDVO_OUTPUT_SVID0) {
+            sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
+
+        } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
+            sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
+        }
+        /* Composite is given least preference */
+        else if (response[0] & SDVO_OUTPUT_CVBS0) {
+            sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
+        } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
+            sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
+        } else {
+            DRM_DEBUG("no display attached\n");
+
+            memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+            DRM_INFO("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
+                       SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
+                       sdvo_priv->caps.output_flags);
+            name_prefix = "Unknown";
+        }
+
+        /* Initialize parameters for the TV connector. */
+        if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
+            DRM_INFO("TV is attached\n");
+            output->subpixel_order = SubPixelHorizontalRGB;
+            name_prefix = "TV0";
+            /* Initialize the TV mode-setting parameters. */
+            sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
+            sdvo_priv->bGetClk = TRUE;
+            if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
+                                         sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
+                sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;
+                sdvo_priv->TVMode = TVMODE_HDTV;
+            } else {
+                sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;
+                sdvo_priv->TVMode = TVMODE_SDTV;
+            }
+            /*intel_output->pDevice->TVEnabled = TRUE;*/
+            /* Initialize display parameters for TV. */
+            sdvo_priv->OverScanX.Value = 0xffffffff;
+            sdvo_priv->OverScanY.Value = 0xffffffff;
+            sdvo_priv->dispParams.Brightness.Value = 0x80;
+            sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
+            sdvo_priv->dispParams.AdaptiveFF.Value = 7;
+            sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
+            sdvo_priv->dispParams.Contrast.Value = 0x40;
+            sdvo_priv->dispParams.PositionX.Value = 0x200;
+            sdvo_priv->dispParams.PositionY.Value = 0x200;
+            sdvo_priv->dispParams.DotCrawl.Value = 1;
+            sdvo_priv->dispParams.ChromaFilter.Value = 1;
+            sdvo_priv->dispParams.LumaFilter.Value = 2;
+            sdvo_priv->dispParams.Sharpness.Value = 4;
+            sdvo_priv->dispParams.Saturation.Value = 0x45;
+            sdvo_priv->dispParams.Hue.Value = 0x40;
+            sdvo_priv->dispParams.Dither.Value = 0;
+        }
+        else {
+            name_prefix = "RGB0";
+            DRM_INFO("non-TV display is attached\n");
+        }
+
+        strcpy(name, name_prefix);
+        strcat(name, name_suffix);
+        if (!drm_output_rename(output, name)) {
+            drm_output_destroy(output);
+            return;
+        }
+    } else {
+        /* No SDVO display device attached. */
+        intel_sdvo_dpms(output, DPMSModeOff);
+        sdvo_priv->active_outputs = 0;
+        output->subpixel_order = SubPixelHorizontalRGB;
+        name_prefix = "SDVO";
+        sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
+        strcpy(name, name_prefix);
+        strcat(name, name_suffix);
+        if (!drm_output_rename(output, name)) {
+            drm_output_destroy(output);
+            return;
+        }
+
+    }
+
+       /*(void)intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);*/
+
+       /* Set the input timing to the screen. Assume always input 0. */
+       intel_sdvo_set_target_input(output, true, false);
+       
+       intel_sdvo_get_input_pixel_clock_range(output,
+                                              &sdvo_priv->pixel_clock_min,
+                                              &sdvo_priv->pixel_clock_max);
+
+
+       DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
+                 "clock range %dMHz - %dMHz, "
+                 "input 1: %c, input 2: %c, "
+                 "output 1: %c, output 2: %c\n",
+                 SDVO_NAME(sdvo_priv),
+                 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+                 sdvo_priv->caps.device_rev_id,
+                 sdvo_priv->pixel_clock_min / 1000,
+                 sdvo_priv->pixel_clock_max / 1000,
+                 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+                 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+                 /* check currently supported outputs */
+                 sdvo_priv->caps.output_flags & 
+                       (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+                 sdvo_priv->caps.output_flags & 
+                       (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+
+       intel_output->ddc_bus = i2cbus; 
+}
diff --git a/psb-kernel-source-4.41.1/intel_sdvo_regs.h b/psb-kernel-source-4.41.1/intel_sdvo_regs.h
new file mode 100644 (file)
index 0000000..748d4a3
--- /dev/null
@@ -0,0 +1,580 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST   (0)
+#define SDVO_OUTPUT_TMDS0   (1 << 0)
+#define SDVO_OUTPUT_RGB0    (1 << 1)
+#define SDVO_OUTPUT_CVBS0   (1 << 2)
+#define SDVO_OUTPUT_SVID0   (1 << 3)
+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+#define SDVO_OUTPUT_SCART0  (1 << 5)
+#define SDVO_OUTPUT_LVDS0   (1 << 6)
+#define SDVO_OUTPUT_TMDS1   (1 << 8)
+#define SDVO_OUTPUT_RGB1    (1 << 9)
+#define SDVO_OUTPUT_CVBS1   (1 << 10)
+#define SDVO_OUTPUT_SVID1   (1 << 11)
+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+#define SDVO_OUTPUT_SCART1  (1 << 13)
+#define SDVO_OUTPUT_LVDS1   (1 << 14)
+#define SDVO_OUTPUT_LAST    (14)
+
+struct intel_sdvo_caps {
+    u8 vendor_id;
+    u8 device_id;
+    u8 device_rev_id;
+    u8 sdvo_version_major;
+    u8 sdvo_version_minor;
+    unsigned int sdvo_inputs_mask:2;
+    unsigned int smooth_scaling:1;
+    unsigned int sharp_scaling:1;
+    unsigned int up_scaling:1;
+    unsigned int down_scaling:1;
+    unsigned int stall_support:1;
+    unsigned int pad:1;
+    u16 output_flags;
+} __attribute__((packed));
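+
+/*
+ * Illustrative use of the capability flags above (not driver code):
+ *
+ *     struct intel_sdvo_caps caps;
+ *     ...
+ *     if (caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+ *             ;  // at least one digital (TMDS) output is present
+ */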
+
+/** This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+    struct {
+       u16 clock;              /**< pixel clock, in 10kHz units */
+       u8 h_active;            /**< lower 8 bits (pixels) */
+       u8 h_blank;             /**< lower 8 bits (pixels) */
+       u8 h_high;              /**< upper 4 bits each h_active, h_blank */
+       u8 v_active;            /**< lower 8 bits (lines) */
+       u8 v_blank;             /**< lower 8 bits (lines) */
+       u8 v_high;              /**< upper 4 bits each v_active, v_blank */
+    } part1;
+
+    struct {
+       u8 h_sync_off;  /**< lower 8 bits, from hblank start */
+       u8 h_sync_width;        /**< lower 8 bits (pixels) */
+       /** lower 4 bits each vsync offset, vsync width */
+       u8 v_sync_off_width;
+       /**
+        * 2 high bits of hsync offset, 2 high bits of hsync width,
+        * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+        */
+       u8 sync_off_width_high;
+       u8 dtd_flags;
+       u8 sdvo_flags;
+       /** bits 6-7 of vsync offset at bits 6-7 */
+       u8 v_sync_off_high;
+       u8 reserved;
+    } part2;
+} __attribute__((packed));
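+
+/*
+ * Sketch (illustrative only, not referenced by the driver): recovering the
+ * split 12-bit horizontal fields, assuming the layout documented above
+ * (high nibble of h_high extends h_active, low nibble extends h_blank).
+ */
+static inline u16 intel_sdvo_dtd_h_active(const struct intel_sdvo_dtd *dtd)
+{
+       return ((u16)(dtd->part1.h_high & 0xf0) << 4) | dtd->part1.h_active;
+}
+
+static inline u16 intel_sdvo_dtd_h_blank(const struct intel_sdvo_dtd *dtd)
+{
+       return ((u16)(dtd->part1.h_high & 0x0f) << 8) | dtd->part1.h_blank;
+}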
+
+struct intel_sdvo_pixel_clock_range {
+    u16 min;                   /**< pixel clock, in 10kHz units */
+    u16 max;                   /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
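+
+/* Example: with the 10 kHz unit above, min == 2500 encodes 25 MHz and
+ * max == 16500 encodes 165 MHz (a typical single-link TMDS ceiling). */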
+
+struct intel_sdvo_preferred_input_timing_args {
+    u16 clock;
+    u16 width;
+    u16 height;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0                         0x07
+#define SDVO_I2C_ARG_1                         0x06
+#define SDVO_I2C_ARG_2                         0x05
+#define SDVO_I2C_ARG_3                         0x04
+#define SDVO_I2C_ARG_4                         0x03
+#define SDVO_I2C_ARG_5                         0x02
+#define SDVO_I2C_ARG_6                         0x01
+#define SDVO_I2C_ARG_7                         0x00
+#define SDVO_I2C_OPCODE                                0x08
+#define SDVO_I2C_CMD_STATUS                    0x09
+#define SDVO_I2C_RETURN_0                      0x0a
+#define SDVO_I2C_RETURN_1                      0x0b
+#define SDVO_I2C_RETURN_2                      0x0c
+#define SDVO_I2C_RETURN_3                      0x0d
+#define SDVO_I2C_RETURN_4                      0x0e
+#define SDVO_I2C_RETURN_5                      0x0f
+#define SDVO_I2C_RETURN_6                      0x10
+#define SDVO_I2C_RETURN_7                      0x11
+#define SDVO_I2C_VENDOR_BEGIN                  0x20
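+
+/*
+ * Command sequence implied by the register map above: write the arguments
+ * to SDVO_I2C_ARG_n (note ARG_0 sits at offset 0x07, so arguments are laid
+ * out in reverse), write the opcode to SDVO_I2C_OPCODE, poll
+ * SDVO_I2C_CMD_STATUS until it leaves SDVO_CMD_STATUS_PENDING, then read
+ * any results from SDVO_I2C_RETURN_n.
+ */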
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON               0x0
+#define SDVO_CMD_STATUS_SUCCESS                        0x1
+#define SDVO_CMD_STATUS_NOTSUPP                        0x2
+#define SDVO_CMD_STATUS_INVALID_ARG            0x3
+#define SDVO_CMD_STATUS_PENDING                        0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED   0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP       0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET                                 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS                       0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV                      0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR                    SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR                    SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH                    SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS                    0x03
+struct intel_sdvo_get_trained_inputs_response {
+    unsigned int input0_trained:1;
+    unsigned int input1_trained:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS                    0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS                    0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP                                0x06
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP                                0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS                 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT                  0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG                   0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG                   0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE            0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+    u16 interrupt_status;
+    unsigned int ambient_light_interrupt:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT                      0x10
+struct intel_sdvo_set_target_input_args {
+    unsigned int target_1:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT                     0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1               0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2               0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1               0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2               0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1              0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2              0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1              0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2              0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW                            SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH                           SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE                             SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK                              SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH                               SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE                             SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK                              SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH                               SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF                            SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH                          SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH                      SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH                  SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS                            SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED                          (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK                         (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK                          (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK                           (3 << 1)
+# define SDVO_DTD_SDVO_FLAGS                           SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL                              (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED                           (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT                         (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK                       (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE                       (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP                      (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH                     (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH                       SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING         0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW         SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH                SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW         SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH                SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW                SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH       SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS             SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED          (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED              (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1      0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2      0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE           0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE          0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS                0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT                   0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT                   0x21
+# define SDVO_CLOCK_RATE_MULT_1X                               (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X                               (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X                               (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS              0x27
+
+#define SDVO_CMD_GET_TV_FORMAT                         0x28
+
+#define SDVO_CMD_SET_TV_FORMAT                         0x29
+
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES            0x2a
+#define SDVO_CMD_GET_ENCODER_POWER_STATE               0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE               0x2c
+# define SDVO_ENCODER_STATE_ON                                 (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY                            (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND                            (1 << 2)
+# define SDVO_ENCODER_STATE_OFF                                        (1 << 3)
+
+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT             0x93
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH                        0x7a
+# define SDVO_CONTROL_BUS_PROM                         0x0
+# define SDVO_CONTROL_BUS_DDC1                         0x1
+# define SDVO_CONTROL_BUS_DDC2                         0x2
+# define SDVO_CONTROL_BUS_DDC3                         0x3
+
+/* xiaolin, to support add-on SDVO TV encoders */
+/* SDVO bus & SDVO input wiring details:
+ * Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no)
+ * Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no)
+ * Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no)
+ * Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no)
+ */
+#define SDVOB_IN0 0x01
+#define SDVOB_IN1 0x02
+#define SDVOC_IN0 0x04
+#define SDVOC_IN1 0x08
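+
+/* Example: a wiring value of (SDVOB_IN0 | SDVOC_IN1) == 0x09 describes a
+ * board where SDVOB feeds input 0 and SDVOC feeds input 1. */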
+
+#define SDVO_OUTPUT_TV0     0x003C
+#define SDVO_OUTPUT_TV1     0x3C00
+
+#define SDVO_OUTPUT_CRT     (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_OUTPUT_TV      (SDVO_OUTPUT_TV0 | SDVO_OUTPUT_TV1)
+#define SDVO_OUTPUT_LVDS    (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_OUTPUT_TMDS    (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+
+
+
+#define SDVO_DEVICE_NONE 0x00
+#define        SDVO_DEVICE_CRT 0x01
+#define        SDVO_DEVICE_TV 0x02
+#define        SDVO_DEVICE_LVDS 0x04
+#define        SDVO_DEVICE_TMDS 0x08
+
+/* Different TV modes */
+#define        TVMODE_OFF              0x0000
+#define        TVMODE_SDTV     0x0001
+#define        TVMODE_HDTV     0x0002
+
+#define TVSTANDARD_NONE 0x00
+#define TVSTANDARD_NTSC_M   0x0001  //        75 IRE Setup
+#define TVSTANDARD_NTSC_M_J 0x0002  // Japan,  0 IRE Setup
+#define TVSTANDARD_PAL_B    0x0004
+#define TVSTANDARD_PAL_D    0x0008
+#define TVSTANDARD_PAL_H    0x0010
+#define TVSTANDARD_PAL_I    0x0020
+#define TVSTANDARD_PAL_M    0x0040
+#define TVSTANDARD_PAL_N    0x0080
+#define TVSTANDARD_SECAM_B  0x0100
+#define TVSTANDARD_SECAM_D  0x0200
+#define TVSTANDARD_SECAM_G  0x0400
+#define TVSTANDARD_SECAM_H  0x0800
+#define TVSTANDARD_SECAM_K  0x1000
+#define TVSTANDARD_SECAM_K1 0x2000
+#define TVSTANDARD_SECAM_L  0x4000
+#define TVSTANDARD_WIN_VGA  0x8000
+/*and the rest*/
+#define TVSTANDARD_NTSC_433 0x00010000
+#define TVSTANDARD_PAL_G    0x00020000
+#define TVSTANDARD_PAL_60   0x00040000
+#define TVSTANDARD_SECAM_L1 0x00080000
+#define TVSTANDARD_SDTV_ALL 0x000FFFFF
+
+
+/* HDTV standards, defined in the otherwise unused upper 12 bits of dwTVStandard */
+#define HDTV_SMPTE_170M_480i59         0x00100000
+#define HDTV_SMPTE_293M_480p60         0x00200000
+#define HDTV_SMPTE_293M_480p59         0x00400000
+#define HDTV_ITURBT601_576i50          0x00800000
+#define HDTV_ITURBT601_576p50          0x01000000
+#define HDTV_SMPTE_296M_720p50         0x02000000
+#define HDTV_SMPTE_296M_720p59         0x04000000
+#define HDTV_SMPTE_296M_720p60         0x08000000
+#define HDTV_SMPTE_274M_1080i50                0x10000000
+#define HDTV_SMPTE_274M_1080i59                0x20000000
+#define HDTV_SMPTE_274M_1080i60                0x40000000
+#define HDTV_SMPTE_274M_1080p60                0x80000000
+#define TVSTANDARD_HDTV_ALL                    0xFFF00000
+
+
+#define TVSTANDARD_NTSC 0x01
+#define TVSTANDARD_PAL 0x02
+
+#define TVOUTPUT_NONE 0x00
+#define TVOUTPUT_COMPOSITE 0x01
+#define TVOUTPUT_SVIDEO 0x02
+#define TVOUTPUT_RGB 0x04
+#define TVOUTPUT_YCBCR 0x08
+#define TVOUTPUT_SC 0x16
+
+/* Encoder supported TV standard bit mask per SDVO ED*/
+#define SDVO_NTSC_M                                     0x00000001
+#define SDVO_NTSC_M_J                                   0x00000002
+#define SDVO_NTSC_433                                   0x00000004
+#define SDVO_PAL_B                                      0x00000008
+#define SDVO_PAL_D                                      0x00000010
+#define SDVO_PAL_G                                      0x00000020
+#define SDVO_PAL_H                                      0x00000040
+#define SDVO_PAL_I                                      0x00000080
+#define SDVO_PAL_M                                      0x00000100
+#define SDVO_PAL_N                                      0x00000200
+#define SDVO_PAL_NC                                     0x00000400
+#define SDVO_PAL_60                                     0x00000800
+#define SDVO_SECAM_B                                    0x00001000
+#define SDVO_SECAM_D                                    0x00002000
+#define SDVO_SECAM_G                                    0x00004000
+#define SDVO_SECAM_K                                    0x00008000
+#define SDVO_SECAM_K1                                   0x00010000
+#define SDVO_SECAM_L                                    0x00020000
+#define SDVO_SECAM_60                                   0x00040000
+
+/* Number of SDTV formats */
+#define SDTV_NUM_STANDARDS                              19
+
+/* Encoder supported HDTV standard bit mask per SDVO ED*/
+#define SDVO_HDTV_STD_240M_1080i59                      0x00000008
+#define SDVO_HDTV_STD_240M_1080i60                      0x00000010
+#define SDVO_HDTV_STD_260M_1080i59                      0x00000020
+#define SDVO_HDTV_STD_260M_1080i60                      0x00000040
+#define SDVO_HDTV_STD_274M_1080i50                      0x00000080
+#define SDVO_HDTV_STD_274M_1080i59                      0x00000100
+#define SDVO_HDTV_STD_274M_1080i60                      0x00000200
+#define SDVO_HDTV_STD_274M_1080p23                      0x00000400
+#define SDVO_HDTV_STD_274M_1080p24                      0x00000800
+#define SDVO_HDTV_STD_274M_1080p25                      0x00001000
+#define SDVO_HDTV_STD_274M_1080p29                      0x00002000
+#define SDVO_HDTV_STD_274M_1080p30                      0x00004000
+#define SDVO_HDTV_STD_274M_1080p50                      0x00008000
+#define SDVO_HDTV_STD_274M_1080p59                      0x00010000
+#define SDVO_HDTV_STD_274M_1080p60                      0x00020000
+#define SDVO_HDTV_STD_295M_1080i50                      0x00040000
+#define SDVO_HDTV_STD_295M_1080p50                      0x00080000
+#define SDVO_HDTV_STD_296M_720p59                       0x00100000
+#define SDVO_HDTV_STD_296M_720p60                       0x00200000
+#define SDVO_HDTV_STD_296M_720p50                       0x00400000
+#define SDVO_HDTV_STD_293M_480p59                       0x00800000
+#define SDVO_HDTV_STD_170M_480i59                       0x01000000
+#define SDVO_HDTV_STD_ITURBT601_576i50                  0x02000000
+#define SDVO_HDTV_STD_ITURBT601_576p50                  0x04000000
+#define SDVO_HDTV_STD_EIA_7702A_480i60                  0x08000000
+#define SDVO_HDTV_STD_EIA_7702A_480p60                  0x10000000
+
+/* SDTV resolution*/
+#define SDVO_SDTV_320x200                               0x00000001
+#define SDVO_SDTV_320x240                               0x00000002
+#define SDVO_SDTV_400x300                               0x00000004
+#define SDVO_SDTV_640x350                               0x00000008
+#define SDVO_SDTV_640x400                               0x00000010
+#define SDVO_SDTV_640x480                               0x00000020
+#define SDVO_SDTV_704x480                               0x00000040
+#define SDVO_SDTV_704x576                               0x00000080
+#define SDVO_SDTV_720x350                               0x00000100
+#define SDVO_SDTV_720x400                               0x00000200
+#define SDVO_SDTV_720x480                               0x00000400
+#define SDVO_SDTV_720x540                               0x00000800
+#define SDVO_SDTV_720x576                               0x00001000
+#define SDVO_SDTV_768x576                               0x00002000
+#define SDVO_SDTV_800x600                               0x00004000
+#define SDVO_SDTV_832x624                               0x00008000
+#define SDVO_SDTV_920x766                               0x00010000
+#define SDVO_SDTV_1024x768                              0x00020000
+#define SDVO_SDTV_1280x1024                             0x00040000
+
+
+#define SDVO_HDTV_640x480                               0x00000001
+#define SDVO_HDTV_800x600                               0x00000002
+#define SDVO_HDTV_1024x768                              0x00000004
+#define SDVO_HDTV_1064x600                              0x00020000
+#define SDVO_HDTV_1280x720                              0x00040000
+#define SDVO_HDTV_1704x960                              0x00100000
+#define SDVO_HDTV_1864x1050                             0x00200000
+#define SDVO_HDTV_1920x1080                             0x00400000
+#define SDVO_HDTV_640x400                               0x02000000
+
+/* Number of SDTV modes */
+#define SDTV_NUM_MODES                                  19
+
+/* SDVO TV command aliases (alternate names for opcodes defined above) */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS        0x1A
+#define SDVO_CMD_GET_TV_FORMATS                                0x28
+#define SDVO_CMD_SET_TV_FORMATS                                0x29
+
+/* Bit masks of picture enhancements */
+#define SDVO_FLICKER_FILTER                             0x00000001
+#define SDVO_ADAPTIVE_FLICKER_FILTER                    0x00000002
+#define SDVO_2D_FLICKER_FILTER                          0x00000004
+#define SDVO_SATURATION                                 0x00000008
+#define SDVO_HUE                                        0x00000010
+#define SDVO_BRIGHTNESS                                 0x00000020
+#define SDVO_CONTRAST                                   0x00000040
+#define SDVO_HORIZONTAL_OVERSCAN                        0x00000080
+#define SDVO_VERTICAL_OVERSCAN                          0x00000100
+#define SDVO_HORIZONTAL_POSITION                        0x00000200
+#define SDVO_VERTICAL_POSITION                          0x00000400
+#define SDVO_SHARPNESS                                  0x00000800
+#define SDVO_DOT_CRAWL                                  0x00001000
+#define SDVO_DITHER                                     0x00002000
+#define SDVO_MAX_TV_CHROMA_FILTER                       0x00004000
+#define SDVO_TV_MAX_LUMA_FILTER                         0x00008000
+
+#define SDVO_CMD_GET_ANCILLARY_VIDEO_INFORMATION        0x3A
+#define SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION        0x3B
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS              0x84
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER                  0x4D
+#define SDVO_CMD_GET_FLICKER_FILTER                      0x4E
+#define SDVO_CMD_SET_FLICKER_FILTER                      0x4F
+#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FILTER             0x50
+#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER             0x51
+#define SDVO_CMD_GET_MAX_2D_FLICKER_FILTER               0x52
+#define SDVO_CMD_GET_2D_FLICKER_FILTER                   0x53
+#define SDVO_CMD_SET_2D_FLICKER_FILTER                   0x54
+#define SDVO_CMD_GET_MAX_SATURATION                      0x55
+#define SDVO_CMD_GET_SATURATION                          0x56
+#define SDVO_CMD_SET_SATURATION                          0x57
+#define SDVO_CMD_GET_MAX_HUE                             0x58
+#define SDVO_CMD_GET_HUE                                 0x59
+#define SDVO_CMD_SET_HUE                                 0x5A
+#define SDVO_CMD_GET_MAX_BRIGHTNESS                      0x5B
+#define SDVO_CMD_GET_BRIGHTNESS                          0x5C
+#define SDVO_CMD_SET_BRIGHTNESS                          0x5D
+#define SDVO_CMD_GET_MAX_CONTRAST                        0x5E
+#define SDVO_CMD_GET_CONTRAST                            0x5F
+#define SDVO_CMD_SET_CONTRAST                            0x60
+
+#define SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN             0x61
+#define SDVO_CMD_GET_HORIZONTAL_OVERSCAN                 0x62
+#define SDVO_CMD_SET_HORIZONTAL_OVERSCAN                 0x63
+#define SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN               0x64
+#define SDVO_CMD_GET_VERTICAL_OVERSCAN                   0x65
+#define SDVO_CMD_SET_VERTICAL_OVERSCAN                   0x66
+#define SDVO_CMD_GET_MAX_HORIZONTAL_POSITION             0x67
+#define SDVO_CMD_GET_HORIZONTAL_POSITION                 0x68
+#define SDVO_CMD_SET_HORIZONTAL_POSITION                 0x69
+#define SDVO_CMD_GET_MAX_VERTICAL_POSITION               0x6A
+#define SDVO_CMD_GET_VERTICAL_POSITION                   0x6B
+#define SDVO_CMD_SET_VERTICAL_POSITION                   0x6C
+#define SDVO_CMD_GET_MAX_SHARPNESS                       0x6D
+#define SDVO_CMD_GET_SHARPNESS                           0x6E
+#define SDVO_CMD_SET_SHARPNESS                           0x6F
+#define SDVO_CMD_GET_DOT_CRAWL                           0x70
+#define SDVO_CMD_SET_DOT_CRAWL                           0x71
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER                0x74
+#define SDVO_CMD_GET_TV_CHROMA_FILTER                    0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER                    0x76
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER                  0x77
+#define SDVO_CMD_GET_TV_LUMA_FILTER                      0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER                      0x79
+#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER         0x7B
diff --git a/psb-kernel-source-4.41.1/intel_setup.c b/psb-kernel-source-4.41.1/intel_setup.c
new file mode 100644 (file)
index 0000000..7412e90
--- /dev/null
@@ -0,0 +1,18 @@
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_crt.c"
+
+/* These ACPI hooks are not provided here; stub them out as NULL. */
+#define ACPI_EDID_LCD NULL
+#define ACPI_DOD NULL
+
+#include "intel_lvds.c"
+#include "intel_sdvo.c"
+#include "intel_display.c"
+#include "intel_modes.c"
diff --git a/psb-kernel-source-4.41.1/psb_buffer.c b/psb-kernel-source-4.41.1/psb_buffer.c
new file mode 100644 (file)
index 0000000..eb79614
--- /dev/null
@@ -0,0 +1,425 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+#include "drmP.h"
+#include "psb_drv.h"
+#include "psb_schedule.h"
+
+struct drm_psb_ttm_backend {
+       struct drm_ttm_backend base;
+       struct page **pages;
+       unsigned int desired_tile_stride;
+       unsigned int hw_tile_stride;
+       int mem_type;
+       unsigned long offset;
+       unsigned long num_pages;
+};
+
+int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
+                   uint32_t * type)
+{
+       switch (*class) {
+       case PSB_ENGINE_TA:
+               *type = DRM_FENCE_TYPE_EXE |
+                   _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
+               if (bo->mem.mask & PSB_BO_FLAG_TA)
+                       *type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
+               if (bo->mem.mask & PSB_BO_FLAG_SCENE)
+                       *type |= _PSB_FENCE_TYPE_SCENE_DONE;
+               if (bo->mem.mask & PSB_BO_FLAG_FEEDBACK)
+                       *type |= _PSB_FENCE_TYPE_FEEDBACK;
+               break;
+       default:
+               *type = DRM_FENCE_TYPE_EXE;
+       }
+       return 0;
+}
+
+/*
+ * Poulsbo GPU virtual space looks like this
+ * (We currently use only one MMU context).
+ *
+ * gatt_start = Start of GATT aperture in bus space.
+ * stolen_end = End of GATT populated by stolen memory in bus space.
+ * gatt_end   = End of GATT
+ * twod_end   = MIN(gatt_start + 256M, gatt_end)
+ *
+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling- and copy operations.
+ *                          This space is not managed and is protected by the
+ *                          temp_mem mutex.
+ *
+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
+ *
+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU    For generic MMU-only use.
+ *
+ * gatt_start -> stolen_end DRM_BO_MEM_VRAM    Pre-populated GATT pages.
+ *
+ * stolen_end -> twod_end   DRM_BO_MEM_TT      GATT memory usable by 2D engine.
+ *
+ * twod_end -> gatt_end     DRM_BO_MEM_APER    GATT memory not usable by 2D engine.
+ *
+ * gatt_end ->   0xffffffff Currently unused.
+ */
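+
+/*
+ * Illustrative sketch of the boundary arithmetic above (comment only,
+ * assuming the gatt_start/gatt_pages fields of struct psb_gtt):
+ *
+ *     gatt_end = gatt_start + (gatt_pages << PAGE_SHIFT);
+ *     twod_end = min(gatt_start + (256 << 20), gatt_end);
+ */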
+
+int psb_init_mem_type(struct drm_device *dev, uint32_t type,
+                     struct drm_mem_type_manager *man)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct psb_gtt *pg = dev_priv->pg;
+
+       switch (type) {
+       case DRM_BO_MEM_LOCAL:
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CACHED;
+               man->drm_bus_maptype = 0;
+               break;
+       case DRM_PSB_MEM_KERNEL:
+               man->io_offset = 0x00000000;
+               man->io_size = 0x00000000;
+               man->io_addr = NULL;
+               man->drm_bus_maptype = _DRM_TTM;
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CMA;
+               man->gpu_offset = PSB_MEM_KERNEL_START;
+               break;
+       case DRM_PSB_MEM_MMU:
+               man->io_offset = 0x00000000;
+               man->io_size = 0x00000000;
+               man->io_addr = NULL;
+               man->drm_bus_maptype = _DRM_TTM;
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CMA;
+               man->gpu_offset = PSB_MEM_MMU_START;
+               break;
+       case DRM_PSB_MEM_PDS:
+               man->io_offset = 0x00000000;
+               man->io_size = 0x00000000;
+               man->io_addr = NULL;
+               man->drm_bus_maptype = _DRM_TTM;
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CMA;
+               man->gpu_offset = PSB_MEM_PDS_START;
+               break;
+       case DRM_PSB_MEM_RASTGEOM:
+               man->io_offset = 0x00000000;
+               man->io_size = 0x00000000;
+               man->io_addr = NULL;
+               man->drm_bus_maptype = _DRM_TTM;
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CMA;
+               man->gpu_offset = PSB_MEM_RASTGEOM_START;
+               break;
+       case DRM_BO_MEM_VRAM:
+               man->io_addr = NULL;
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
+#ifdef PSB_WORKING_HOST_MMU_ACCESS
+               man->drm_bus_maptype = _DRM_AGP;
+               man->io_offset = pg->gatt_start;
+               man->io_size = pg->gatt_pages << PAGE_SHIFT;
+#else
+               man->drm_bus_maptype = _DRM_TTM;        /* Forces uncached */
+               man->io_offset = pg->stolen_base;
+               man->io_size = pg->stolen_size;
+#endif
+               man->gpu_offset = pg->gatt_start;
+               break;
+       case DRM_BO_MEM_TT:     /* Mappable GATT memory */
+               man->io_offset = pg->gatt_start;
+               man->io_size = pg->gatt_pages << PAGE_SHIFT;
+               man->io_addr = NULL;
+#ifdef PSB_WORKING_HOST_MMU_ACCESS
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_NEEDS_IOREMAP;
+               man->drm_bus_maptype = _DRM_AGP;
+#else
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CMA;
+               man->drm_bus_maptype = _DRM_TTM;
+#endif
+               man->gpu_offset = pg->gatt_start;
+               break;
+       case DRM_PSB_MEM_APER:  /*MMU memory. Mappable. Not usable for 2D. */
+               man->io_offset = pg->gatt_start;
+               man->io_size = pg->gatt_pages << PAGE_SHIFT;
+               man->io_addr = NULL;
+#ifdef PSB_WORKING_HOST_MMU_ACCESS
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_NEEDS_IOREMAP;
+               man->drm_bus_maptype = _DRM_AGP;
+#else
+               man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+                   _DRM_FLAG_MEMTYPE_CMA;
+               man->drm_bus_maptype = _DRM_TTM;
+#endif
+               man->gpu_offset = pg->gatt_start;
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+uint32_t psb_evict_mask(struct drm_buffer_object * bo)
+{
+       switch (bo->mem.mem_type) {
+       case DRM_BO_MEM_VRAM:
+               return DRM_BO_FLAG_MEM_TT;
+       default:
+               return DRM_BO_FLAG_MEM_LOCAL;
+       }
+}
+
+int psb_invalidate_caches(struct drm_device *dev, uint64_t flags)
+{
+       return 0;
+}
+
+static int psb_move_blit(struct drm_buffer_object *bo,
+                        int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+{
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+       int dir = 0;
+
+       if ((old_mem->mem_type == new_mem->mem_type) &&
+           (new_mem->mm_node->start <
+            old_mem->mm_node->start + old_mem->mm_node->size)) {
+               dir = 1;
+       }
+
+       psb_emit_2d_copy_blit(bo->dev,
+                             old_mem->mm_node->start << PAGE_SHIFT,
+                             new_mem->mm_node->start << PAGE_SHIFT,
+                             new_mem->num_pages, dir);
+
+       return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
+                                        DRM_FENCE_TYPE_EXE, 0, new_mem);
+}
+
+/*
+ * Flip destination ttm into GATT,
+ * then blit and subsequently move out again.
+ */
+
+static int psb_move_flip(struct drm_buffer_object *bo,
+                        int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_bo_mem_reg tmp_mem;
+       int ret;
+
+       tmp_mem = *new_mem;
+       tmp_mem.mm_node = NULL;
+       tmp_mem.mask = DRM_BO_FLAG_MEM_TT;
+
+       ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+       if (ret)
+               return ret;
+       ret = drm_bind_ttm(bo->ttm, &tmp_mem);
+       if (ret)
+               goto out_cleanup;
+       ret = psb_move_blit(bo, 1, no_wait, &tmp_mem);
+       if (ret)
+               goto out_cleanup;
+
+       ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+      out_cleanup:
+       if (tmp_mem.mm_node) {
+               mutex_lock(&dev->struct_mutex);
+               if (tmp_mem.mm_node != bo->pinned_node)
+                       drm_mm_put_block(tmp_mem.mm_node);
+               tmp_mem.mm_node = NULL;
+               mutex_unlock(&dev->struct_mutex);
+       }
+       return ret;
+}
+
+int psb_move(struct drm_buffer_object *bo,
+            int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+{
+       struct drm_bo_mem_reg *old_mem = &bo->mem;
+
+       if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+               return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+               if (psb_move_flip(bo, evict, no_wait, new_mem))
+                       return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       } else {
+               if (psb_move_blit(bo, evict, no_wait, new_mem))
+                       return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       }
+       return 0;
+}
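+
+/*
+ * Decision table for psb_move() above:
+ *
+ *     old in LOCAL  -> plain memcpy move
+ *     new in LOCAL  -> flip through TT and blit, memcpy on failure
+ *     otherwise     -> direct 2D blit, memcpy on failure
+ */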
+
+static int drm_psb_tbe_nca(struct drm_ttm_backend *backend)
+{
+       return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+static int drm_psb_tbe_populate(struct drm_ttm_backend *backend,
+                               unsigned long num_pages, struct page **pages)
+{
+       struct drm_psb_ttm_backend *psb_be =
+           container_of(backend, struct drm_psb_ttm_backend, base);
+
+       psb_be->pages = pages;
+       return 0;
+}
+
+static int drm_psb_tbe_unbind(struct drm_ttm_backend *backend)
+{
+       struct drm_device *dev = backend->dev;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_psb_ttm_backend *psb_be =
+           container_of(backend, struct drm_psb_ttm_backend, base);
+       struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
+       struct drm_mem_type_manager *man = &dev->bm.man[psb_be->mem_type];
+
+       PSB_DEBUG_RENDER("MMU unbind.\n");
+
+       if (psb_be->mem_type == DRM_BO_MEM_TT) {
+               uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
+                   PAGE_SHIFT;
+
+               (void)psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
+                                          psb_be->num_pages,
+                                          psb_be->desired_tile_stride,
+                                          psb_be->hw_tile_stride);
+       }
+
+       psb_mmu_remove_pages(pd, psb_be->offset,
+                            psb_be->num_pages,
+                            psb_be->desired_tile_stride,
+                            psb_be->hw_tile_stride);
+
+       return 0;
+}
+
+static int drm_psb_tbe_bind(struct drm_ttm_backend *backend,
+                           struct drm_bo_mem_reg *bo_mem)
+{
+       struct drm_device *dev = backend->dev;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_psb_ttm_backend *psb_be =
+           container_of(backend, struct drm_psb_ttm_backend, base);
+       struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
+       struct drm_mem_type_manager *man = &dev->bm.man[bo_mem->mem_type];
+       int type;
+       int ret = 0;
+
+       psb_be->mem_type = bo_mem->mem_type;
+       psb_be->num_pages = bo_mem->num_pages;
+       psb_be->desired_tile_stride = bo_mem->desired_tile_stride;
+       psb_be->hw_tile_stride = bo_mem->hw_tile_stride;
+       /* The strides above are immediately overridden: tiling is
+        * (apparently deliberately) disabled for this backend. */
+       psb_be->desired_tile_stride = 0;
+       psb_be->hw_tile_stride = 0;
+       psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
+           man->gpu_offset;
+
+       type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
+
+       PSB_DEBUG_RENDER("MMU bind.\n");
+       if (psb_be->mem_type == DRM_BO_MEM_TT) {
+               uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
+                   PAGE_SHIFT;
+
+               ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
+                                          gatt_p_offset,
+                                          psb_be->num_pages,
+                                          psb_be->desired_tile_stride,
+                                          psb_be->hw_tile_stride, type);
+       }
+
+       ret = psb_mmu_insert_pages(pd, psb_be->pages,
+                                  psb_be->offset, psb_be->num_pages,
+                                  psb_be->desired_tile_stride,
+                                  psb_be->hw_tile_stride, type);
+       if (ret)
+               goto out_err;
+
+       DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
+                       DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED);
+
+       return 0;
+      out_err:
+       drm_psb_tbe_unbind(backend);
+       return ret;
+}
+
+static void drm_psb_tbe_clear(struct drm_ttm_backend *backend)
+{
+       struct drm_psb_ttm_backend *psb_be =
+           container_of(backend, struct drm_psb_ttm_backend, base);
+
+       psb_be->pages = NULL;
+}
+
+static void drm_psb_tbe_destroy(struct drm_ttm_backend *backend)
+{
+       struct drm_psb_ttm_backend *psb_be;
+
+       if (!backend)
+               return;
+
+       psb_be = container_of(backend, struct drm_psb_ttm_backend, base);
+       drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
+}
+
+static struct drm_ttm_backend_func psb_ttm_backend = {
+       .needs_ub_cache_adjust = drm_psb_tbe_nca,
+       .populate = drm_psb_tbe_populate,
+       .clear = drm_psb_tbe_clear,
+       .bind = drm_psb_tbe_bind,
+       .unbind = drm_psb_tbe_unbind,
+       .destroy = drm_psb_tbe_destroy,
+};
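+
+/*
+ * Lifecycle as wired above: TTM calls populate() with the backing pages,
+ * bind()/unbind() as the buffer enters and leaves the GPU address space,
+ * clear() to drop the page list, and destroy() on teardown.
+ */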
+
+struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev)
+{
+       struct drm_psb_ttm_backend *psb_be;
+
+       psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
+       if (!psb_be)
+               return NULL;
+       psb_be->pages = NULL;
+       psb_be->base.func = &psb_ttm_backend;
+       psb_be->base.dev = dev;
+
+       return &psb_be->base;
+}
+
+int psb_tbe_size(struct drm_device *dev, unsigned long num_pages)
+{
+       /*
+        * Return the size of the structures themselves and the
+        * estimated size of the pagedir and pagetable entries.
+        */
+
+       return drm_size_align(sizeof(struct drm_psb_ttm_backend)) +
+               8*num_pages;
+}
diff --git a/psb-kernel-source-4.41.1/psb_detear.c b/psb-kernel-source-4.41.1/psb_detear.c
new file mode 100644 (file)
index 0000000..3a8e2e0
--- /dev/null
@@ -0,0 +1,39 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Super Zhang <super.zhang@intel.com>
+ *          John Ye <john.ye@intel.com>
+ */
+
+#include "psb_detear.h"
+
+kern_blit_info psb_blit_info;
+
+void psb_blit_2d_reg_write(struct drm_psb_private *dev_priv, uint32_t *cmdbuf)
+{
+       int i;
+
+       /* Push the 40-byte 2D blit command into the SGX 2D slave port. */
+       for (i = 0; i < VIDEO_BLIT_2D_SIZE; i += 4)
+               PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+
+       /* Read the last dword back, apparently as a posting read to flush. */
+       (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+}
diff --git a/psb-kernel-source-4.41.1/psb_detear.h b/psb-kernel-source-4.41.1/psb_detear.h
new file mode 100644 (file)
index 0000000..da5e6fb
--- /dev/null
@@ -0,0 +1,47 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Super Zhang <super.zhang@intel.com>
+ *          John Ye <john.ye@intel.com>
+ */
+
+#ifndef _PSB_DETEAR_H_
+#define _PSB_DETEAR_H_
+
+#include "drmP.h"
+#include "drm.h"
+#include "psb_drm.h"
+#include "psb_drv.h"
+
+#define VIDEO_BLIT_2D_SIZE 40
+
+typedef struct kern_blit_info
+{
+       int vdc_bit;
+       int cmd_ready;
+       unsigned char cmdbuf[VIDEO_BLIT_2D_SIZE]; /* one 2D video blit command (40 bytes) */
+} kern_blit_info;
+
+extern kern_blit_info psb_blit_info;
+extern void psb_blit_2d_reg_write(struct drm_psb_private *dev_priv, uint32_t * cmdbuf);
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_drm.h b/psb-kernel-source-4.41.1/psb_drm.h
new file mode 100644 (file)
index 0000000..4395829
--- /dev/null
@@ -0,0 +1,395 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRM_H_
+#define _PSB_DRM_H_
+
+#if defined(__linux__) && !defined(__KERNEL__)
+#include <stdint.h>
+#endif
+
+/*
+ * Intel Poulsbo driver package version.
+ *
+ */
+/* #define PSB_PACKAGE_VERSION "ED"__DATE__*/
+#define PSB_PACKAGE_VERSION "5.0.1.0046"
+
+#define DRM_PSB_SAREA_MAJOR 0
+#define DRM_PSB_SAREA_MINOR 1
+#define PSB_FIXED_SHIFT 16
+
+/*
+ * Public memory types.
+ */
+
+#define DRM_PSB_MEM_MMU DRM_BO_MEM_PRIV1
+#define DRM_PSB_FLAG_MEM_MMU DRM_BO_FLAG_MEM_PRIV1
+#define DRM_PSB_MEM_PDS DRM_BO_MEM_PRIV2
+#define DRM_PSB_FLAG_MEM_PDS DRM_BO_FLAG_MEM_PRIV2
+#define DRM_PSB_MEM_APER DRM_BO_MEM_PRIV3
+#define DRM_PSB_FLAG_MEM_APER DRM_BO_FLAG_MEM_PRIV3
+#define DRM_PSB_MEM_RASTGEOM DRM_BO_MEM_PRIV4
+#define DRM_PSB_FLAG_MEM_RASTGEOM DRM_BO_FLAG_MEM_PRIV4
+#define PSB_MEM_RASTGEOM_START   0x30000000
+
+typedef int32_t psb_fixed;
+typedef uint32_t psb_ufixed;
+
+static inline psb_fixed psb_int_to_fixed(int a)
+{
+       return a * (1 << PSB_FIXED_SHIFT);
+}
+
+static inline psb_ufixed psb_unsigned_to_ufixed(unsigned int a)
+{
+       return a << PSB_FIXED_SHIFT;
+}
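+/*
+ * Both fixed-point formats are 16.16: for example,
+ * psb_int_to_fixed(3) == 0x00030000 and
+ * psb_unsigned_to_ufixed(1) == 0x00010000.
+ */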
+
+/*Status of the command sent to the gfx device.*/
+typedef enum {
+       DRM_CMD_SUCCESS,
+       DRM_CMD_FAILED,
+       DRM_CMD_HANG
+} drm_cmd_status_t;
+
+struct drm_psb_scanout {
+       uint32_t buffer_id;     /* DRM buffer object ID */
+       uint32_t rotation;      /* Rotation as in RR_rotation definitions */
+       uint32_t stride;        /* Buffer stride in bytes */
+       uint32_t depth;         /* Buffer depth in bits (NOT bpp) */
+       uint32_t width;         /* Buffer width in pixels */
+       uint32_t height;        /* Buffer height in lines */
+       psb_fixed transform[3][3];      /* Buffer composite transform */
+       /* (scaling, rot, reflect) */
+};
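+/*
+ * For an unrotated, unscaled scanout the transform is presumably the
+ * identity in 16.16 fixed point: transform[i][i] = psb_int_to_fixed(1).
+ */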
+
+#define DRM_PSB_SAREA_OWNERS 16
+#define DRM_PSB_SAREA_OWNER_2D 0
+#define DRM_PSB_SAREA_OWNER_3D 1
+
+#define DRM_PSB_SAREA_SCANOUTS 3
+
+struct drm_psb_sarea {
+       /* Track changes of this data structure */
+
+       uint32_t major;
+       uint32_t minor;
+
+       /* Last context to touch part of hw */
+       uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
+
+       /* Definition of front- and rotated buffers */
+       uint32_t num_scanouts;
+       struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
+
+       int planeA_x;
+       int planeA_y;
+       int planeA_w;
+       int planeA_h;
+       int planeB_x;
+       int planeB_y;
+       int planeB_w;
+       int planeB_h;
+       uint32_t msvdx_state;
+       uint32_t msvdx_context;
+};
+
+#define PSB_RELOC_MAGIC         0x67676767
+#define PSB_RELOC_SHIFT_MASK    0x0000FFFF
+#define PSB_RELOC_SHIFT_SHIFT   0
+#define PSB_RELOC_ALSHIFT_MASK  0xFFFF0000
+#define PSB_RELOC_ALSHIFT_SHIFT 16
+
+#define PSB_RELOC_OP_OFFSET     0      /* Offset of the indicated
+                                        * buffer
+                                        */
+#define PSB_RELOC_OP_2D_OFFSET  1      /* Offset of the indicated
+                                        *  buffer, relative to 2D
+                                        *  base address
+                                        */
+#define PSB_RELOC_OP_PDS_OFFSET 2      /* Offset of the indicated buffer,
+                                        *  relative to PDS base address
+                                        */
+#define PSB_RELOC_OP_STRIDE     3      /* Stride of the indicated
+                                        * buffer (for tiling)
+                                        */
+#define PSB_RELOC_OP_USE_OFFSET 4      /* Offset of USE buffer
+                                        * relative to base reg
+                                        */
+#define PSB_RELOC_OP_USE_REG    5      /* Base reg of USE buffer */
+
+struct drm_psb_reloc {
+       uint32_t reloc_op;
+       uint32_t where;         /* offset in destination buffer */
+       uint32_t buffer;        /* Buffer reloc applies to */
+       uint32_t mask;          /* Destination format: */
+       uint32_t shift;         /* Destination format: */
+       uint32_t pre_add;       /* Destination format: */
+       uint32_t background;    /* Destination add */
+       uint32_t dst_buffer;    /* Destination buffer. Index into buffer_list */
+       uint32_t arg0;          /* Reloc-op dependent */
+       uint32_t arg1;
+};
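+/*
+ * A sketch of the intended patching, inferred from the field comments
+ * above (the authoritative logic lives in the kernel relocation code):
+ * the reloc engine takes the offset of 'buffer', applies 'pre_add' and
+ * 'shift', masks the result with 'mask', merges in 'background', and
+ * stores it at byte offset 'where' inside 'dst_buffer'.
+ */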
+
+#define PSB_BO_FLAG_TA              (1ULL << 48)
+#define PSB_BO_FLAG_SCENE           (1ULL << 49)
+#define PSB_BO_FLAG_FEEDBACK        (1ULL << 50)
+#define PSB_BO_FLAG_USSE            (1ULL << 51)
+
+#define PSB_ENGINE_2D 0
+#define PSB_ENGINE_VIDEO 1
+#define PSB_ENGINE_RASTERIZER 2
+#define PSB_ENGINE_TA 3
+#define PSB_ENGINE_HPRAST 4
+
+/*
+ * For this fence class we have a couple of
+ * fence types.
+ */
+
+#define _PSB_FENCE_EXE_SHIFT           0
+#define _PSB_FENCE_TA_DONE_SHIFT       1
+#define _PSB_FENCE_RASTER_DONE_SHIFT   2
+#define _PSB_FENCE_SCENE_DONE_SHIFT    3
+#define _PSB_FENCE_FEEDBACK_SHIFT      4
+
+#define _PSB_ENGINE_TA_FENCE_TYPES   5
+#define _PSB_FENCE_TYPE_TA_DONE     (1 << _PSB_FENCE_TA_DONE_SHIFT)
+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
+#define _PSB_FENCE_TYPE_SCENE_DONE  (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
+#define _PSB_FENCE_TYPE_FEEDBACK    (1 << _PSB_FENCE_FEEDBACK_SHIFT)
+
+#define PSB_NUM_ENGINES 5
+
+#define PSB_TA_FLAG_FIRSTPASS    (1 << 0)
+#define PSB_TA_FLAG_LASTPASS     (1 << 1)
+
+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
+
+/* to eliminate video playback tearing */
+#define PSB_DETEAR
+#ifdef  PSB_DETEAR
+#define PSB_VIDEO_BLIT                  0x0001
+#define PSB_DELAYED_2D_BLIT             0x0002
+typedef struct video_info
+{
+       uint32_t flag;
+       uint32_t x, y, w, h;
+       uint32_t pFBBOHandle;
+       void * pFBVirtAddr;
+} video_info;
+#endif /* PSB_DETEAR */
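+/*
+ * Userspace passes the blit rectangle and a framebuffer handle here
+ * (see sVideoInfo in drm_psb_cmdbuf_arg below). PSB_VIDEO_BLIT marks a
+ * video blit command; PSB_DELAYED_2D_BLIT marks one the kernel defers,
+ * presumably to synchronize it with display refresh and avoid tearing.
+ */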
+
+struct drm_psb_scene {
+       int handle_valid;
+       uint32_t handle;
+       uint32_t w;
+       uint32_t h;
+       uint32_t num_buffers;
+};
+
+struct drm_psb_hw_info
+{
+        uint32_t rev_id;
+        uint32_t caps;
+};
+
+
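+/*
+ * Payload of the DRM_PSB_CMDBUF ioctl (see DRM_PSB_CMDBUF_IOCTL in
+ * psb_drv.c). The uint64_t members are presumably user-space pointers
+ * passed as 64-bit handles, per the usual DRM convention.
+ */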
+typedef struct drm_psb_cmdbuf_arg {
+       uint64_t buffer_list;   /* List of buffers to validate */
+       uint64_t clip_rects;    /* See i915 counterpart */
+       uint64_t scene_arg;
+       uint64_t fence_arg;
+
+       uint32_t ta_flags;
+
+       uint32_t ta_handle;     /* TA reg-value pairs */
+       uint32_t ta_offset;
+       uint32_t ta_size;
+
+       uint32_t oom_handle;
+       uint32_t oom_offset;
+       uint32_t oom_size;
+
+       uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
+       uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
+       uint32_t cmdbuf_size;
+
+       uint32_t reloc_handle;  /* Reloc buffer object */
+       uint32_t reloc_offset;
+       uint32_t num_relocs;
+
+       int32_t damage;         /* Damage front buffer with cliprects */
+       /* Not implemented yet */
+       uint32_t fence_flags;
+       uint32_t engine;
+
+       /*
+        * Feedback;
+        */
+
+       uint32_t feedback_ops;
+       uint32_t feedback_handle;
+       uint32_t feedback_offset;
+       uint32_t feedback_breakpoints;
+       uint32_t feedback_size;
+
+#ifdef PSB_DETEAR
+       video_info sVideoInfo;
+#endif
+} drm_psb_cmdbuf_arg_t;
+
+struct drm_psb_xhw_init_arg {
+       uint32_t operation;
+       uint32_t buffer_handle;
+#ifdef PSB_DETEAR
+       uint32_t tmpBOHandle;
+       void *fbPhys;
+       uint32_t fbSize;
+#endif
+};
+
+/*
+ * Feedback components:
+ */
+
+/*
+ * Vistest component. The number of these in the feedback buffer
+ * equals the number of vistest breakpoints + 1.
+ * This is currently the only feedback component.
+ */
+
+struct drm_psb_vistest {
+       uint32_t vt[8];
+};
+
+#define PSB_HW_COOKIE_SIZE 16
+#define PSB_HW_FEEDBACK_SIZE 8
+#define PSB_HW_OOM_CMD_SIZE 6
+
+struct drm_psb_xhw_arg {
+       uint32_t op;
+       int ret;
+       uint32_t irq_op;
+       uint32_t issue_irq;
+       uint32_t cookie[PSB_HW_COOKIE_SIZE];
+       union {
+               struct {
+                       uint32_t w;
+                       uint32_t h;
+                       uint32_t size;
+                       uint32_t clear_p_start;
+                       uint32_t clear_num_pages;
+               } si;
+               struct {
+                       uint32_t fire_flags;
+                       uint32_t hw_context;
+                       uint32_t offset;
+                       uint32_t engine;
+                       uint32_t flags;
+                       uint32_t rca;
+                       uint32_t num_oom_cmds;
+                       uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
+               } sb;
+               struct {
+                       uint32_t pages;
+                       uint32_t size;
+               } bi;
+               struct {
+                       uint32_t bca;
+                       uint32_t rca;
+                       uint32_t flags;
+               } oom;
+               struct {
+                       uint32_t pt_offset;
+                       uint32_t param_offset;
+                       uint32_t flags;
+               } bl;
+               struct {
+                       uint32_t value;
+               } cl;
+               uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
+       } arg;
+};
+
+#define DRM_PSB_CMDBUF          0x00
+#define DRM_PSB_XHW_INIT        0x01
+#define DRM_PSB_XHW             0x02
+#define DRM_PSB_SCENE_UNREF     0x03
+/* Controlling the kernel modesetting buffers */
+#define DRM_PSB_KMS_OFF                0x04
+#define DRM_PSB_KMS_ON         0x05
+#define DRM_PSB_HW_INFO         0x06
+
+#define PSB_XHW_INIT            0x00
+#define PSB_XHW_TAKEDOWN        0x01
+
+#define PSB_XHW_FIRE_RASTER     0x00
+#define PSB_XHW_SCENE_INFO      0x01
+#define PSB_XHW_SCENE_BIND_FIRE 0x02
+#define PSB_XHW_TA_MEM_INFO     0x03
+#define PSB_XHW_RESET_DPM       0x04
+#define PSB_XHW_OOM             0x05
+#define PSB_XHW_TERMINATE       0x06
+#define PSB_XHW_VISTEST         0x07
+#define PSB_XHW_RESUME          0x08
+#define PSB_XHW_TA_MEM_LOAD    0x09
+#define PSB_XHW_CHECK_LOCKUP    0x0a
+#define PSB_XHW_HOTPLUG         0x0b
+
+#define PSB_SCENE_FLAG_DIRTY       (1 << 0)
+#define PSB_SCENE_FLAG_COMPLETE    (1 << 1)
+#define PSB_SCENE_FLAG_SETUP       (1 << 2)
+#define PSB_SCENE_FLAG_SETUP_ONLY  (1 << 3)
+#define PSB_SCENE_FLAG_CLEARED     (1 << 4)
+
+#define PSB_TA_MEM_FLAG_TA            (1 << 0)
+#define PSB_TA_MEM_FLAG_RASTER        (1 << 1)
+#define PSB_TA_MEM_FLAG_HOSTA         (1 << 2)
+#define PSB_TA_MEM_FLAG_HOSTD         (1 << 3)
+#define PSB_TA_MEM_FLAG_INIT          (1 << 4)
+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
+
+/*Raster fire will deallocate memory */
+#define PSB_FIRE_FLAG_RASTER_DEALLOC  (1 << 0)
+/*Isp reset needed due to change in ZLS format */
+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
+/*These are set by Xpsb. */
+#define PSB_FIRE_FLAG_XHW_MASK        0xff000000
+/*The task has had at least one OOM and Xpsb will
+  send back messages on each fire. */
+#define PSB_FIRE_FLAG_XHW_OOM         (1 << 24)
+
+#define PSB_SCENE_ENGINE_TA    0
+#define PSB_SCENE_ENGINE_RASTER    1
+#define PSB_SCENE_NUM_ENGINES      2
+
+struct drm_psb_dev_info_arg {
+       uint32_t num_use_attribute_registers;
+};
+#define DRM_PSB_DEVINFO         0x01
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_drv.c b/psb-kernel-source-4.41.1/psb_drv.c
new file mode 100644 (file)
index 0000000..2a2e8d4
--- /dev/null
@@ -0,0 +1,1030 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "drm.h"
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "i915_reg.h"
+#include "psb_msvdx.h"
+#include "drm_pciids.h"
+#include "psb_scene.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/fb.h>
+
+int drm_psb_debug = 0;
+EXPORT_SYMBOL(drm_psb_debug);
+static int drm_psb_trap_pagefaults = 0;
+static int drm_psb_clock_gating = 0;
+static int drm_psb_ta_mem_size = 32 * 1024;
+int drm_psb_disable_vsync = 0;
+int drm_psb_detear = 0;
+int drm_psb_no_fb = 0;
+int drm_psb_force_pipeb = 0;
+char* psb_init_mode;
+int psb_init_xres;
+int psb_init_yres;
+#define SII_1392_WA
+#ifdef SII_1392_WA
+extern int SII_1392;
+#endif
+
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(clock_gating, "SGX clock gating: 1 = disable gating, 2 = automatic, other = leave hardware setting");
+MODULE_PARM_DESC(no_fb, "Disable FBdev");
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
+MODULE_PARM_DESC(detear, "eliminate video playback tearing");
+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
+MODULE_PARM_DESC(mode, "initial mode name");
+MODULE_PARM_DESC(xres, "initial mode width");
+MODULE_PARM_DESC(yres, "initial mode height");
+
+module_param_named(debug, drm_psb_debug, int, 0600);
+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
+module_param_named(detear, drm_psb_detear, int, 0600);
+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
+module_param_named(mode, psb_init_mode, charp, 0600);
+module_param_named(xres, psb_init_xres, int, 0600);
+module_param_named(yres, psb_init_yres, int, 0600);
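+/*
+ * Example (hypothetical values): loading the module with video detear
+ * enabled and an initial 1024x600 mode:
+ *
+ *     modprobe psb detear=1 mode=1024x600 xres=1024 yres=600
+ */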
+
+static struct pci_device_id pciidlist[] = {
+       psb_PCI_IDS
+};
+
+#define DRM_PSB_CMDBUF_IOCTL    DRM_IOW(DRM_PSB_CMDBUF, \
+                                       struct drm_psb_cmdbuf_arg)
+#define DRM_PSB_XHW_INIT_IOCTL  DRM_IOR(DRM_PSB_XHW_INIT, \
+                                       struct drm_psb_xhw_init_arg)
+#define DRM_PSB_XHW_IOCTL       DRM_IO(DRM_PSB_XHW)
+
+#define DRM_PSB_SCENE_UNREF_IOCTL DRM_IOWR(DRM_PSB_SCENE_UNREF, \
+                                          struct drm_psb_scene)
+#define DRM_PSB_HW_INFO_IOCTL DRM_IOR(DRM_PSB_HW_INFO, \
+                                           struct drm_psb_hw_info)
+
+#define DRM_PSB_KMS_OFF_IOCTL  DRM_IO(DRM_PSB_KMS_OFF)
+#define DRM_PSB_KMS_ON_IOCTL   DRM_IO(DRM_PSB_KMS_ON)
+
+static struct drm_ioctl_desc psb_ioctls[] = {
+       DRM_IOCTL_DEF(DRM_PSB_CMDBUF_IOCTL, psb_cmdbuf_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_PSB_XHW_INIT_IOCTL, psb_xhw_init_ioctl,
+                     DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_PSB_XHW_IOCTL, psb_xhw_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_PSB_SCENE_UNREF_IOCTL, drm_psb_scene_unref_ioctl,
+                     DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_PSB_KMS_OFF_IOCTL, psbfb_kms_off_ioctl,
+                     DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_PSB_KMS_ON_IOCTL, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_PSB_HW_INFO_IOCTL, psb_hw_info_ioctl, DRM_AUTH),
+};
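+/*
+ * Note: the array order must match the DRM_PSB_* command numbers
+ * (0x00-0x06) defined in psb_drm.h, since the DRM core dispatches by
+ * indexing this table with the ioctl command number.
+ */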
+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+#ifdef USE_PAT_WC
+#warning Init pat
+static int __cpuinit psb_cpu_callback(struct notifier_block *nfb,
+                           unsigned long action,
+                           void *hcpu)
+{
+       if (action == CPU_ONLINE)
+               drm_init_pat();
+
+       return 0;
+}
+
+static struct notifier_block __cpuinitdata psb_nb = {
+       .notifier_call = psb_cpu_callback,
+       .priority = 1
+};
+#endif
+
+static int dri_library_name(struct drm_device *dev, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "psb\n");
+}
+
+static void psb_set_uopt(struct drm_psb_uopt *uopt)
+{
+       uopt->clock_gating = drm_psb_clock_gating;
+}
+
+static void psb_lastclose(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       if (!dev->dev_private)
+               return;
+
+       mutex_lock(&dev->struct_mutex);
+       if (dev_priv->ta_mem)
+               psb_ta_mem_unref_devlocked(&dev_priv->ta_mem);
+       mutex_unlock(&dev->struct_mutex);
+       mutex_lock(&dev_priv->cmdbuf_mutex);
+       if (dev_priv->buffers) {
+               vfree(dev_priv->buffers);
+               dev_priv->buffers = NULL;
+       }
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+static void psb_do_takedown(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       mutex_lock(&dev->struct_mutex);
+       if (dev->bm.initialized) {
+               if (dev_priv->have_mem_rastgeom) {
+                       drm_bo_clean_mm(dev, DRM_PSB_MEM_RASTGEOM);
+                       dev_priv->have_mem_rastgeom = 0;
+               }
+               if (dev_priv->have_mem_mmu) {
+                       drm_bo_clean_mm(dev, DRM_PSB_MEM_MMU);
+                       dev_priv->have_mem_mmu = 0;
+               }
+               if (dev_priv->have_mem_aper) {
+                       drm_bo_clean_mm(dev, DRM_PSB_MEM_APER);
+                       dev_priv->have_mem_aper = 0;
+               }
+               if (dev_priv->have_tt) {
+                       drm_bo_clean_mm(dev, DRM_BO_MEM_TT);
+                       dev_priv->have_tt = 0;
+               }
+               if (dev_priv->have_vram) {
+                       drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM);
+                       dev_priv->have_vram = 0;
+               }
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       if (dev_priv->has_msvdx)
+               psb_msvdx_uninit(dev);
+
+       if (dev_priv->comm) {
+               kunmap(dev_priv->comm_page);
+               dev_priv->comm = NULL;
+       }
+       if (dev_priv->comm_page) {
+               __free_page(dev_priv->comm_page);
+               dev_priv->comm_page = NULL;
+       }
+}
+
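+/*
+ * uopt.clock_gating mirrors the clock_gating module parameter: 1
+ * forces gating off for every SGX unit, 2 selects automatic gating,
+ * and any other value keeps whatever the register already holds.
+ */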
+void psb_clockgating(struct drm_psb_private *dev_priv)
+{
+       uint32_t clock_gating;
+
+       if (dev_priv->uopt.clock_gating == 1) {
+               PSB_DEBUG_INIT("Disabling clock gating.\n");
+
+               clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
+                               _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
+                    _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
+                    _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
+                    _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
+                    _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
+                    _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
+
+       } else if (dev_priv->uopt.clock_gating == 2) {
+               PSB_DEBUG_INIT("Enabling clock gating.\n");
+
+               clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
+                               _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_AUTO <<
+                    _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_AUTO <<
+                    _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_AUTO <<
+                    _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_AUTO <<
+                    _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
+                   (_PSB_C_CLKGATECTL_CLKG_AUTO <<
+                    _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
+       } else
+               clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+
+#ifdef FIX_TG_2D_CLOCKGATE
+       clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
+       clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
+                        _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
+#endif
+       PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
+       (void)PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+static int psb_do_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct psb_gtt *pg = dev_priv->pg;
+
+       uint32_t stolen_gtt;
+       uint32_t tt_start;
+       uint32_t tt_pages;
+
+       int ret = -ENOMEM;
+
+       DRM_ERROR("Debug is 0x%08x\n", drm_psb_debug);
+
+       dev_priv->ta_mem_pages =
+           PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, PAGE_SIZE) >> PAGE_SHIFT;
+       dev_priv->comm_page = alloc_page(GFP_KERNEL);
+       if (!dev_priv->comm_page)
+               goto out_err;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
+       change_page_attr(dev_priv->comm_page, 1, PAGE_KERNEL_NOCACHE);
+#else
+       map_page_into_agp(dev_priv->comm_page);
+#endif
+
+       dev_priv->comm = kmap(dev_priv->comm_page);
+       memset((void *)dev_priv->comm, 0, PAGE_SIZE);
+
+       dev_priv->has_msvdx = 1;
+       if (psb_msvdx_init(dev))
+               dev_priv->has_msvdx = 0;
+
+       /*
+        * Initialize sequence numbers for the different command
+        * submission mechanisms.
+        */
+
+       dev_priv->sequence[PSB_ENGINE_2D] = 0;
+       dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
+       dev_priv->sequence[PSB_ENGINE_TA] = 0;
+       dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
+
+       if (pg->gatt_start & 0x0FFFFFFF) {
+               DRM_ERROR("GATT must be 256M aligned. This is a bug.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
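+       /*
+        * GTT entries are 4 bytes each, so this computes how many whole
+        * pages of GTT it takes to map the stolen area, clamped to the
+        * GTT size. Each GTT page (1024 entries) maps 4 MiB, hence the
+        * "* 1024" when advancing gatt_free_offset below.
+        */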
+       stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+       stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+       dev_priv->gatt_free_offset = pg->gatt_start +
+           (stolen_gtt << PAGE_SHIFT) * 1024;
+
+       /*
+        * Insert a cache-coherent communications page in mmu space
+        * just after the stolen area. Will be used for fencing etc.
+        */
+
+       dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
+       dev_priv->gatt_free_offset += PAGE_SIZE;
+
+       ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+                                  &dev_priv->comm_page,
+                                  dev_priv->comm_mmu_offset, 1, 0, 0, 0);
+
+       if (ret)
+               goto out_err;
+
+       if (1 || drm_debug) {
+               uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+               uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+               DRM_INFO("SGX core id = 0x%08x\n", core_id);
+               DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+                        (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+                        _PSB_CC_REVISION_MAJOR_SHIFT,
+                        (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+                        _PSB_CC_REVISION_MINOR_SHIFT);
+               DRM_INFO
+                   ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+                    (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+                    _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+                    (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+                    _PSB_CC_REVISION_DESIGNER_SHIFT);
+       }
+
+       dev_priv->irqmask_lock = SPIN_LOCK_UNLOCKED;
+       dev_priv->fence0_irq_on = 0;
+
+       tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+           pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
+       tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
+       tt_pages -= tt_start >> PAGE_SHIFT;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
+                           pg->stolen_size >> PAGE_SHIFT)) {
+               dev_priv->have_vram = 1;
+       }
+
+       if (!drm_bo_init_mm(dev, DRM_BO_MEM_TT, tt_start >> PAGE_SHIFT,
+                           tt_pages)) {
+               dev_priv->have_tt = 1;
+       }
+
+       if (!drm_bo_init_mm(dev, DRM_PSB_MEM_MMU, 0x00000000,
+                           (pg->gatt_start -
+                            PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
+               dev_priv->have_mem_mmu = 1;
+       }
+
+       if (!drm_bo_init_mm(dev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
+                           (PSB_MEM_MMU_START -
+                            PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
+               dev_priv->have_mem_rastgeom = 1;
+       }
+#if 0
+       if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
+               if (!drm_bo_init_mm(dev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
+                                   pg->gatt_pages - PSB_TT_PRIV0_PLIMIT)) {
+                       dev_priv->have_mem_aper = 1;
+               }
+       }
+#endif
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+      out_err:
+       psb_do_takedown(dev);
+       return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+#ifdef USE_PAT_WC
+#warning Init pat
+//     if (num_present_cpus() > 1)
+       unregister_cpu_notifier(&psb_nb);
+#endif
+
+       intel_modeset_cleanup(dev);
+
+       if (dev_priv) {
+               psb_watchdog_takedown(dev_priv);
+               psb_do_takedown(dev);
+               psb_xhw_takedown(dev_priv);
+               psb_scheduler_takedown(&dev_priv->scheduler);
+
+               mutex_lock(&dev->struct_mutex);
+               if (dev_priv->have_mem_pds) {
+                       drm_bo_clean_mm(dev, DRM_PSB_MEM_PDS);
+                       dev_priv->have_mem_pds = 0;
+               }
+               if (dev_priv->have_mem_kernel) {
+                       drm_bo_clean_mm(dev, DRM_PSB_MEM_KERNEL);
+                       dev_priv->have_mem_kernel = 0;
+               }
+               mutex_unlock(&dev->struct_mutex);
+
+               (void)drm_bo_driver_finish(dev);
+
+               if (dev_priv->pf_pd) {
+                       psb_mmu_free_pagedir(dev_priv->pf_pd);
+                       dev_priv->pf_pd = NULL;
+               }
+               if (dev_priv->mmu) {
+                       struct psb_gtt *pg = dev_priv->pg;
+
+                       down_read(&pg->sem);
+                       psb_mmu_remove_pfn_sequence(
+                               psb_mmu_get_default_pd(dev_priv->mmu),
+                               pg->gatt_start,
+                               pg->stolen_size >> PAGE_SHIFT);
+                       up_read(&pg->sem);
+                       psb_mmu_driver_takedown(dev_priv->mmu);
+                       dev_priv->mmu = NULL;
+               }
+               psb_gtt_takedown(dev_priv->pg, 1);
+               if (dev_priv->scratch_page) {
+                       __free_page(dev_priv->scratch_page);
+                       dev_priv->scratch_page = NULL;
+               }
+               psb_takedown_use_base(dev_priv);
+               if (dev_priv->vdc_reg) {
+                       iounmap(dev_priv->vdc_reg);
+                       dev_priv->vdc_reg = NULL;
+               }
+               if (dev_priv->sgx_reg) {
+                       iounmap(dev_priv->sgx_reg);
+                       dev_priv->sgx_reg = NULL;
+               }
+               if (dev_priv->msvdx_reg) {
+                       iounmap(dev_priv->msvdx_reg);
+                       dev_priv->msvdx_reg = NULL;
+               }
+
+               drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+               dev->dev_private = NULL;
+       }
+       return 0;
+}
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+extern char drm_init_mode[32];
+extern int drm_init_xres;
+extern int drm_init_yres;
+
+static int psb_initial_config(struct drm_device *dev, bool can_grow)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_output *output;
+       struct drm_crtc *crtc;
+       int ret = false;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       drm_crtc_probe_output_modes(dev, 2048, 2048);
+       
+       /* strncpy(drm_init_mode, psb_init_mode, strlen(psb_init_mode)); */
+       drm_init_xres = psb_init_xres;
+       drm_init_yres = psb_init_yres;
+       printk(KERN_INFO "psb: detear is %sabled\n", drm_psb_detear ? "en" : "dis");
+
+       drm_pick_crtcs(dev);
+
+       if ((I915_READ(PIPEACONF) & PIPEACONF_ENABLE) && !drm_psb_force_pipeb) {
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       if (!crtc->desired_mode)
+                               continue;
+
+                       dev->driver->fb_probe(dev, crtc);
+               }
+       } else {
+               list_for_each_entry_reverse(crtc, &dev->mode_config.crtc_list,
+                                           head) {
+                       if (!crtc->desired_mode)
+                               continue;
+
+                       dev->driver->fb_probe(dev, crtc);
+               }
+       }
+
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+
+               if (!output->crtc || !output->crtc->desired_mode)
+                       continue;
+
+               if (output->crtc->fb)
+                       drm_crtc_set_mode(output->crtc,
+                                         output->crtc->desired_mode, 0, 0);
+       }
+
+#ifdef SII_1392_WA
+       if((SII_1392 != 1) || (drm_psb_no_fb==0))
+               drm_disable_unused_functions(dev);
+#else
+       drm_disable_unused_functions(dev);
+#endif
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return ret;
+
+}
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+       struct drm_psb_private *dev_priv;
+       unsigned long resource_start;
+       struct psb_gtt *pg;
+       int ret = -ENOMEM;
+
+       DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
+       dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
+       if (dev_priv == NULL)
+               return -ENOMEM;
+
+       mutex_init(&dev_priv->temp_mem);
+       mutex_init(&dev_priv->cmdbuf_mutex);
+       mutex_init(&dev_priv->reset_mutex);
+       psb_init_disallowed();
+
+       atomic_set(&dev_priv->msvdx_mmu_invaldc, 0);
+
+#ifdef FIX_TG_16
+       atomic_set(&dev_priv->lock_2d, 0);
+       atomic_set(&dev_priv->ta_wait_2d, 0);
+       atomic_set(&dev_priv->ta_wait_2d_irq, 0);
+       atomic_set(&dev_priv->waiters_2d, 0);
+       DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
+#else
+       mutex_init(&dev_priv->mutex_2d);
+#endif
+
+       spin_lock_init(&dev_priv->reloc_lock);
+
+       DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
+       DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
+
+       dev->dev_private = (void *)dev_priv;
+       dev_priv->chipset = chipset;
+       psb_set_uopt(&dev_priv->uopt);
+
+       psb_watchdog_init(dev_priv);
+       psb_scheduler_init(dev, &dev_priv->scheduler);
+
+       resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+       
+       dev_priv->msvdx_reg =
+           ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
+       if (!dev_priv->msvdx_reg)
+               goto out_err;
+
+       dev_priv->vdc_reg =
+           ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+       if (!dev_priv->vdc_reg)
+               goto out_err;
+
+       dev_priv->sgx_reg =
+           ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
+       if (!dev_priv->sgx_reg)
+               goto out_err;
+
+       psb_clockgating(dev_priv);
+       if (psb_init_use_base(dev_priv, 3, 13))
+               goto out_err;
+
+       dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+       if (!dev_priv->scratch_page)
+               goto out_err;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
+       change_page_attr(dev_priv->scratch_page, 1, PAGE_KERNEL_NOCACHE);
+#else
+       map_page_into_agp(dev_priv->scratch_page);
+#endif
+
+       dev_priv->pg = psb_gtt_alloc(dev);
+       if (!dev_priv->pg)
+               goto out_err;
+
+       ret = psb_gtt_init(dev_priv->pg, 0);
+       if (ret)
+               goto out_err;
+
+       dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
+                                           drm_psb_trap_pagefaults, 0,
+                                           &dev_priv->msvdx_mmu_invaldc);
+       if (!dev_priv->mmu)
+               goto out_err;
+
+       pg = dev_priv->pg;
+
+       /*
+        * Make sgx MMU aware of the stolen memory area we call VRAM.
+        */
+
+       down_read(&pg->sem);
+       ret =
+           psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
+                                       pg->stolen_base >> PAGE_SHIFT,
+                                       pg->gatt_start,
+                                       pg->stolen_size >> PAGE_SHIFT, 0);
+       up_read(&pg->sem);
+       if (ret)
+               goto out_err;
+
+       dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+       if (!dev_priv->pf_pd)
+               goto out_err;
+
+       /*
+        * Make all presumably unused requestors page-fault by making them
+        * use context 1 which does not have any valid mappings.
+        */
+
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+       PSB_RSGX32(PSB_CR_BIF_BANK1);
+
+       psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+       psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+       psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
+
+       psb_init_2d(dev_priv);
+
+       ret = drm_bo_driver_init(dev);
+       if (ret)
+               goto out_err;
+
+       ret = drm_bo_init_mm(dev, DRM_PSB_MEM_KERNEL, 0x00000000,
+                            (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
+                            >> PAGE_SHIFT);
+       if (ret)
+               goto out_err;
+       dev_priv->have_mem_kernel = 1;
+
+       ret = drm_bo_init_mm(dev, DRM_PSB_MEM_PDS, 0x00000000,
+                            (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
+                            >> PAGE_SHIFT);
+       if (ret)
+               goto out_err;
+       dev_priv->have_mem_pds = 1;
+
+       ret = psb_do_init(dev);
+       if (ret)
+               return ret;
+
+       ret = psb_xhw_init(dev);
+       if (ret)
+               return ret;
+
+       PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
+       PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
+
+       intel_modeset_init(dev);
+       psb_initial_config(dev, false);
+
+#ifdef USE_PAT_WC
+#warning Init pat
+//     if (num_present_cpus() > 1)
+       register_cpu_notifier(&psb_nb);
+#endif
+
+       return 0;
+      out_err:
+       psb_driver_unload(dev);
+       return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+       return 0;
+}
+
+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[PSB_ENGINE_VIDEO];
+       struct drm_fence_object *fence;
+       int ret = 0;
+       int signaled = 0;
+       int count = 0;
+       unsigned long _end = jiffies + 3 * DRM_HZ;
+
+       PSB_DEBUG_GENERAL("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
+
+       /* Set the MSVDX reset flag here. */
+       dev_priv->msvdx_needs_reset = 1;
+
+       /* Ensure that all pending IRQs are serviced. */
+       list_for_each_entry(fence, &fc->ring, ring) {
+               count++;
+               do {
+                       DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
+                                   (signaled =
+                                    drm_fence_object_signaled(fence,
+                                                              DRM_FENCE_TYPE_EXE)));
+                       if (signaled)
+                               break;
+                       if (time_after_eq(jiffies, _end))
+                               PSB_DEBUG_GENERAL
+                                   ("MSVDXACPI: fence 0x%x did not signal within 3 seconds; suspending anyway\n",
+                                    (unsigned int)fence);
+               } while (ret == -EINTR);
+
+       }
+       
+       /* Issue software reset */
+       PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
+
+       ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
+                                   MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
+
+       PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
+                         count);
+       return 0;
+}
+
+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+        struct drm_output *output;
+
+       //if (drm_psb_no_fb == 0)
+       //      psbfb_suspend(dev);
+#ifdef WA_NO_FB_GARBAGE_DISPLAY
+       //else {
+       if (drm_psb_no_fb != 0) {
+               if (num_registered_fb) {
+                       list_for_each_entry(output,
+                                           &dev->mode_config.output_list,
+                                           head) {
+                               if (output->crtc != NULL)
+                                       intel_crtc_mode_save(output->crtc);
+                               /* if (output->funcs->save) */
+                               /*      output->funcs->save(output); */
+                       }
+               }
+       }
+#endif
+
+       dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
+       (void)psb_idle_3d(dev);
+       (void)psb_idle_2d(dev);
+       flush_scheduled_work();
+
+       psb_takedown_use_base(dev_priv);
+
+       if (dev_priv->has_msvdx)
+               psb_prepare_msvdx_suspend(dev);
+
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+static int psb_resume(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct psb_gtt *pg = dev_priv->pg;
+        struct drm_output *output;
+       int ret;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+
+#ifdef USE_PAT_WC
+#warning Init pat
+       /* For a single CPU we do it here; with more than one CPU we
+        * use the CPU notifier to reinitialize PAT on each CPU.
+        */
+//     if (num_present_cpus() == 1)
+       drm_init_pat();
+#endif
+
+       INIT_LIST_HEAD(&dev_priv->resume_buf.head);
+       dev_priv->msvdx_needs_reset = 1;
+
+       PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+       pci_write_config_word(pdev, PSB_GMCH_CTRL,
+                             pg->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+       /*
+        * The GTT page tables are probably not saved.
+        * However, TT and VRAM are empty at this point.
+        */
+
+       psb_gtt_init(dev_priv->pg, 1);
+
+       /*
+        * The SGX loses its register contents.
+        * Restore BIF registers. The MMU page tables are
+        * "normal" pages, so their contents should be kept.
+        */
+
+       PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+       PSB_RSGX32(PSB_CR_BIF_BANK1);
+
+       psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+       psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+       psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
+
+       /*
+        * 2D Base registers..
+        */
+       psb_init_2d(dev_priv);
+
+       if (drm_psb_no_fb == 0) {
+               list_for_each_entry(output, &dev->mode_config.output_list,
+                                   head) {
+                       if (output->crtc != NULL)
+                               drm_crtc_set_mode(output->crtc,
+                                                 &output->crtc->mode,
+                                                 output->crtc->x,
+                                                 output->crtc->y);
+               }
+       }
+
+       /*
+        * Persistent 3D base registers and USSE base registers.
+        */
+
+       PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
+       PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
+       psb_init_use_base(dev_priv, 3, 13);
+
+       /*
+        * Now, re-initialize the 3D engine.
+        */
+
+       psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
+
+       psb_scheduler_ta_mem_check(dev_priv);
+       if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
+               psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
+                                   PSB_TA_MEM_FLAG_TA |
+                                   PSB_TA_MEM_FLAG_RASTER |
+                                   PSB_TA_MEM_FLAG_HOSTA |
+                                   PSB_TA_MEM_FLAG_HOSTD |
+                                   PSB_TA_MEM_FLAG_INIT,
+                                   dev_priv->ta_mem->ta_memory->offset,
+                                   dev_priv->ta_mem->hw_data->offset,
+                                   dev_priv->ta_mem->hw_cookie);
+       }
+
+       //if (drm_psb_no_fb == 0)
+       //      psbfb_resume(dev);
+#ifdef WA_NO_FB_GARBAGE_DISPLAY
+       //else {
+       if (drm_psb_no_fb != 0) {
+               if (num_registered_fb) {
+                       struct fb_info *fb_info = registered_fb[0];
+
+                       list_for_each_entry(output,
+                                           &dev->mode_config.output_list,
+                                           head) {
+                               if (output->crtc != NULL)
+                                       intel_crtc_mode_restore(output->crtc);
+                       }
+                       if (fb_info) {
+                               fb_set_suspend(fb_info, 0);
+                               printk(KERN_INFO "psb: fbdev resumed\n");
+                       }
+               }
+       }
+#endif
+
+       return 0;
+}
+
+/* always available as we are SIGIO'd */
+static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       return (POLLIN | POLLRDNORM);
+}
+
+static int psb_release(struct inode *inode, struct file *filp)
+{
+       struct drm_file *file_priv = (struct drm_file *)filp->private_data;
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       if (dev_priv && dev_priv->xhw_file) {
+               psb_xhw_init_takedown(dev_priv, file_priv, 1);
+       }
+       return drm_release(inode, filp);
+}
+
+extern struct drm_fence_driver psb_fence_driver;
+
+/*
+ * Use this memory type priority if no eviction is needed.
+ */
+static uint32_t psb_mem_prios[] = { DRM_BO_MEM_VRAM,
+       DRM_BO_MEM_TT,
+       DRM_PSB_MEM_KERNEL,
+       DRM_PSB_MEM_MMU,
+       DRM_PSB_MEM_RASTGEOM,
+       DRM_PSB_MEM_PDS,
+       DRM_PSB_MEM_APER,
+       DRM_BO_MEM_LOCAL
+};
+
+/*
+ * Use this memory type priority if need to evict.
+ */
+static uint32_t psb_busy_prios[] = { DRM_BO_MEM_TT,
+       DRM_BO_MEM_VRAM,
+       DRM_PSB_MEM_KERNEL,
+       DRM_PSB_MEM_MMU,
+       DRM_PSB_MEM_RASTGEOM,
+       DRM_PSB_MEM_PDS,
+       DRM_PSB_MEM_APER,
+       DRM_BO_MEM_LOCAL
+};
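+/*
+ * The two orderings differ only at the top: VRAM (stolen memory) is
+ * preferred while space is free, but under pressure TT is evicted
+ * into first, presumably because the small stolen area is the scarcer
+ * resource.
+ */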
+
+static struct drm_bo_driver psb_bo_driver = {
+       .mem_type_prio = psb_mem_prios,
+       .mem_busy_prio = psb_busy_prios,
+       .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
+       .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
+       .create_ttm_backend_entry = drm_psb_tbe_init,
+       .fence_type = psb_fence_types,
+       .invalidate_caches = psb_invalidate_caches,
+       .init_mem_type = psb_init_mem_type,
+       .evict_mask = psb_evict_mask,
+       .move = psb_move,
+       .backend_size = psb_tbe_size,
+       .command_stream_barrier = NULL,
+};
+
+static struct drm_driver driver = {
+       .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+           DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+       .load = psb_driver_load,
+       .unload = psb_driver_unload,
+       .dri_library_name = dri_library_name,
+       .get_reg_ofs = drm_core_get_reg_ofs,
+       .ioctls = psb_ioctls,
+       .device_is_agp = psb_driver_device_is_agp,
+       .vblank_wait = psb_vblank_wait2,
+       .vblank_wait2 = psb_vblank_wait2,
+       .irq_preinstall = psb_irq_preinstall,
+       .irq_postinstall = psb_irq_postinstall,
+       .irq_uninstall = psb_irq_uninstall,
+       .irq_handler = psb_irq_handler,
+       .fb_probe = psbfb_probe,
+       .fb_remove = psbfb_remove,
+       .firstopen = NULL,
+       .lastclose = psb_lastclose,
+       .fops = {
+                .owner = THIS_MODULE,
+                .open = drm_open,
+                .release = psb_release,
+                .ioctl = drm_ioctl,
+                .mmap = drm_mmap,
+                .poll = psb_poll,
+                .fasync = drm_fasync,
+                },
+       .pci_driver = {
+                      .name = DRIVER_NAME,
+                      .id_table = pciidlist,
+                      .probe = probe,
+                      .remove = __devexit_p(drm_cleanup_pci),
+                      .resume = psb_resume,
+                      .suspend = psb_suspend,
+                      },
+       .fence_driver = &psb_fence_driver,
+       .bo_driver = &psb_bo_driver,
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = PSB_DRM_DRIVER_DATE,
+       .major = PSB_DRM_DRIVER_MAJOR,
+       .minor = PSB_DRM_DRIVER_MINOR,
+       .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       return drm_get_dev(pdev, ent, &driver);
+}
+
+static int __init psb_init(void)
+{
+       driver.num_ioctls = psb_max_ioctl;
+
+       return drm_init(&driver, pciidlist);
+}
+
+static void __exit psb_exit(void)
+{
+       drm_exit(&driver);
+}
+
+module_init(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/psb-kernel-source-4.41.1/psb_drv.h b/psb-kernel-source-4.41.1/psb_drv.h
new file mode 100644 (file)
index 0000000..f2c2657
--- /dev/null
@@ -0,0 +1,819 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include "drmP.h"
+#include "psb_drm.h"
+#include "psb_reg.h"
+#include "psb_schedule.h"
+#include "intel_drv.h"
+
+#ifdef PSB_DETEAR
+
+#define MAX_BLIT_REQ_SIZE (16 * 4)
+#define PSB_BLIT_QUEUE_LEN 100
+
+typedef struct delayed_2d_blit_req
+{
+       unsigned char BlitReqData[MAX_BLIT_REQ_SIZE];
+       int gnBlitCmdSize;      /* currently always 40 bytes */
+} delayed_2d_blit_req_t, *delayed_2d_blit_req_ptr;
+
+typedef struct psb_2d_blit_queue
+{
+       delayed_2d_blit_req_t sBlitReq[PSB_BLIT_QUEUE_LEN];
+       int nHead, nTail;
+       spinlock_t sLock;
+} psb_2d_blit_queue_t, *psb_2d_blit_queue_ptr;
+
+extern int psb_blit_queue_init(psb_2d_blit_queue_ptr q);
+extern int psb_blit_queue_is_empty(psb_2d_blit_queue_ptr q);
+extern int psb_blit_queue_is_full(psb_2d_blit_queue_ptr q);
+extern delayed_2d_blit_req_ptr psb_blit_queue_get_item(psb_2d_blit_queue_ptr q);
+extern int psb_blit_queue_put_item(psb_2d_blit_queue_ptr q, delayed_2d_blit_req_ptr elem);
+void psb_blit_queue_clear(psb_2d_blit_queue_ptr q);
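+/*
+ * A minimal usage sketch, assuming the obvious ring-buffer semantics
+ * of nHead/nTail (process_req() below is a placeholder, not a driver
+ * function):
+ *
+ *     delayed_2d_blit_req_t req = { .gnBlitCmdSize = 40 };
+ *     if (!psb_blit_queue_is_full(&q))
+ *             psb_blit_queue_put_item(&q, &req);
+ *     while (!psb_blit_queue_is_empty(&q))
+ *             process_req(psb_blit_queue_get_item(&q));
+ */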
+
+#endif /* PSB_DETEAR */
+
+enum {
+       CHIP_PSB_8108 = 0,
+       CHIP_PSB_8109 = 1
+};
+
+/*
+ * Hardware bugfixes
+ */
+
+#define FIX_TG_16
+#define FIX_TG_2D_CLOCKGATE
+
+#define DRIVER_NAME "psb"
+#define DRIVER_DESC "drm driver for the Intel GMA500"
+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
+
+#define PSB_DRM_DRIVER_DATE "20090416"
+#define PSB_DRM_DRIVER_MAJOR 4
+#define PSB_DRM_DRIVER_MINOR 41
+#define PSB_DRM_DRIVER_PATCHLEVEL 1
+
+#define PSB_VDC_OFFSET           0x00000000
+#define PSB_VDC_SIZE             0x00080000
+#define PSB_SGX_SIZE             0x8000
+#define PSB_SGX_OFFSET           0x00040000
+#define PSB_MMIO_RESOURCE        0
+#define PSB_GATT_RESOURCE        2
+#define PSB_GTT_RESOURCE         3
+#define PSB_GMCH_CTRL            0x52
+#define PSB_BSM                  0x5C
+#define _PSB_GMCH_ENABLED        0x4
+#define PSB_PGETBL_CTL           0x2020
+#define _PSB_PGETBL_ENABLED      0x00000001
+#define PSB_SGX_2D_SLAVE_PORT    0x4000
+#define PSB_TT_PRIV0_LIMIT       (256*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT      (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+#define PSB_NUM_VALIDATE_BUFFERS 1024
+#define PSB_MEM_KERNEL_START     0x10000000
+#define PSB_MEM_PDS_START        0x20000000
+#define PSB_MEM_MMU_START        0x40000000
+
+#define DRM_PSB_MEM_KERNEL       DRM_BO_MEM_PRIV0
+#define DRM_PSB_FLAG_MEM_KERNEL  DRM_BO_FLAG_MEM_PRIV0
+
+/*
+ * Flags for external memory type field.
+ */
+
+#define PSB_MSVDX_OFFSET        0x50000        /*MSVDX Base offset */
+#define PSB_MSVDX_SIZE          0x8000 /*MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
+
+#define PSB_MMU_CACHED_MEMORY     0x0001       /* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY         0x0002       /* MMU RO memory */
+#define PSB_MMU_WO_MEMORY         0x0004       /* MMU WO memory */
+
+/*
+ * PTE's and PDE's
+ */
+
+#define PSB_PDE_MASK              0x003FFFFF
+#define PSB_PDE_SHIFT             22
+#define PSB_PTE_SHIFT             12
+
+#define PSB_PTE_VALID             0x0001       /* PTE / PDE valid */
+#define PSB_PTE_WO                0x0002       /* Write only */
+#define PSB_PTE_RO                0x0004       /* Read only */
+#define PSB_PTE_CACHED            0x0008       /* CPU cache coherent */
+
+/*
+ * VDC registers and bits
+ */
+#define PSB_HWSTAM                0x2098
+#define PSB_INSTPM                0x20C0
+#define PSB_INT_IDENTITY_R        0x20A4
+#define _PSB_VSYNC_PIPEB_FLAG     (1<<5)
+#define _PSB_VSYNC_PIPEA_FLAG     (1<<7)
+#define _PSB_HOTPLUG_INTERRUPT_FLAG (1<<17)
+#define _PSB_IRQ_SGX_FLAG         (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG       (1<<19)
+#define PSB_INT_MASK_R            0x20A8
+#define PSB_INT_ENABLE_R          0x20A0
+#define _PSB_HOTPLUG_INTERRUPT_ENABLE (1<<17)
+#define PSB_PIPEASTAT             0x70024
+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
+#define _PSB_VBLANK_CLEAR         (1 << 1)
+#define PSB_PIPEBSTAT             0x71024
+
+#define PORT_HOTPLUG_ENABLE_REG      0x61110
+#define SDVOB_HOTPLUG_DETECT_ENABLE  (1 << 26)
+#define PORT_HOTPLUG_STATUS_REG      0x61114
+#define SDVOB_HOTPLUG_STATUS_ISPLUG  (1 << 15)
+#define SDVOB_HOTPLUG_STATUS         (1 << 6)
+
+#define _PSB_MMU_ER_MASK      0x0001FF00
+#define _PSB_MMU_ER_HOST      (1 << 16)
+#define GPIOA                  0x5010
+#define GPIOB                  0x5014
+#define GPIOC                  0x5018
+#define GPIOD                  0x501c
+#define GPIOE                  0x5020
+#define GPIOF                  0x5024
+#define GPIOG                  0x5028
+#define GPIOH                  0x502c
+#define GPIO_CLOCK_DIR_MASK            (1 << 0)
+#define GPIO_CLOCK_DIR_IN              (0 << 1)
+#define GPIO_CLOCK_DIR_OUT             (1 << 1)
+#define GPIO_CLOCK_VAL_MASK            (1 << 2)
+#define GPIO_CLOCK_VAL_OUT             (1 << 3)
+#define GPIO_CLOCK_VAL_IN              (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE      (1 << 5)
+#define GPIO_DATA_DIR_MASK             (1 << 8)
+#define GPIO_DATA_DIR_IN               (0 << 9)
+#define GPIO_DATA_DIR_OUT              (1 << 9)
+#define GPIO_DATA_VAL_MASK             (1 << 10)
+#define GPIO_DATA_VAL_OUT              (1 << 11)
+#define GPIO_DATA_VAL_IN               (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE       (1 << 13)
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV       0x6010
+
+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
+#define I915_WRITE(_offs, _val) \
+  iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define I915_READ(_offs) \
+  ioread32(dev_priv->vdc_reg + (_offs))
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST               1
+#define PSB_UIRQ_OOM_REPLY             2
+#define PSB_UIRQ_FIRE_TA_REPLY         3
+#define PSB_UIRQ_FIRE_RASTER_REPLY     4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
+
+/*
+ * User options.
+ */
+
+struct drm_psb_uopt {
+       int clock_gating;
+};
+
+struct psb_gtt {
+       struct drm_device *dev;
+       int initialized;
+       uint32_t gatt_start;
+       uint32_t gtt_start;
+       uint32_t gtt_phys_start;
+       unsigned gtt_pages;
+       unsigned gatt_pages;
+       uint32_t stolen_base;
+       uint32_t pge_ctl;
+       u16 gmch_ctrl;
+       unsigned long stolen_size;
+       uint32_t *gtt_map;
+       struct rw_semaphore sem;
+};
+
+struct psb_use_base {
+       struct list_head head;
+       struct drm_fence_object *fence;
+       unsigned int reg;
+       unsigned long offset;
+       unsigned int dm;
+};
+
+struct psb_buflist_item;
+
+struct psb_msvdx_cmd_queue {
+       struct list_head head;
+       void *cmd;
+       unsigned long cmd_size;
+       uint32_t sequence;
+};
+
+struct drm_psb_private {
+       unsigned long chipset;
+        uint8_t psb_rev_id;
+
+       struct psb_xhw_buf resume_buf;
+       struct drm_psb_dev_info_arg dev_info;
+       struct drm_psb_uopt uopt;
+
+       struct psb_gtt *pg;
+
+       struct page *scratch_page;
+       struct page *comm_page;
+
+       volatile uint32_t *comm;
+       uint32_t comm_mmu_offset;
+       uint32_t mmu_2d_offset;
+       uint32_t sequence[PSB_NUM_ENGINES];
+       uint32_t last_sequence[PSB_NUM_ENGINES];
+       int idle[PSB_NUM_ENGINES];
+       uint32_t last_submitted_seq[PSB_NUM_ENGINES];
+       int engine_lockup_2d;
+
+       struct psb_mmu_driver *mmu;
+       struct psb_mmu_pd *pf_pd;
+
+       uint8_t *sgx_reg;
+       uint8_t *vdc_reg;
+       uint8_t *msvdx_reg;
+
+       /*
+        * MSVDX
+        */
+       int msvdx_needs_reset;
+       int has_msvdx;
+       uint32_t gatt_free_offset;
+       atomic_t msvdx_mmu_invaldc;
+       int msvdx_power_saving;
+
+       /*
+        * Fencing / irq.
+        */
+
+       uint32_t sgx_irq_mask;
+       uint32_t sgx2_irq_mask;
+       uint32_t vdc_irq_mask;
+
+       spinlock_t irqmask_lock;
+       spinlock_t sequence_lock;
+       int fence0_irq_on;
+       int irq_enabled;
+       unsigned int irqen_count_2d;
+       wait_queue_head_t event_2d_queue;
+
+#ifdef FIX_TG_16
+       wait_queue_head_t queue_2d;
+       atomic_t lock_2d;
+       atomic_t ta_wait_2d;
+       atomic_t ta_wait_2d_irq;
+       atomic_t waiters_2d;
+#else
+       struct mutex mutex_2d;
+#endif
+       uint32_t msvdx_current_sequence;
+       uint32_t msvdx_last_sequence;
+#define MSVDX_MAX_IDELTIME (HZ * 30)
+       uint32_t msvdx_finished_sequence;
+       uint32_t msvdx_start_idle;
+       unsigned long msvdx_idle_start_jiffies;
+
+       int fence2_irq_on;
+
+       /*
+        * MSVDX Rendec Memory
+        */
+       struct drm_buffer_object *ccb0;
+       uint32_t base_addr0;
+       struct drm_buffer_object *ccb1;
+       uint32_t base_addr1;
+
+       /*
+        * Memory managers
+        */
+
+       int have_vram;
+       int have_tt;
+       int have_mem_mmu;
+       int have_mem_aper;
+       int have_mem_kernel;
+       int have_mem_pds;
+       int have_mem_rastgeom;
+       struct mutex temp_mem;
+
+       /*
+        * Relocation buffer mapping.
+        */
+
+       spinlock_t reloc_lock;
+       unsigned int rel_mapped_pages;
+       wait_queue_head_t rel_mapped_queue;
+
+       /*
+        * SAREA
+        */
+       struct drm_psb_sarea *sarea_priv;
+
+       /*
+        * LVDS info
+        */
+       int backlight_duty_cycle;       /* restore backlight to this value */
+       bool panel_wants_dither;
+       struct drm_display_mode *panel_fixed_mode;
+
+       /*
+        * Register state
+        */
+       uint32_t saveDSPACNTR;
+       uint32_t saveDSPBCNTR;
+       uint32_t savePIPEACONF;
+       uint32_t savePIPEBCONF;
+       uint32_t savePIPEASRC;
+       uint32_t savePIPEBSRC;
+       uint32_t saveFPA0;
+       uint32_t saveFPA1;
+       uint32_t saveDPLL_A;
+       uint32_t saveDPLL_A_MD;
+       uint32_t saveHTOTAL_A;
+       uint32_t saveHBLANK_A;
+       uint32_t saveHSYNC_A;
+       uint32_t saveVTOTAL_A;
+       uint32_t saveVBLANK_A;
+       uint32_t saveVSYNC_A;
+       uint32_t saveDSPASTRIDE;
+       uint32_t saveDSPASIZE;
+       uint32_t saveDSPAPOS;
+       uint32_t saveDSPABASE;
+       uint32_t saveDSPASURF;
+       uint32_t saveFPB0;
+       uint32_t saveFPB1;
+       uint32_t saveDPLL_B;
+       uint32_t saveDPLL_B_MD;
+       uint32_t saveHTOTAL_B;
+       uint32_t saveHBLANK_B;
+       uint32_t saveHSYNC_B;
+       uint32_t saveVTOTAL_B;
+       uint32_t saveVBLANK_B;
+       uint32_t saveVSYNC_B;
+       uint32_t saveDSPBSTRIDE;
+       uint32_t saveDSPBSIZE;
+       uint32_t saveDSPBPOS;
+       uint32_t saveDSPBBASE;
+       uint32_t saveDSPBSURF;
+       uint32_t saveVCLK_DIVISOR_VGA0;
+       uint32_t saveVCLK_DIVISOR_VGA1;
+       uint32_t saveVCLK_POST_DIV;
+       uint32_t saveVGACNTRL;
+       uint32_t saveADPA;
+       uint32_t saveLVDS;
+       uint32_t saveDVOA;
+       uint32_t saveDVOB;
+       uint32_t saveDVOC;
+       uint32_t savePP_ON;
+       uint32_t savePP_OFF;
+       uint32_t savePP_CONTROL;
+       uint32_t savePP_CYCLE;
+       uint32_t savePFIT_CONTROL;
+       uint32_t savePaletteA[256];
+       uint32_t savePaletteB[256];
+       uint32_t saveBLC_PWM_CTL;
+       uint32_t saveCLOCKGATING;
+
+       /*
+        * USE code base register management.
+        */
+
+       struct drm_reg_manager use_manager;
+
+       /*
+        * Xhw
+        */
+
+       uint32_t *xhw;
+       struct drm_buffer_object *xhw_bo;
+       struct drm_bo_kmap_obj xhw_kmap;
+       struct list_head xhw_in;
+       spinlock_t xhw_lock;
+       atomic_t xhw_client;
+       struct drm_file *xhw_file;
+       wait_queue_head_t xhw_queue;
+       wait_queue_head_t xhw_caller_queue;
+       struct mutex xhw_mutex;
+       struct psb_xhw_buf *xhw_cur_buf;
+       int xhw_submit_ok;
+       int xhw_on;
+
+       /*
+        * Scheduling.
+        */
+
+       struct mutex reset_mutex;
+       struct mutex cmdbuf_mutex;
+       struct psb_scheduler scheduler;
+        struct psb_buflist_item *buffers;
+       uint32_t ta_mem_pages;
+       struct psb_ta_mem *ta_mem;
+       int force_ta_mem_load;
+
+       /*
+        * Watchdog
+        */
+
+       spinlock_t watchdog_lock;
+       struct timer_list watchdog_timer;
+       struct work_struct watchdog_wq;
+       struct work_struct msvdx_watchdog_wq;
+       int timer_available;
+
+       /*
+        * msvdx command queue
+        */
+       spinlock_t msvdx_lock;
+       struct mutex msvdx_mutex;
+       struct list_head msvdx_queue;
+       int msvdx_busy;
+
+       /*
+        * DVD de-tearing performance evaluation.
+        */
+       struct timeval latest_vblank;
+       wait_queue_head_t blit_2d_queue;
+       int blit_2d;
+};
+
+struct psb_mmu_driver;
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+                                                 int trap_pagefaults,
+                                                 int invalid_type,
+                                                 atomic_t *msvdx_mmu_invaldc);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver);
+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+                              uint32_t gtt_start, uint32_t gtt_pages);
+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+                                          int trap_pagefaults,
+                                          int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+                                       unsigned long address,
+                                       uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+                                      uint32_t start_pfn,
+                                      unsigned long address,
+                                      uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+                                 unsigned long *pfn);
+
+/*
+ * Enable / disable MMU for different requestors.
+ */
+
+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
+                                    uint32_t mask);
+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
+                                     uint32_t mask);
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+                               unsigned long address, uint32_t num_pages,
+                               uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+                                uint32_t num_pages,
+                                uint32_t desired_tile_stride,
+                                uint32_t hw_tile_stride);
+/*
+ * psb_sgx.c
+ */
+
+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
+                            uint32_t sequence);
+extern void psb_init_2d(struct drm_psb_private *dev_priv);
+extern int psb_idle_2d(struct drm_device *dev);
+extern int psb_idle_3d(struct drm_device *dev);
+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
+                                uint32_t src_offset,
+                                uint32_t dst_offset, uint32_t pages,
+                                int direction);
+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+extern int psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
+                         unsigned int cmds);
+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
+                                 struct drm_buffer_object *cmd_buffer,
+                                 unsigned long cmd_offset,
+                                 unsigned long cmd_size, int engine,
+                                 uint32_t * copy_buffer);
+extern void psb_fence_or_sync(struct drm_file *priv,
+                             int engine,
+                             struct drm_psb_cmdbuf_arg *arg,
+                             struct drm_fence_arg *fence_arg,
+                             struct drm_fence_object **fence_p);
+extern void psb_init_disallowed(void);
+
+/*
+ * psb_irq.c
+ */
+
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern void psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+
+/*
+ * psb_fence.c
+ */
+
+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
+                                          uint32_t class);
+extern int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
+                                  uint32_t flags, uint32_t * sequence,
+                                  uint32_t * native_type);
+extern void psb_fence_error(struct drm_device *dev,
+                           uint32_t class,
+                           uint32_t sequence, uint32_t type, int error);
+
+/* MSVDX stuff */
+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
+extern int psb_hw_info_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv);
+
+/*
+ * psb_buffer.c
+ */
+extern struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev);
+extern int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
+                          uint32_t * type);
+extern uint32_t psb_evict_mask(struct drm_buffer_object *bo);
+extern int psb_invalidate_caches(struct drm_device *dev, uint64_t flags);
+extern int psb_init_mem_type(struct drm_device *dev, uint32_t type,
+                            struct drm_mem_type_manager *man);
+extern int psb_move(struct drm_buffer_object *bo,
+                   int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
+extern int psb_tbe_size(struct drm_device *dev, unsigned long num_pages);
+
+/*
+ * psb_gtt.c
+ */
+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
+                               unsigned offset_pages, unsigned num_pages,
+                               unsigned desired_tile_stride,
+                               unsigned hw_tile_stride, int type);
+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
+                               unsigned num_pages,
+                               unsigned desired_tile_stride,
+                               unsigned hw_tile_stride);
+
+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
+
+/*
+ * psb_fb.c
+ */
+extern int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+extern void psbfb_suspend(struct drm_device *dev);
+extern void psbfb_resume(struct drm_device *dev);
+
+/*
+ * psb_reset.c
+ */
+
+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/*
+ * psb_regman.c
+ */
+
+extern void psb_takedown_use_base(struct drm_psb_private *dev_priv);
+extern int psb_grab_use_base(struct drm_psb_private *dev_priv,
+                            unsigned long dev_virtual,
+                            unsigned long size,
+                            unsigned int data_master,
+                            uint32_t fence_class,
+                            uint32_t fence_type,
+                            int no_wait,
+                            int ignore_signals,
+                            int *r_reg, uint32_t * r_offset);
+extern int psb_init_use_base(struct drm_psb_private *dev_priv,
+                            unsigned int reg_start, unsigned int reg_num);
+
+/*
+ * psb_xhw.c
+ */
+
+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+extern int psb_xhw_init(struct drm_device *dev);
+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
+                                 struct drm_file *file_priv, int closing);
+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
+                                  struct psb_xhw_buf *buf,
+                                  uint32_t fire_flags,
+                                  uint32_t hw_context,
+                                  uint32_t * cookie,
+                                  uint32_t * oom_cmds,
+                                  uint32_t num_oom_cmds,
+                                  uint32_t offset,
+                                  uint32_t engine, uint32_t flags);
+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
+                              struct psb_xhw_buf *buf, uint32_t fire_flags);
+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
+                             struct psb_xhw_buf *buf,
+                             uint32_t w,
+                             uint32_t h,
+                             uint32_t * hw_cookie,
+                             uint32_t * bo_size,
+                             uint32_t * clear_p_start,
+                             uint32_t * clear_num_pages);
+
+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
+                            struct psb_xhw_buf *buf);
+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
+                               struct psb_xhw_buf *buf, uint32_t * value);
+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
+                              struct psb_xhw_buf *buf,
+                              uint32_t pages,
+                              uint32_t * hw_cookie, uint32_t * size);
+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
+                         struct psb_xhw_buf *buf, uint32_t * cookie);
+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
+                                struct psb_xhw_buf *buf,
+                                uint32_t * cookie,
+                                uint32_t * bca,
+                                uint32_t * rca, uint32_t * flags);
+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
+                          struct psb_xhw_buf *buf);
+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
+                         struct psb_xhw_buf *buf);
+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
+                              struct psb_xhw_buf *buf, uint32_t * cookie);
+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
+                              struct psb_xhw_buf *buf,
+                              uint32_t flags,
+                              uint32_t param_offset,
+                              uint32_t pt_offset, uint32_t * hw_cookie);
+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
+                             struct psb_xhw_buf *buf);
+
+extern int psb_xhw_hotplug(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf);
+/*
+ * psb_schedule.c: HW bug fixing.
+ */
+
+#ifdef FIX_TG_16
+
+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
+
+#else
+
+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
+
+#endif
+
+/*
+ * Utilities
+ */
+
+#define PSB_ALIGN_TO(_val, _align) \
+  (((_val) + ((_align) - 1)) & ~((_align) - 1))
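+/* E.g. PSB_ALIGN_TO(100, 64) == (100 + 63) & ~63 == 128. */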
+#define PSB_WVDC32(_val, _offs) \
+  iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs) \
+  ioread32(dev_priv->vdc_reg + (_offs))
+#define PSB_WSGX32(_val, _offs) \
+  iowrite32(_val, dev_priv->sgx_reg + (_offs))
+#define PSB_RSGX32(_offs) \
+  ioread32(dev_priv->sgx_reg + (_offs))
+#define PSB_WMSVDX32(_val, _offs) \
+  iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs) \
+  ioread32(dev_priv->msvdx_reg + (_offs))
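+
+/*
+ * Usage sketch (the macros above hard-code a dev_priv in scope), e.g.
+ * reading the VDC IRQ identity register defined earlier in this file
+ * and writing the bits back:
+ *
+ *     uint32_t stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+ *     PSB_WVDC32(stat, PSB_INT_IDENTITY_R);
+ */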
+
+#define PSB_ALPL(_val, _base)                  \
+  (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
+#define PSB_ALPLM(_val, _base)                 \
+  ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
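+
+/*
+ * Token-pasting example: PSB_ALPL(offset, PSB_FOO) expands to
+ * ((offset >> PSB_FOO_ALIGNSHIFT) << PSB_FOO_SHIFT), and PSB_ALPLM()
+ * additionally masks with PSB_FOO_MASK; PSB_FOO is a hypothetical
+ * register-field prefix used here only for illustration.
+ */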
+
+#define PSB_D_RENDER  (1 << 16)
+
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT    (1 << 1)
+#define PSB_D_IRQ     (1 << 2)
+#define PSB_D_FW      (1 << 3)
+#define PSB_D_PERF    (1 << 4)
+#define PSB_D_TMP    (1 << 5)
+#define PSB_D_RELOC   (1 << 6)
+
+extern int drm_psb_debug;
+extern int drm_psb_no_fb;
+extern int drm_psb_disable_vsync;
+extern int drm_psb_detear;
+
+#define PSB_DEBUG_FW(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
+#define PSB_DEBUG_INIT(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
+#define PSB_DEBUG_PERF(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
+#define PSB_DEBUG_TMP(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
+#define PSB_DEBUG_RELOC(_fmt, _arg...) \
+       PSB_DEBUG(PSB_D_RELOC, _fmt, ##_arg)
+
+#if DRM_DEBUG_CODE
+#define PSB_DEBUG(_flag, _fmt, _arg...)                                        \
+       do {                                                            \
+         if (unlikely((_flag) & drm_psb_debug))                        \
+                       printk(KERN_DEBUG                               \
+                              "[psb:0x%02x:%s] " _fmt , _flag, \
+                              __FUNCTION__ , ##_arg);                  \
+       } while (0)
+#else
+#define PSB_DEBUG(_flag, _fmt, _arg...)     do { } while (0)
+#endif
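+
+/*
+ * Usage sketch: PSB_DEBUG_IRQ("vdc_stat 0x%08x\n", vdc_stat) prints only
+ * when the PSB_D_IRQ bit is set in the drm_psb_debug flag word, and
+ * compiles away entirely unless DRM_DEBUG_CODE is set.
+ */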
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_fb.c b/psb-kernel-source-4.41.1/psb_fb.c
new file mode 100644 (file)
index 0000000..ad853e0
--- /dev/null
@@ -0,0 +1,1357 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "psb_drv.h"
+#include "drm_compat.h"
+
+#define SII_1392_WA
+#ifdef SII_1392_WA
+extern int SII_1392;
+#endif
+
+struct psbfb_vm_info {
+       struct drm_buffer_object *bo;
+       struct address_space *f_mapping;
+       struct mutex vm_mutex;
+       atomic_t refcount;
+};
+
+struct psbfb_par {
+       struct drm_device *dev;
+       struct drm_crtc *crtc;
+       struct drm_output *output;
+       struct psbfb_vm_info *vi;
+       int dpms_state;
+};
+
+static void psbfb_vm_info_deref(struct psbfb_vm_info **vi)
+{
+       struct psbfb_vm_info *tmp = *vi;
+       *vi = NULL;
+       if (atomic_dec_and_test(&tmp->refcount)) {
+               drm_bo_usage_deref_unlocked(&tmp->bo);
+               drm_free(tmp, sizeof(*tmp), DRM_MEM_MAPS);
+       }
+}
+
+static struct psbfb_vm_info *psbfb_vm_info_ref(struct psbfb_vm_info *vi)
+{
+       atomic_inc(&vi->refcount);
+       return vi;
+}
+
+static struct psbfb_vm_info *psbfb_vm_info_create(void)
+{
+       struct psbfb_vm_info *vi;
+
+       vi = drm_calloc(1, sizeof(*vi), DRM_MEM_MAPS);
+       if (!vi)
+               return NULL;
+
+       mutex_init(&vi->vm_mutex);
+       atomic_set(&vi->refcount, 1);
+       return vi;
+}
+
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
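+/*
+ * CMAP_TOHW rescales a 16-bit colour component to _width bits with
+ * rounding, e.g. CMAP_TOHW(0xFFFF, 5) == 31 and CMAP_TOHW(0, 5) == 0.
+ */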
+
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                          unsigned blue, unsigned transp, struct fb_info *info)
+{
+       struct psbfb_par *par = info->par;
+       struct drm_crtc *crtc = par->crtc;
+       uint32_t v;
+
+       if (!crtc->fb)
+               return -ENOMEM;
+
+       if (regno > 15)
+               return 1;
+
+       if (crtc->funcs->gamma_set)
+               crtc->funcs->gamma_set(crtc, red, green, blue, regno);
+
+       red = CMAP_TOHW(red, info->var.red.length);
+       blue = CMAP_TOHW(blue, info->var.blue.length);
+       green = CMAP_TOHW(green, info->var.green.length);
+       transp = CMAP_TOHW(transp, info->var.transp.length);
+
+       v = (red << info->var.red.offset) |
+           (green << info->var.green.offset) |
+           (blue << info->var.blue.offset) |
+           (transp << info->var.transp.offset);
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 16:
+               ((uint32_t *) info->pseudo_palette)[regno] = v;
+               break;
+       case 24:
+       case 32:
+               ((uint32_t *) info->pseudo_palette)[regno] = v;
+               break;
+       }
+
+       return 0;
+}
+
+static int psbfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct psbfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_display_mode *drm_mode;
+       struct drm_output *output;
+       int depth;
+       int pitch;
+       int bpp = var->bits_per_pixel;
+
+       if (!fb)
+               return -ENOMEM;
+
+       if (!var->pixclock)
+               return -EINVAL;
+
+       /* don't support virtuals for now */
+       if (var->xres_virtual > var->xres)
+               return -EINVAL;
+
+       if (var->yres_virtual > var->yres)
+               return -EINVAL;
+
+       switch (bpp) {
+       case 8:
+               depth = 8;
+               break;
+       case 16:
+               depth = (var->green.length == 6) ? 16 : 15;
+               break;
+       case 24:                /* assume this is 32bpp / depth 24 */
+               bpp = 32;
+               /* fallthrough */
+       case 32:
+               depth = (var->transp.length > 0) ? 32 : 24;
+               break;
+       default:
+               return -EINVAL;
+       }
+
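+       /*
+        * Line length in bytes, rounded up to a 64-byte multiple:
+        * e.g. xres = 1022 at 32bpp gives 4088 bytes, padded to 4096.
+        */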
+       pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
+
+       /* Check that we can resize */
+       if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
+#if 1
+               /* Need to resize the fb object.
+                * But the generic fbdev code doesn't really understand
+                * that we can do this. So disable for now.
+                */
+               DRM_INFO("Can't support requested size, too big!\n");
+               return -EINVAL;
+#else
+               int ret;
+               struct drm_buffer_object *fbo = NULL;
+               struct drm_bo_kmap_obj tmp_kmap;
+
+               /* a temporary BO to check if we could resize in setpar.
+                * Therefore no need to set NO_EVICT.
+                */
+               ret = drm_buffer_object_create(dev,
+                                              pitch * var->yres,
+                                              drm_bo_type_kernel,
+                                              DRM_BO_FLAG_READ |
+                                              DRM_BO_FLAG_WRITE |
+                                              DRM_BO_FLAG_MEM_TT |
+                                              DRM_BO_FLAG_MEM_VRAM,
+                                              DRM_BO_HINT_DONT_FENCE,
+                                              0, 0, &fbo);
+               if (ret || !fbo)
+                       return -ENOMEM;
+
+               ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
+               if (ret) {
+                       drm_bo_usage_deref_unlocked(&fbo);
+                       return -EINVAL;
+               }
+
+               drm_bo_kunmap(&tmp_kmap);
+               /* destroy our current fbo! */
+               drm_bo_usage_deref_unlocked(&fbo);
+#endif
+       }
+
+       switch (depth) {
+       case 8:
+               var->red.offset = 0;
+               var->green.offset = 0;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 15:
+               var->red.offset = 10;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 5;
+               var->blue.length = 5;
+               var->transp.length = 1;
+               var->transp.offset = 15;
+               break;
+       case 16:
+               var->red.offset = 11;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 6;
+               var->blue.length = 5;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 24:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 32:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 8;
+               var->transp.offset = 24;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+#if 0
+       /* Here we walk the output mode list and look for modes. If we haven't
+        * got it, then bail. Not very nice, so this is disabled.
+        * In the set_par code, we create our mode based on the incoming
+        * parameters. Nicer, but may not be desired by some.
+        */
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               if (output->crtc == par->crtc)
+                       break;
+       }
+
+       list_for_each_entry(drm_mode, &output->modes, head) {
+               if (drm_mode->hdisplay == var->xres &&
+                   drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
+                       break;
+       }
+
+       if (!drm_mode)
+               return -EINVAL;
+#else
+       (void)dev;              /* silence warnings */
+       (void)output;
+       (void)drm_mode;
+#endif
+
+       return 0;
+}
+
+static int psbfb_move_fb_bo(struct fb_info *info, struct drm_buffer_object *bo,
+                           uint64_t mem_type_flags)
+{
+       struct psbfb_par *par;
+       loff_t holelen;
+       int ret;
+
+       /*
+        * Kill all user-space mappings of this device. They will be
+        * faulted back using nopfn when accessed.
+        */
+
+       par = info->par;
+       holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+       mutex_lock(&par->vi->vm_mutex);
+       if (par->vi->f_mapping) {
+               unmap_mapping_range(par->vi->f_mapping, 0, holelen, 1);
+       }
+
+       ret = drm_bo_do_validate(bo,
+                                mem_type_flags,
+                                DRM_BO_MASK_MEM |
+                                DRM_BO_FLAG_NO_EVICT,
+                                DRM_BO_HINT_DONT_FENCE, 0, 1, NULL);
+
+       mutex_unlock(&par->vi->vm_mutex);
+       return ret;
+}
+
+/* this will let fbcon do the mode init */
+static int psbfb_set_par(struct fb_info *info)
+{
+       struct psbfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_device *dev = par->dev;
+       struct drm_display_mode *drm_mode;
+       struct fb_var_screeninfo *var = &info->var;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_output *output;
+       int pitch;
+       int depth;
+       int bpp = var->bits_per_pixel;
+
+       if (!fb)
+               return -ENOMEM;
+
+       switch (bpp) {
+       case 8:
+               depth = 8;
+               break;
+       case 16:
+               depth = (var->green.length == 6) ? 16 : 15;
+               break;
+       case 24:                /* assume this is 32bpp / depth 24 */
+               bpp = 32;
+               /* fallthrough */
+       case 32:
+               depth = (var->transp.length > 0) ? 32 : 24;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
+
+       if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
+#if 1
+               /* Need to resize the fb object.
+                * But the generic fbdev code doesn't really understand
+                * that we can do this. So disable for now.
+                */
+               DRM_INFO("Can't support requested size, too big!\n");
+               return -EINVAL;
+#else
+               int ret;
+               struct drm_buffer_object *fbo = NULL, *tfbo;
+               struct drm_bo_kmap_obj tmp_kmap, tkmap;
+
+               ret = drm_buffer_object_create(dev,
+                                              pitch * var->yres,
+                                              drm_bo_type_kernel,
+                                              DRM_BO_FLAG_READ |
+                                              DRM_BO_FLAG_WRITE |
+                                              DRM_BO_FLAG_MEM_TT |
+                                              DRM_BO_FLAG_MEM_VRAM |
+                                              DRM_BO_FLAG_NO_EVICT,
+                                              DRM_BO_HINT_DONT_FENCE,
+                                              0, 0, &fbo);
+               if (ret || !fbo) {
+                       DRM_ERROR
+                           ("failed to allocate new resized framebuffer\n");
+                       return -ENOMEM;
+               }
+
+               ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
+               if (ret) {
+                       DRM_ERROR("failed to kmap framebuffer.\n");
+                       drm_bo_usage_deref_unlocked(&fbo);
+                       return -EINVAL;
+               }
+
+               DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
+                         fb->height, fb->offset, fbo);
+
+               /* set new screen base */
+               info->screen_base = tmp_kmap.virtual;
+
+               tkmap = fb->kmap;
+               fb->kmap = tmp_kmap;
+               drm_bo_kunmap(&tkmap);
+
+               tfbo = fb->bo;
+               fb->bo = fbo;
+               drm_bo_usage_deref_unlocked(&tfbo);
+#endif
+       }
+
+       fb->offset = fb->bo->offset - dev_priv->pg->gatt_start;
+       fb->width = var->xres;
+       fb->height = var->yres;
+       fb->bits_per_pixel = bpp;
+       fb->pitch = pitch;
+       fb->depth = depth;
+
+       info->fix.line_length = fb->pitch;
+       info->fix.visual =
+           (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
+
+       /* some fbdev apps don't want these to change */
+       info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
+
+       /* We have to update the screen base address here because fb->bo
+        * may have been moved by the preceding drm_bo_do_validate().
+        * Otherwise the outputs may go black when X exits and the
+        * console is re-entered. */
+       info->screen_base = fb->kmap.virtual;
+
+#if 0
+       /* relates to resize - disable */
+       info->fix.smem_len = info->fix.line_length * var->yres;
+       info->screen_size = info->fix.smem_len; /* ??? */
+#endif
+
+       /* Should we walk the output's modelist or just create our own ???
+        * For now, we create and destroy a mode based on the incoming
+        * parameters. But there's commented out code below which scans
+        * the output list too.
+        */
+#if 0
+       list_for_each_entry(output, &dev->mode_config.output_list, head) {
+               if (output->crtc == par->crtc)
+                       break;
+       }
+
+       list_for_each_entry(drm_mode, &output->modes, head) {
+               if (drm_mode->hdisplay == var->xres &&
+                   drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
+                       break;
+       }
+#else
+       (void)output;           /* silence warning */
+
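+       /*
+        * The fbdev margins map onto DRM mode timings as built below:
+        * right_margin is the horizontal front porch, hsync_len the sync
+        * pulse width and left_margin the back porch, and likewise
+        * vertically with lower_margin / vsync_len / upper_margin.
+        */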
+       drm_mode = drm_mode_create(dev);
+       drm_mode->hdisplay = var->xres;
+       drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
+       drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
+       drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
+       drm_mode->vdisplay = var->yres;
+       drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
+       drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
+       drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
+       drm_mode->clock = PICOS2KHZ(var->pixclock);
+       drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
+       drm_mode_set_name(drm_mode);
+       drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
+#endif
+
+       if (!drm_crtc_set_mode(par->crtc, drm_mode, 0, 0))
+               return -EINVAL;
+
+       /* Have to destroy our created mode if we're not searching the mode
+        * list for it.
+        */
+#if 1
+       drm_mode_destroy(dev, drm_mode);
+#endif
+
+       return 0;
+}
+
+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
+
+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
+                                uint32_t dst_offset, uint32_t dst_stride,
+                                uint32_t dst_format, uint16_t dst_x,
+                                uint16_t dst_y, uint16_t size_x,
+                                uint16_t size_y, uint32_t fill)
+{
+       uint32_t buffer[10];
+       uint32_t *buf;
+       int ret;
+
+       buf = buffer;
+
+       *buf++ = PSB_2D_FENCE_BH;
+
+       *buf++ =
+           PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+                                              PSB_2D_DST_STRIDE_SHIFT);
+       *buf++ = dst_offset;
+
+       *buf++ =
+           PSB_2D_BLIT_BH |
+           PSB_2D_ROT_NONE |
+           PSB_2D_COPYORDER_TL2BR |
+           PSB_2D_DSTCK_DISABLE |
+           PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
+
+       *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
+       *buf++ =
+           (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+                                                 PSB_2D_DST_YSTART_SHIFT);
+       *buf++ =
+           (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+                                                 PSB_2D_DST_YSIZE_SHIFT);
+       *buf++ = PSB_2D_FLUSH_BH;
+
+       psb_2d_lock(dev_priv);
+       ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
+       psb_2d_unlock(dev_priv);
+
+       return ret;
+}
+
+static void psbfb_fillrect_accel(struct fb_info *info,
+                                const struct fb_fillrect *r)
+{
+       struct psbfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_psb_private *dev_priv = par->dev->dev_private;
+       uint32_t offset;
+       uint32_t stride;
+       uint32_t format;
+
+       if (!fb)
+               return;
+
+       offset = fb->offset;
+       stride = fb->pitch;
+
+       switch (fb->depth) {
+       case 8:
+               format = PSB_2D_DST_332RGB;
+               break;
+       case 15:
+               format = PSB_2D_DST_555RGB;
+               break;
+       case 16:
+               format = PSB_2D_DST_565RGB;
+               break;
+       case 24:
+       case 32:
+               /* this is wrong but since we don't do blending it's okay */
+               format = PSB_2D_DST_8888ARGB;
+               break;
+       default:
+               /* software fallback */
+               cfb_fillrect(info, r);
+               return;
+       }
+
+       psb_accel_2d_fillrect(dev_priv,
+                             offset, stride, format,
+                             r->dx, r->dy, r->width, r->height, r->color);
+}
+
+static void psbfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+       if (info->state != FBINFO_STATE_RUNNING)
+               return;
+       if (info->flags & FBINFO_HWACCEL_DISABLED) {
+               cfb_fillrect(info, rect);
+               return;
+       }
+       if (in_interrupt() || in_atomic()) {
+               /*
+                * Catch case when we're shutting down.
+                */
+               cfb_fillrect(info, rect);
+               return;
+       }
+       psbfb_fillrect_accel(info, rect);
+}
+
+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
+{
+       if (xdir < 0)
+               return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+                                   PSB_2D_COPYORDER_TR2BL;
+       else
+               return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+                                   PSB_2D_COPYORDER_TL2BR;
+}
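+
+/*
+ * Rationale: an overlapping copy must walk away from the not-yet-read
+ * source pixels.  With the (src - dst) convention used by the caller,
+ * a blit moving right and down has xdir < 0 and ydir < 0 and therefore
+ * starts at the bottom-right corner (BR2TL); the other quadrants follow
+ * the same pattern.
+ */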
+
+/*
+ * @srcOffset in bytes
+ * @srcStride in bytes
+ * @srcFormat psb 2D format defines
+ * @dstOffset in bytes
+ * @dstStride in bytes
+ * @dstFormat psb 2D format defines
+ * @srcX offset in pixels
+ * @srcY offset in pixels
+ * @dstX offset in pixels
+ * @dstY offset in pixels
+ * @sizeX of the copied area
+ * @sizeY of the copied area
+ */
+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+                            uint32_t src_offset, uint32_t src_stride,
+                            uint32_t src_format, uint32_t dst_offset,
+                            uint32_t dst_stride, uint32_t dst_format,
+                            uint16_t src_x, uint16_t src_y, uint16_t dst_x,
+                            uint16_t dst_y, uint16_t size_x, uint16_t size_y)
+{
+       uint32_t blit_cmd;
+       uint32_t buffer[10];
+       uint32_t *buf;
+       uint32_t direction;
+       int ret;
+
+       buf = buffer;
+
+       direction = psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+
+       if (direction == PSB_2D_COPYORDER_BR2TL ||
+           direction == PSB_2D_COPYORDER_TR2BL) {
+               src_x += size_x - 1;
+               dst_x += size_x - 1;
+       }
+       if (direction == PSB_2D_COPYORDER_BR2TL ||
+           direction == PSB_2D_COPYORDER_BL2TR) {
+               src_y += size_y - 1;
+               dst_y += size_y - 1;
+       }
+
+       blit_cmd =
+           PSB_2D_BLIT_BH |
+           PSB_2D_ROT_NONE |
+           PSB_2D_DSTCK_DISABLE |
+           PSB_2D_SRCCK_DISABLE |
+           PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+
+       *buf++ = PSB_2D_FENCE_BH;
+       *buf++ =
+           PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+                                              PSB_2D_DST_STRIDE_SHIFT);
+       *buf++ = dst_offset;
+       *buf++ =
+           PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+                                              PSB_2D_SRC_STRIDE_SHIFT);
+       *buf++ = src_offset;
+       *buf++ =
+           PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+           (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+       *buf++ = blit_cmd;
+       *buf++ =
+           (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+                                                 PSB_2D_DST_YSTART_SHIFT);
+       *buf++ =
+           (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+                                                 PSB_2D_DST_YSIZE_SHIFT);
+       *buf++ = PSB_2D_FLUSH_BH;
+
+       psb_2d_lock(dev_priv);
+       ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
+       psb_2d_unlock(dev_priv);
+       return ret;
+}
+
+static void psbfb_copyarea_accel(struct fb_info *info,
+                                const struct fb_copyarea *a)
+{
+       struct psbfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_psb_private *dev_priv = par->dev->dev_private;
+       uint32_t offset;
+       uint32_t stride;
+       uint32_t src_format;
+       uint32_t dst_format;
+
+       if (!fb)
+               return;
+
+       offset = fb->offset;
+       stride = fb->pitch;
+
+       if (a->width == 8 || a->height == 8) {
+               psb_2d_lock(dev_priv);
+               psb_idle_2d(par->dev);
+               psb_2d_unlock(dev_priv);
+               cfb_copyarea(info, a);
+               return;
+       }
+
+       switch (fb->depth) {
+       case 8:
+               src_format = PSB_2D_SRC_332RGB;
+               dst_format = PSB_2D_DST_332RGB;
+               break;
+       case 15:
+               src_format = PSB_2D_SRC_555RGB;
+               dst_format = PSB_2D_DST_555RGB;
+               break;
+       case 16:
+               src_format = PSB_2D_SRC_565RGB;
+               dst_format = PSB_2D_DST_565RGB;
+               break;
+       case 24:
+       case 32:
+               /* this is wrong but since we don't do blending it's okay */
+               src_format = PSB_2D_SRC_8888ARGB;
+               dst_format = PSB_2D_DST_8888ARGB;
+               break;
+       default:
+               /* software fallback */
+               cfb_copyarea(info, a);
+               return;
+       }
+
+       psb_accel_2d_copy(dev_priv,
+                         offset, stride, src_format,
+                         offset, stride, dst_format,
+                         a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+}
+
+static void psbfb_copyarea(struct fb_info *info,
+                          const struct fb_copyarea *region)
+{
+       if (info->state != FBINFO_STATE_RUNNING)
+               return;
+       if (info->flags & FBINFO_HWACCEL_DISABLED) {
+               cfb_copyarea(info, region);
+               return;
+       }
+       if (in_interrupt() || in_atomic()) {
+               /*
+                * Catch case when we're shutting down.
+                */
+               cfb_copyarea(info, region);
+               return;
+       }
+
+       psbfb_copyarea_accel(info, region);
+}
+
+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+       if (info->state != FBINFO_STATE_RUNNING)
+               return;
+       if (info->flags & FBINFO_HWACCEL_DISABLED) {
+               cfb_imageblit(info, image);
+               return;
+       }
+       if (in_interrupt() || in_atomic()) {
+               cfb_imageblit(info, image);
+               return;
+       }
+
+       cfb_imageblit(info, image);
+}
+
+static int psbfb_blank(int blank_mode, struct fb_info *info)
+{
+       int dpms_mode;
+       struct psbfb_par *par = info->par;
+       struct drm_output *output;
+
+       par->dpms_state = blank_mode;
+
+       switch(blank_mode) {
+       case FB_BLANK_UNBLANK:
+               dpms_mode = DPMSModeOn;
+               break;
+       case FB_BLANK_NORMAL:
+               if (!par->crtc)
+                       return 0;
+               (*par->crtc->funcs->dpms)(par->crtc, DPMSModeStandby);
+               return 0;
+       case FB_BLANK_HSYNC_SUSPEND:
+       default:
+               dpms_mode = DPMSModeStandby;
+               break;
+       case FB_BLANK_VSYNC_SUSPEND:
+               dpms_mode = DPMSModeSuspend;
+               break;
+       case FB_BLANK_POWERDOWN:
+               dpms_mode = DPMSModeOff;
+               break;
+       }
+
+       if (!par->crtc)
+               return 0;
+
+       list_for_each_entry(output, &par->dev->mode_config.output_list, head) {
+               if (output->crtc == par->crtc)
+                       (*output->funcs->dpms)(output, dpms_mode);
+       }
+
+       (*par->crtc->funcs->dpms)(par->crtc, dpms_mode);
+       return 0;
+}
+
+
+static int psbfb_kms_off(struct drm_device *dev, int suspend)
+{
+       struct drm_framebuffer *fb = NULL;
+       struct drm_buffer_object *bo = NULL;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret = 0;
+
+       DRM_DEBUG("psbfb_kms_off_ioctl\n");
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct fb_info *info = fb->fbdev;
+               struct psbfb_par *par = info->par;
+               int save_dpms_state;
+
+               if (suspend)
+                       fb_set_suspend(info, 1);
+               else
+                       info->state &= ~FBINFO_STATE_RUNNING;
+
+               info->screen_base = NULL;
+
+               bo = fb->bo;
+
+               if (!bo)
+                       continue;
+
+               drm_bo_kunmap(&fb->kmap);
+
+               /*
+                * We don't take the 2D lock here as we assume that the
+                * 2D engine will eventually idle anyway.
+                */
+
+               if (!suspend) {
+                       uint32_t dummy2 = 0;
+                       (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
+                                                       &dummy2, &dummy2);
+                       psb_2d_lock(dev_priv);
+                       (void)psb_idle_2d(dev);
+                       psb_2d_unlock(dev_priv);
+               } else
+                       psb_idle_2d(dev);
+
+               save_dpms_state = par->dpms_state;
+               psbfb_blank(FB_BLANK_NORMAL, info);
+               par->dpms_state = save_dpms_state;
+
+               ret = psbfb_move_fb_bo(info, bo, DRM_BO_FLAG_MEM_LOCAL);
+
+               if (ret)
+                       goto out_err;
+       }
+      out_err:
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return ret;
+}
+
+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       int ret;
+
+       acquire_console_sem();
+       ret = psbfb_kms_off(dev, 0);
+       release_console_sem();
+
+       return ret;
+}
+
+static int psbfb_kms_on(struct drm_device *dev, int resume)
+{
+       struct drm_framebuffer *fb = NULL;
+       struct drm_buffer_object *bo = NULL;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret = 0;
+       int dummy;
+
+       DRM_DEBUG("psbfb_kms_on_ioctl\n");
+
+       if (!resume) {
+               uint32_t dummy2 = 0;
+               (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
+                                                      &dummy2, &dummy2);
+               psb_2d_lock(dev_priv);
+               (void)psb_idle_2d(dev);
+               psb_2d_unlock(dev_priv);
+       } else
+               psb_idle_2d(dev);
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct fb_info *info = fb->fbdev;
+               struct psbfb_par *par = info->par;
+
+               bo = fb->bo;
+               if (!bo)
+                       continue;
+
+               ret = psbfb_move_fb_bo(info, bo,
+                                      DRM_BO_FLAG_MEM_TT |
+                                      DRM_BO_FLAG_MEM_VRAM |
+                                      DRM_BO_FLAG_NO_EVICT);
+               if (ret)
+                       goto out_err;
+
+               ret = drm_bo_kmap(bo, 0, bo->num_pages, &fb->kmap);
+               if (ret)
+                       goto out_err;
+
+               info->screen_base = drm_bmo_virtual(&fb->kmap, &dummy);
+               fb->offset = bo->offset - dev_priv->pg->gatt_start;
+
+               if (ret)
+                       goto out_err;
+
+               if (resume)
+                       fb_set_suspend(info, 0);
+               else
+                       info->state |= FBINFO_STATE_RUNNING;
+
+               /*
+                * Re-run modesetting here, since the VDS scanout offset may
+                * have changed.
+                */
+
+               if (par->crtc->enabled) {
+                       psbfb_set_par(info);
+                       psbfb_blank(par->dpms_state, info);
+               }
+       }
+      out_err:
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return ret;
+}
+
+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       int ret;
+
+       acquire_console_sem();
+       ret = psbfb_kms_on(dev, 0);
+       release_console_sem();
+#ifdef SII_1392_WA
+       if((SII_1392 != 1) || (drm_psb_no_fb==0))
+               drm_disable_unused_functions(dev);
+#else
+       drm_disable_unused_functions(dev);
+#endif
+       return ret;
+}
+
+void psbfb_suspend(struct drm_device *dev)
+{
+       acquire_console_sem();
+       psbfb_kms_off(dev, 1);
+       release_console_sem();
+}
+
+void psbfb_resume(struct drm_device *dev)
+{
+       acquire_console_sem();
+       psbfb_kms_on(dev, 1);
+       release_console_sem();
+#ifdef SII_1392_WA
+       if((SII_1392 != 1) || (drm_psb_no_fb==0))
+               drm_disable_unused_functions(dev);
+#else
+       drm_disable_unused_functions(dev);
+#endif
+}
+
+/*
+ * FIXME: Before kernel inclusion, migrate nopfn to fault.
+ * Also, these should be the default vm ops for buffer object type fbs.
+ */
+
+extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                                    unsigned long address);
+
+/*
+ * This wrapper is a bit ugly and is here because we need access to a mutex
+ * that we can lock both around nopfn and around unmap_mapping_range + move.
+ * Normally, this would've been done using the bo mutex, but unfortunately
+ * we cannot lock it around drm_bo_do_validate(), since that would imply
+ * recursive locking.
+ */
+
+static unsigned long psbfb_nopfn(struct vm_area_struct *vma,
+                                unsigned long address)
+{
+       struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
+       struct vm_area_struct tmp_vma;
+       unsigned long ret;
+
+       mutex_lock(&vi->vm_mutex);
+       tmp_vma = *vma;
+       tmp_vma.vm_private_data = vi->bo;
+       ret = drm_bo_vm_nopfn(&tmp_vma, address);
+       mutex_unlock(&vi->vm_mutex);
+       return ret;
+}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+static int psbfb_fault(struct vm_area_struct *vma,
+                                struct vm_fault *vmf)
+{
+       struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
+       struct vm_area_struct tmp_vma;
+       unsigned long ret;
+
+        unsigned long address = (unsigned long)vmf->virtual_address;
+
+       mutex_lock(&vi->vm_mutex);
+       tmp_vma = *vma;
+       tmp_vma.vm_private_data = vi->bo;
+       ret = drm_bo_vm_nopfn(&tmp_vma, address);
+       mutex_unlock(&vi->vm_mutex);
+       return ret;
+}
+#endif
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+       struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
+
+       atomic_inc(&vi->refcount);
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+       psbfb_vm_info_deref((struct psbfb_vm_info **)&vma->vm_private_data);
+}
+
+static struct vm_operations_struct psbfb_vm_ops = {
+  #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+        .fault = psbfb_fault,
+  #else
+        .nopfn = psbfb_nopfn,
+  #endif
+       .open = psbfb_vm_open,
+       .close = psbfb_vm_close,
+};
+
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       struct psbfb_par *par = info->par;
+       struct drm_framebuffer *fb = par->crtc->fb;
+       struct drm_buffer_object *bo = fb->bo;
+       unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long offset = vma->vm_pgoff;
+
+       if (vma->vm_pgoff != 0)
+               return -EINVAL;
+       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+               return -EINVAL;
+       if (offset + size > bo->num_pages)
+               return -EINVAL;
+
+       mutex_lock(&par->vi->vm_mutex);
+       if (!par->vi->f_mapping)
+               par->vi->f_mapping = vma->vm_file->f_mapping;
+       mutex_unlock(&par->vi->vm_mutex);
+
+       vma->vm_private_data = psbfb_vm_info_ref(par->vi);
+
+       vma->vm_ops = &psbfb_vm_ops;
+       vma->vm_flags |= VM_PFNMAP;
+
+       return 0;
+}
+
+int psbfb_sync(struct fb_info *info)
+{
+       struct psbfb_par *par = info->par;
+       struct drm_psb_private *dev_priv = par->dev->dev_private;
+
+       psb_2d_lock(dev_priv);
+       psb_idle_2d(par->dev);
+       psb_2d_unlock(dev_priv);
+
+       return 0;
+}
+
+int psbfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       return 0;
+}
+
+static struct fb_ops psbfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = psbfb_check_var,
+       .fb_set_par = psbfb_set_par,
+       .fb_setcolreg = psbfb_setcolreg,
+       .fb_fillrect = psbfb_fillrect,
+       .fb_copyarea = psbfb_copyarea,
+       .fb_imageblit = psbfb_imageblit,
+/*     .fb_mmap = psbfb_mmap, */
+       .fb_sync = psbfb_sync,
+       .fb_blank = psbfb_blank,
+       .fb_pan_display = psbfb_pan_display,
+};
+
+int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct fb_info *info;
+       struct psbfb_par *par;
+       struct device *device = &dev->pdev->dev;
+       struct drm_framebuffer *fb;
+       struct drm_display_mode *mode = crtc->desired_mode;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_buffer_object *fbo = NULL;
+       int ret;
+       int is_iomem;
+
+       if (drm_psb_no_fb) {
+               /* need to do this as the DRM will disable the output */
+               crtc->enabled = 1;
+               return 0;
+       }
+
+       info = framebuffer_alloc(sizeof(struct psbfb_par), device);
+       if (!info) {
+               return -ENOMEM;
+       }
+
+       fb = drm_framebuffer_create(dev);
+       if (!fb) {
+               framebuffer_release(info);
+               DRM_ERROR("failed to allocate fb.\n");
+               return -ENOMEM;
+       }
+       crtc->fb = fb;
+
+       fb->width = mode->hdisplay;
+       fb->height = mode->vdisplay;
+
+       fb->bits_per_pixel = 16;
+       fb->depth = 16;
+       fb->pitch =
+           ((fb->width * ((fb->bits_per_pixel + 1) / 8)) + 0x3f) & ~0x3f;
+
+       ret = drm_buffer_object_create(dev,
+                                      fb->pitch * fb->height,
+                                      drm_bo_type_kernel,
+                                      DRM_BO_FLAG_READ |
+                                      DRM_BO_FLAG_WRITE |
+                                      DRM_BO_FLAG_MEM_TT |
+                                      DRM_BO_FLAG_MEM_VRAM |
+                                      DRM_BO_FLAG_NO_EVICT,
+                                      DRM_BO_HINT_DONT_FENCE, 0, 0, &fbo);
+       if (ret || !fbo) {
+               DRM_ERROR("failed to allocate framebuffer\n");
+               goto out_err0;
+       }
+
+       fb->offset = fbo->offset - dev_priv->pg->gatt_start;
+       fb->bo = fbo;
+       DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
+                 fb->height, fb->offset, fbo);
+
+       fb->fbdev = info;
+
+       par = info->par;
+
+       par->dev = dev;
+       par->crtc = crtc;
+       par->vi = psbfb_vm_info_create();
+       if (!par->vi)
+               goto out_err1;
+
+       mutex_lock(&dev->struct_mutex);
+       par->vi->bo = fbo;
+       atomic_inc(&fbo->usage);
+       mutex_unlock(&dev->struct_mutex);
+
+       par->vi->f_mapping = NULL;
+       info->fbops = &psbfb_ops;
+
+       strcpy(info->fix.id, "psbfb");
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = FB_VISUAL_DIRECTCOLOR;
+       info->fix.type_aux = 0;
+       info->fix.xpanstep = 1;
+       info->fix.ypanstep = 1;
+       info->fix.ywrapstep = 0;
+       info->fix.accel = FB_ACCEL_NONE;        /* ??? */
+       info->fix.mmio_start = 0;
+       info->fix.mmio_len = 0;
+       info->fix.line_length = fb->pitch;
+       info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
+       info->fix.smem_len = info->fix.line_length * fb->height;
+
+       info->flags = FBINFO_DEFAULT |
+           FBINFO_PARTIAL_PAN_OK /*| FBINFO_MISC_ALWAYS_SETPAR */ ;
+
+       ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
+       if (ret) {
+               DRM_ERROR("error mapping fb: %d\n", ret);
+               goto out_err2;
+       }
+
+       info->screen_base = drm_bmo_virtual(&fb->kmap, &is_iomem);
+       memset(info->screen_base, 0x00, fb->pitch*fb->height);
+       info->screen_size = info->fix.smem_len; /* FIXME */
+       info->pseudo_palette = fb->pseudo_palette;
+       info->var.xres_virtual = fb->width;
+       info->var.yres_virtual = fb->height;
+       info->var.bits_per_pixel = fb->bits_per_pixel;
+       info->var.xoffset = 0;
+       info->var.yoffset = 0;
+       info->var.activate = FB_ACTIVATE_NOW;
+       info->var.height = -1;
+       info->var.width = -1;
+       info->var.vmode = FB_VMODE_NONINTERLACED;
+
+       info->var.xres = mode->hdisplay;
+       info->var.right_margin = mode->hsync_start - mode->hdisplay;
+       info->var.hsync_len = mode->hsync_end - mode->hsync_start;
+       info->var.left_margin = mode->htotal - mode->hsync_end;
+       info->var.yres = mode->vdisplay;
+       info->var.lower_margin = mode->vsync_start - mode->vdisplay;
+       info->var.vsync_len = mode->vsync_end - mode->vsync_start;
+       info->var.upper_margin = mode->vtotal - mode->vsync_end;
+       info->var.pixclock = 10000000 / mode->htotal * 1000 /
+           mode->vtotal * 100;
+       /* avoid overflow */
+       info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
+
+       info->pixmap.size = 64 * 1024;
+       info->pixmap.buf_align = 8;
+       info->pixmap.access_align = 32;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+       info->pixmap.scan_align = 1;
+
+       DRM_DEBUG("fb depth is %d\n", fb->depth);
+       DRM_DEBUG("   pitch is %d\n", fb->pitch);
+       switch (fb->depth) {
+       case 8:
+               info->var.red.offset = 0;
+               info->var.green.offset = 0;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8;       /* 8bit DAC */
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 15:
+               info->var.red.offset = 10;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = info->var.green.length =
+                   info->var.blue.length = 5;
+               info->var.transp.offset = 15;
+               info->var.transp.length = 1;
+               break;
+       case 16:
+               info->var.red.offset = 11;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 6;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 0;
+               break;
+       case 24:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = info->var.green.length =
+                   info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 32:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = info->var.green.length =
+                   info->var.blue.length = 8;
+               info->var.transp.offset = 24;
+               info->var.transp.length = 8;
+               break;
+       default:
+               break;
+       }
+
+       if (register_framebuffer(info) < 0)
+               goto out_err3;
+
+       if (psbfb_check_var(&info->var, info) < 0)
+               goto out_err4;
+
+       psbfb_set_par(info);
+
+       DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id);
+
+       return 0;
+      out_err4:
+       unregister_framebuffer(info);
+      out_err3:
+       drm_bo_kunmap(&fb->kmap);
+      out_err2:
+       psbfb_vm_info_deref(&par->vi);
+      out_err1:
+       drm_bo_usage_deref_unlocked(&fb->bo);
+      out_err0:
+       drm_framebuffer_destroy(fb);
+       framebuffer_release(info);
+       crtc->fb = NULL;
+       return -EINVAL;
+}
+
+EXPORT_SYMBOL(psbfb_probe);
+
+int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct drm_framebuffer *fb;
+       struct fb_info *info;
+       struct psbfb_par *par;
+
+       if (drm_psb_no_fb)
+               return 0;
+
+       fb = crtc->fb;
+       info = fb->fbdev;
+
+       if (info) {
+               unregister_framebuffer(info);
+               drm_bo_kunmap(&fb->kmap);
+               par = info->par;
+               if (par)
+                       psbfb_vm_info_deref(&par->vi);
+               drm_bo_usage_deref_unlocked(&fb->bo);
+               drm_framebuffer_destroy(fb);
+               framebuffer_release(info);
+       }
+       return 0;
+}
+
+EXPORT_SYMBOL(psbfb_remove);
diff --git a/psb-kernel-source-4.41.1/psb_fence.c b/psb-kernel-source-4.41.1/psb_fence.c
new file mode 100644 (file)
index 0000000..0768a47
--- /dev/null
@@ -0,0 +1,285 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "psb_drv.h"
+
+static void psb_poll_ta(struct drm_device *dev, uint32_t waiting_types)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       uint32_t cur_flag = 1;
+       uint32_t flags = 0;
+       uint32_t sequence = 0;
+       uint32_t remaining = 0xFFFFFFFF;
+       uint32_t diff;
+
+       struct psb_scheduler *scheduler;
+       struct psb_scheduler_seq *seq;
+       struct drm_fence_class_manager *fc =
+           &dev->fm.fence_class[PSB_ENGINE_TA];
+
+       if (unlikely(!dev_priv))
+               return;
+
+       scheduler = &dev_priv->scheduler;
+       seq = scheduler->seq;
+
+       while (likely(waiting_types & remaining)) {
+               if (!(waiting_types & cur_flag))
+                       goto skip;
+               if (seq->reported)
+                       goto skip;
+               if (flags == 0)
+                       sequence = seq->sequence;
+               else if (sequence != seq->sequence) {
+                       drm_fence_handler(dev, PSB_ENGINE_TA,
+                                         sequence, flags, 0);
+                       sequence = seq->sequence;
+                       flags = 0;
+               }
+               flags |= cur_flag;
+
+               /*
+                * Sequence may not have ended up on the ring yet.
+                * In that case, report it but don't mark it as
+                * reported. A subsequent poll will report it again.
+                */
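+
+               /*
+                * Worked example (with sequence_mask == 0xFFFFFFFF and
+                * wrap_diff == 1 << 30 from psb_fence_driver below):
+                * latest_queued_sequence == 2 and sequence == 0xFFFFFFFF
+                * give diff == 3 < wrap_diff, so the sequence has been
+                * queued despite the 32-bit wrap.
+                */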
+
+               diff = (fc->latest_queued_sequence - sequence) &
+                   driver->sequence_mask;
+               if (diff < driver->wrap_diff)
+                       seq->reported = 1;
+
+             skip:
+               cur_flag <<= 1;
+               remaining <<= 1;
+               seq++;
+       }
+
+       if (flags) {
+               drm_fence_handler(dev, PSB_ENGINE_TA, sequence, flags, 0);
+       }
+}
+
+static void psb_poll_other(struct drm_device *dev, uint32_t fence_class,
+                          uint32_t waiting_types)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
+       uint32_t sequence;
+
+       if (unlikely(!dev_priv))
+               return;
+
+       if (waiting_types) {
+               if (fence_class == PSB_ENGINE_VIDEO)
+                       sequence = dev_priv->msvdx_current_sequence;
+               else
+                       sequence = dev_priv->comm[fence_class << 4];
+
+               drm_fence_handler(dev, fence_class, sequence,
+                                 DRM_FENCE_TYPE_EXE, 0);
+
+               switch (fence_class) {
+               case PSB_ENGINE_2D:
+                       if (dev_priv->fence0_irq_on && !fc->waiting_types) {
+                               psb_2D_irq_off(dev_priv);
+                               dev_priv->fence0_irq_on = 0;
+                       } else if (!dev_priv->fence0_irq_on
+                                  && fc->waiting_types) {
+                               psb_2D_irq_on(dev_priv);
+                               dev_priv->fence0_irq_on = 1;
+                       }
+                       break;
+#if 0
+                       /*
+                        * FIXME: MSVDX irq switching
+                        */
+
+               case PSB_ENGINE_VIDEO:
+                       if (dev_priv->fence2_irq_on && !fc->waiting_types) {
+                               psb_msvdx_irq_off(dev_priv);
+                               dev_priv->fence2_irq_on = 0;
+                       } else if (!dev_priv->fence2_irq_on
+                                  && fc->pending_exe_flush) {
+                               psb_msvdx_irq_on(dev_priv);
+                               dev_priv->fence2_irq_on = 1;
+                       }
+                       break;
+#endif
+               default:
+                       return;
+               }
+       }
+}
+
+static void psb_fence_poll(struct drm_device *dev,
+                          uint32_t fence_class, uint32_t waiting_types)
+{
+       switch (fence_class) {
+       case PSB_ENGINE_TA:
+               psb_poll_ta(dev, waiting_types);
+               break;
+       default:
+               psb_poll_other(dev, fence_class, waiting_types);
+               break;
+       }
+}
+
+void psb_fence_error(struct drm_device *dev,
+                    uint32_t fence_class,
+                    uint32_t sequence, uint32_t type, int error)
+{
+       struct drm_fence_manager *fm = &dev->fm;
+       unsigned long irq_flags;
+
+       BUG_ON(fence_class >= PSB_NUM_ENGINES);
+       write_lock_irqsave(&fm->lock, irq_flags);
+       drm_fence_handler(dev, fence_class, sequence, type, error);
+       write_unlock_irqrestore(&fm->lock, irq_flags);
+}
+
+int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
+                           uint32_t flags, uint32_t * sequence,
+                           uint32_t * native_type)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       uint32_t seq = 0;
+       int ret;
+
+       if (!dev_priv)
+               return -EINVAL;
+
+       if (fence_class >= PSB_NUM_ENGINES)
+               return -EINVAL;
+
+       switch (fence_class) {
+       case PSB_ENGINE_2D:
+               spin_lock(&dev_priv->sequence_lock);
+               seq = ++dev_priv->sequence[fence_class];
+               spin_unlock(&dev_priv->sequence_lock);
+               ret = psb_blit_sequence(dev_priv, seq);
+               if (ret)
+                       return ret;
+               break;
+       case PSB_ENGINE_VIDEO:
+               spin_lock(&dev_priv->sequence_lock);
+               seq = ++dev_priv->sequence[fence_class];
+               spin_unlock(&dev_priv->sequence_lock);
+               break;
+       default:
+               spin_lock(&dev_priv->sequence_lock);
+               seq = dev_priv->sequence[fence_class];
+               spin_unlock(&dev_priv->sequence_lock);
+       }
+
+       *sequence = seq;
+       *native_type = DRM_FENCE_TYPE_EXE;
+
+       return 0;
+}
+
+uint32_t psb_fence_advance_sequence(struct drm_device * dev,
+                                   uint32_t fence_class)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       uint32_t sequence;
+
+       spin_lock(&dev_priv->sequence_lock);
+       sequence = ++dev_priv->sequence[fence_class];
+       spin_unlock(&dev_priv->sequence_lock);
+
+       return sequence;
+}
+
+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
+{
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
+
+#ifdef FIX_TG_16
+       if (fence_class == 0) {
+               struct drm_psb_private *dev_priv =
+                   (struct drm_psb_private *)dev->dev_private;
+
+               if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
+                   (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+                   ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+                     _PSB_C2B_STATUS_BUSY) == 0))
+                       psb_resume_ta_2d_idle(dev_priv);
+       }
+#endif
+       write_lock(&fm->lock);
+       psb_fence_poll(dev, fence_class, fc->waiting_types);
+       write_unlock(&fm->lock);
+}
+
+static int psb_fence_wait(struct drm_fence_object *fence,
+                         int lazy, int interruptible, uint32_t mask)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_class_manager *fc =
+           &dev->fm.fence_class[fence->fence_class];
+       int ret = 0;
+       unsigned long timeout = DRM_HZ *
+           ((fence->fence_class == PSB_ENGINE_TA) ? 30 : 3);
+
+       drm_fence_object_flush(fence, mask);
+       if (interruptible)
+               ret = wait_event_interruptible_timeout
+                   (fc->fence_queue, drm_fence_object_signaled(fence, mask),
+                    timeout);
+       else
+               ret = wait_event_timeout
+                   (fc->fence_queue, drm_fence_object_signaled(fence, mask),
+                    timeout);
+
+       if (unlikely(ret == -ERESTARTSYS))
+               return -EAGAIN;
+
+       if (unlikely(ret == 0))
+               return -EBUSY;
+
+       return 0;
+}
+
+struct drm_fence_driver psb_fence_driver = {
+       .num_classes = PSB_NUM_ENGINES,
+       .wrap_diff = (1 << 30),
+       .flush_diff = (1 << 29),
+       .sequence_mask = 0xFFFFFFFFU,
+       .has_irq = NULL,
+       .emit = psb_fence_emit_sequence,
+       .flush = NULL,
+       .poll = psb_fence_poll,
+       .needed_flush = NULL,
+       .wait = psb_fence_wait
+};
diff --git a/psb-kernel-source-4.41.1/psb_gtt.c b/psb-kernel-source-4.41.1/psb_gtt.c
new file mode 100644 (file)
index 0000000..28a0b2e
--- /dev/null
@@ -0,0 +1,234 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+#include "drmP.h"
+#include "psb_drv.h"
+
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+       uint32_t mask = PSB_PTE_VALID;
+
+       if (type & PSB_MMU_CACHED_MEMORY)
+               mask |= PSB_PTE_CACHED;
+       if (type & PSB_MMU_RO_MEMORY)
+               mask |= PSB_PTE_RO;
+       if (type & PSB_MMU_WO_MEMORY)
+               mask |= PSB_PTE_WO;
+
+       return (pfn << PAGE_SHIFT) | mask;
+}
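+
+/*
+ * For example, psb_gtt_mask_pte(pfn, PSB_MMU_CACHED_MEMORY) yields
+ * (pfn << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED, while a type
+ * of 0 gives the plain valid, uncached, read/write PTE used for the
+ * stolen-memory and scratch-page entries below.
+ */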
+
+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
+{
+       struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
+
+       if (!tmp)
+               return NULL;
+
+       init_rwsem(&tmp->sem);
+       tmp->dev = dev;
+
+       return tmp;
+}
+
+void psb_gtt_takedown(struct psb_gtt *pg, int free)
+{
+       struct drm_psb_private *dev_priv;
+
+       if (!pg)
+               return;
+
+       dev_priv = pg->dev->dev_private;
+
+       if (pg->gtt_map) {
+               iounmap(pg->gtt_map);
+               pg->gtt_map = NULL;
+       }
+       if (pg->initialized) {
+               pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
+                                     pg->gmch_ctrl);
+               PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
+               (void)PSB_RVDC32(PSB_PGETBL_CTL);
+       }
+       if (free)
+               drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
+}
+
+int psb_gtt_init(struct psb_gtt *pg, int resume)
+{
+       struct drm_device *dev = pg->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned gtt_pages;
+       unsigned long stolen_size;
+       unsigned i, num_pages;
+       unsigned pfn_base;
+
+       int ret = 0;
+       uint32_t pte;
+
+       pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
+       pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+                             pg->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+       pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+       PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+       (void)PSB_RVDC32(PSB_PGETBL_CTL);
+
+       pg->initialized = 1;
+
+       pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
+       pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+       pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+       gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
+       pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+           >> PAGE_SHIFT;
+       pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
+       stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
+
+       PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
+       PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
+       PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
+       PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
+       PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
+
+       /* On resume, either size changing means the GTT state is stale. */
+       if (resume && ((gtt_pages != pg->gtt_pages) ||
+                      (stolen_size != pg->stolen_size))) {
+               DRM_ERROR("GTT resume error.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       pg->gtt_pages = gtt_pages;
+       pg->stolen_size = stolen_size;
+       if (!resume)
+               pg->gtt_map =
+                       ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
+       if (!pg->gtt_map) {
+               DRM_ERROR("Failed to map the GTT.\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       /*
+        * insert stolen pages.
+        */
+
+       pfn_base = pg->stolen_base >> PAGE_SHIFT;
+       num_pages = stolen_size >> PAGE_SHIFT;
+       PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
+                      num_pages, pfn_base);
+       for (i = 0; i < num_pages; ++i) {
+               pte = psb_gtt_mask_pte(pfn_base + i, 0);
+               iowrite32(pte, pg->gtt_map + i);
+       }
+
+       /*
+        * Init rest of gtt.
+        */
+
+       pfn_base = page_to_pfn(dev_priv->scratch_page);
+       pte = psb_gtt_mask_pte(pfn_base, 0);
+       PSB_DEBUG_INIT("Initializing the rest of a total "
+                      "of %d gtt pages.\n", pg->gatt_pages);
+
+       for (; i < pg->gatt_pages; ++i)
+               iowrite32(pte, pg->gtt_map + i);
+       (void)ioread32(pg->gtt_map + i - 1);
+
+       return 0;
+
+      out_err:
+       psb_gtt_takedown(pg, 0);
+       return ret;
+}
+
+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
+                        unsigned offset_pages, unsigned num_pages,
+                        unsigned desired_tile_stride, unsigned hw_tile_stride,
+                        int type)
+{
+       unsigned rows = 1;
+       unsigned add;
+       unsigned row_add;
+       unsigned i;
+       unsigned j;
+       uint32_t *cur_page = NULL;
+       uint32_t pte;
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride;
+       row_add = hw_tile_stride;
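+
+       /*
+        * Example: num_pages == 64 with desired_tile_stride == 16 and a
+        * nonzero hw_tile_stride writes 4 rows of 16 PTEs. Note that
+        * row_add is currently unused; successive rows advance
+        * offset_pages by 'add' (== desired_tile_stride).
+        */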
+
+       down_read(&pg->sem);
+       for (i = 0; i < rows; ++i) {
+               cur_page = pg->gtt_map + offset_pages;
+               for (j = 0; j < desired_tile_stride; ++j) {
+                       pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
+                       iowrite32(pte, cur_page++);
+               }
+               offset_pages += add;
+       }
+       (void)ioread32(cur_page - 1);
+       up_read(&pg->sem);
+
+       return 0;
+}
+
+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
+                        unsigned num_pages, unsigned desired_tile_stride,
+                        unsigned hw_tile_stride)
+{
+       struct drm_psb_private *dev_priv = pg->dev->dev_private;
+       unsigned rows = 1;
+       unsigned add;
+       unsigned row_add;
+       unsigned i;
+       unsigned j;
+       uint32_t *cur_page = NULL;
+       unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
+       uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride;
+       row_add = hw_tile_stride;
+
+       down_read(&pg->sem);
+       for (i = 0; i < rows; ++i) {
+               cur_page = pg->gtt_map + offset_pages;
+               for (j = 0; j < desired_tile_stride; ++j) {
+                       iowrite32(pte, cur_page++);
+               }
+               offset_pages += add;
+       }
+       (void)ioread32(cur_page - 1);
+       up_read(&pg->sem);
+
+       return 0;
+}
diff --git a/psb-kernel-source-4.41.1/psb_i2c.c b/psb-kernel-source-4.41.1/psb_i2c.c
new file mode 100644 (file)
index 0000000..5a80243
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "intel_drv.h"
+#include "psb_drv.h"
+
+#define I2C_HW_B_INTELFB 0x010021 /* intel framebuffer driver */
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
+       uint32_t val;
+
+       val = PSB_RVDC32(chan->reg);
+       return ((val & GPIO_CLOCK_VAL_IN) != 0);
+}
+
+static int get_data(void *data)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
+       uint32_t val;
+
+       val = PSB_RVDC32(chan->reg);
+       return ((val & GPIO_DATA_VAL_IN) != 0);
+}
+
+static void set_clock(void *data, int state_high)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
+       uint32_t reserved = 0, clock_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                           GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+       else
+               clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+                   GPIO_CLOCK_VAL_MASK;
+       PSB_WVDC32(reserved | clock_bits, chan->reg);
+       udelay(I2C_RISEFALL_TIME);      /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
+       uint32_t reserved = 0, data_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                           GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+       else
+               data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+                   GPIO_DATA_VAL_MASK;
+
+       /* Preserve the pullup-disable bits, as set_clock() does. */
+       PSB_WVDC32(reserved | data_bits, chan->reg);
+       udelay(I2C_RISEFALL_TIME);      /* wait for the line to change state */
+}
+
+/**
+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @output: driver specific output device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ *   %GPIOA
+ *   %GPIOB
+ *   %GPIOC
+ *   %GPIOD
+ *   %GPIOE
+ *   %GPIOF
+ *   %GPIOG
+ *   %GPIOH
+ * See the PRM for details on how these different buses are used.
+ */
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev,
+                                       const uint32_t reg, const char *name)
+{
+       struct intel_i2c_chan *chan;
+
+       chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
+       if (!chan)
+               goto out_free;
+
+       chan->drm_dev = dev;
+       chan->reg = reg;
+       snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+       chan->adapter.owner = THIS_MODULE;
+       chan->adapter.id = I2C_HW_B_INTELFB;
+       chan->adapter.algo_data = &chan->algo;
+       chan->adapter.dev.parent = &dev->pdev->dev;
+       chan->algo.setsda = set_data;
+       chan->algo.setscl = set_clock;
+       chan->algo.getsda = get_data;
+       chan->algo.getscl = get_clock;
+       chan->algo.udelay = 20;
+       chan->algo.timeout = usecs_to_jiffies(2200);
+       chan->algo.data = chan;
+
+       i2c_set_adapdata(&chan->adapter, chan);
+
+       if (i2c_bit_add_bus(&chan->adapter))
+               goto out_free;
+
+       /* JJJ:  raise SCL and SDA? */
+       set_data(chan, 1);
+       set_clock(chan, 1);
+       udelay(20);
+
+       return chan;
+
+      out_free:
+       kfree(chan);
+       return NULL;
+}
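+
+/*
+ * Typical usage (illustrative sketch only; the register choice and the
+ * name "CRTDDC_A" are hypothetical and depend on the output probed):
+ *
+ *     struct intel_i2c_chan *ddc;
+ *
+ *     ddc = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+ *     if (!ddc)
+ *             return -ENOMEM;
+ *     ... probe EDID over &ddc->adapter ...
+ *     intel_i2c_destroy(ddc);
+ */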
+
+/**
+ * intel_i2c_destroy - unregister and free i2c bus resources
+ * @output: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void intel_i2c_destroy(struct intel_i2c_chan *chan)
+{
+       if (!chan)
+               return;
+
+       i2c_del_adapter(&chan->adapter);
+       kfree(chan);
+}
diff --git a/psb-kernel-source-4.41.1/psb_irq.c b/psb-kernel-source-4.41.1/psb_irq.c
new file mode 100644 (file)
index 0000000..f3e19c6
--- /dev/null
@@ -0,0 +1,435 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+
+#include "drmP.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_msvdx.h"
+#include "psb_detear.h"
+#include <linux/wait.h>
+
+extern wait_queue_head_t hotplug_queue;
+char hotplug_env = '0';
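+
+/*
+ * hotplug_env is flipped to '1' by psb_vdc_interrupt() below, after
+ * which anyone sleeping on hotplug_queue is woken via
+ * wake_up_interruptible().
+ */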
+
+
+/*
+ * Video display controller interrupt.
+ */
+static void psb_hotplug_irqhandler(struct drm_psb_private *dev_priv, uint32_t status)
+{
+        struct psb_xhw_buf buf;
+        INIT_LIST_HEAD(&buf.head);
+
+        if (status & _PSB_HOTPLUG_INTERRUPT_FLAG)
+               psb_xhw_hotplug(dev_priv, &buf);
+}
+
+static int underrun = 0;
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       uint32_t pipestat;
+       int wake = 0;
+
+       pipestat = PSB_RVDC32(PSB_PIPEASTAT);
+       if (pipestat & (1<<31)) {
+               printk(KERN_WARNING "buffer underrun 0x%x\n", underrun++);
+               PSB_WVDC32(1<<31 | 1<<15, PSB_PIPEASTAT);
+       }
+
+       if ((!drm_psb_disable_vsync) && 
+           (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
+               atomic_inc(&dev->vbl_received);
+               wake = 1;
+               PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
+                          _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
+       }
+
+       if ((!drm_psb_disable_vsync) &&
+           (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
+               atomic_inc(&dev->vbl_received2);
+               wake = 1;
+               PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
+                          _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
+       }
+
+       if (vdc_stat & _PSB_HOTPLUG_INTERRUPT_FLAG) {
+               uint32_t hotplugstat;
+
+               /* Clear the second-level hotplug status register. */
+               spin_lock(&dev_priv->irqmask_lock);
+               hotplugstat = PSB_RVDC32(PORT_HOTPLUG_STATUS_REG);
+               PSB_WVDC32(hotplugstat, PORT_HOTPLUG_STATUS_REG);
+               spin_unlock(&dev_priv->irqmask_lock);
+
+               hotplug_env = '1';
+               wake_up_interruptible(&hotplug_queue);
+       }
+
+       PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+       (void)PSB_RVDC32(PSB_INT_IDENTITY_R);
+       DRM_READMEMORYBARRIER();
+
+       if (wake) {
+               DRM_WAKEUP(&dev->vbl_queue);
+               drm_vbl_send_signals(dev);
+       }
+}
+
+/*
+ * SGX interrupt source 1.
+ */
+
+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
+                             uint32_t sgx_stat2)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
+               DRM_WAKEUP(&dev_priv->event_2d_queue);
+               psb_fence_handler(dev, 0);
+       }
+
+       if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
+               psb_print_pagefault(dev_priv);
+
+       psb_scheduler_handler(dev_priv, sgx_stat);
+}
+
+/*
+ * MSVDX interrupt.
+ */
+static void psb_msvdx_interrupt(struct drm_device *dev, uint32_t msvdx_stat)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
+               /* Ideally we should never get here */
+               PSB_DEBUG_GENERAL
+                   ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MMU FAULT)\n",
+                    msvdx_stat, dev_priv->fence2_irq_on);
+
+               /* Pause MMU */
+               PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
+                            MSVDX_MMU_CONTROL0);
+               DRM_WRITEMEMORYBARRIER();
+
+               /* Clear this interrupt bit only */
+               PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
+                            MSVDX_INTERRUPT_CLEAR);
+               PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
+               DRM_READMEMORYBARRIER();
+
+               dev_priv->msvdx_needs_reset = 1;
+       } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
+               PSB_DEBUG_GENERAL
+                   ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MTX)\n",
+                    msvdx_stat, dev_priv->fence2_irq_on);
+
+               /* Clear all interrupt bits */
+               PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
+               PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
+               DRM_READMEMORYBARRIER();
+
+               psb_msvdx_mtx_interrupt(dev);
+       }
+}
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *)arg;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       uint32_t vdc_stat;
+       uint32_t sgx_stat;
+       uint32_t sgx_stat2;
+       uint32_t msvdx_stat;
+       int handled = 0;
+
+       spin_lock(&dev_priv->irqmask_lock);
+
+       vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+       sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+       sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+       msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
+
+       sgx_stat2 &= dev_priv->sgx2_irq_mask;
+       sgx_stat &= dev_priv->sgx_irq_mask;
+       PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
+       PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
+       (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
+
+       vdc_stat &= dev_priv->vdc_irq_mask;
+       spin_unlock(&dev_priv->irqmask_lock);
+
+       if (msvdx_stat) {
+               psb_msvdx_interrupt(dev, msvdx_stat);
+               handled = 1;
+       }
+
+       if (vdc_stat) {
+#ifdef PSB_DETEAR
+               if(psb_blit_info.cmd_ready) {
+                       psb_blit_info.cmd_ready = 0;
+                       psb_blit_2d_reg_write(dev_priv, psb_blit_info.cmdbuf);
+                       /* to resume the blocked psb_cmdbuf_2d() */
+                       set_bit(0, &psb_blit_info.vdc_bit);
+               }
+#endif /* PSB_DETEAR */
+
+               /* MSVDX IRQ status is part of vdc_irq_mask */
+               psb_vdc_interrupt(dev, vdc_stat);
+               handled = 1;
+       }
+
+       if (sgx_stat || sgx_stat2) {
+               psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
+               handled = 1;
+       }
+
+       if (!handled) {
+               return IRQ_NONE;
+       }
+
+       return IRQ_HANDLED;
+}
+
+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
+{
+       unsigned long mtx_int = 0;
+       dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
+
+       /* Clear MTX interrupt */
+       REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
+       PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       spin_lock(&dev_priv->irqmask_lock);
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+       PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
+       PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+       PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
+       (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+
+       dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
+           _PSB_CE_DPM_3D_MEM_FREE |
+           _PSB_CE_TA_FINISHED |
+           _PSB_CE_DPM_REACHED_MEM_THRESH |
+           _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
+           _PSB_CE_DPM_OUT_OF_MEMORY_MT |
+           _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
+
+       dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
+
+       dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG | _PSB_HOTPLUG_INTERRUPT_ENABLE;
+
+       if (!drm_psb_disable_vsync || drm_psb_detear)
+               dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
+                   _PSB_VSYNC_PIPEB_FLAG;
+
+       /* Clear MTX interrupt */
+       {
+               unsigned long mtx_int = 0;
+               REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
+                                      CR_MTX_IRQ, 1);
+               PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
+       }
+       spin_unlock(&dev_priv->irqmask_lock);
+}
+
+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
+{
+       /* Enable MTX interrupt to host */
+       unsigned long enables = 0;
+       PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
+       REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
+       PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
+}
+
+void psb_irq_postinstall(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+       PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
+       PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
+       (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+       /****MSVDX IRQ Setup...*****/
+       /* Enable MTX interrupt to host */
+       {
+               unsigned long enables = 0;
+               PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
+               REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
+                                      CR_MTX_IRQ, 1);
+               PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
+       }
+       dev_priv->irq_enabled = 1;
+
+       /* Enable SDVOB hotplug detection. */
+       {
+               uint32_t hotplug_stat =
+                   PSB_RVDC32(PORT_HOTPLUG_ENABLE_REG);
+
+               PSB_WVDC32(hotplug_stat | SDVOB_HOTPLUG_DETECT_ENABLE,
+                          PORT_HOTPLUG_ENABLE_REG);
+       }
+
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       dev_priv->sgx_irq_mask = 0x00000000;
+       dev_priv->sgx2_irq_mask = 0x00000000;
+       dev_priv->vdc_irq_mask = 0x00000000;
+
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+       PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+       PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
+       PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
+       wmb();
+       PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
+
+       /****MSVDX IRQ Setup...*****/
+       /* Clear interrupt enabled flag */
+       PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
+
+       dev_priv->irq_enabled = 0;
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+}
+
+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
+{
+       unsigned long irqflags;
+       uint32_t old_mask;
+       uint32_t cleared_mask;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+       --dev_priv->irqen_count_2d;
+       if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
+
+               old_mask = dev_priv->sgx_irq_mask;
+               dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
+               PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
+               (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+
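+               /*
+                * (old_mask ^ new mask) & old_mask == the bits just
+                * removed from the IRQ mask; writing them to
+                * EVENT_HOST_CLEAR acks any such event already pending.
+                */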
+               cleared_mask = (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
+               PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
+               (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
+       }
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+       if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
+               dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
+               PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
+               (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+       }
+       ++dev_priv->irqen_count_2d;
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+static int psb_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
+                             atomic_t * counter)
+{
+       unsigned int cur_vblank;
+       int ret = 0;
+
+       DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+                   (((cur_vblank = atomic_read(counter))
+                     - *sequence) <= (1 << 23)));
+
+       *sequence = cur_vblank;
+
+       return ret;
+}
+
+int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+       int ret;
+
+       ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received);
+       /* printk(KERN_ERR "toe: seq = %d, drm_dev=0x%x ret=%d, %s",
+          *sequence, dev, ret, __FUNCTION__); */
+       return ret;
+}
+
+int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+       int ret;
+
+       ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received2);
+       /* printk(KERN_ERR "toe: seq = %d, drm_dev=0x%x ret=%d, %s",
+          *sequence, dev, ret, __FUNCTION__); */
+       return ret;
+}
+
+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+       if (dev_priv->irq_enabled) {
+               dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
+               PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+               (void)PSB_RSGX32(PSB_INT_ENABLE_R);
+       }
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+       if (dev_priv->irq_enabled) {
+               dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
+               PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+               (void)PSB_RSGX32(PSB_INT_ENABLE_R);
+       }
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
diff --git a/psb-kernel-source-4.41.1/psb_mmu.c b/psb-kernel-source-4.41.1/psb_mmu.c
new file mode 100644 (file)
index 0000000..7e6f87c
--- /dev/null
@@ -0,0 +1,1034 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+#include "drmP.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposed may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
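+
+/*
+ * A sketch of the resulting pattern (as used in psb_mmu_alloc_pt()
+ * below):
+ *
+ *     spin_lock(&pd->driver->lock);
+ *     v = kmap_atomic(pt->p, KM_USER0);
+ *     ... touch the ptes ...
+ *     kunmap_atomic(v, KM_USER0);
+ *     spin_unlock(&pd->driver->lock);
+ */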
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller need to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
+
+struct psb_mmu_driver {
+       /* protects driver- and pd structures. Always take in read mode
+        * before taking the page table spinlock.
+        */
+       struct rw_semaphore sem;
+
+       /* protects page tables, directory tables and pt tables.
+        * and pt structures.
+        */
+       spinlock_t lock;
+
+       atomic_t needs_tlbflush;
+       atomic_t *msvdx_mmu_invaldc;
+       uint8_t __iomem *register_map;
+       struct psb_mmu_pd *default_pd;
+       uint32_t bif_ctrl;
+       int has_clflush;
+       int clflush_add;
+       unsigned long clflush_mask;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+       struct psb_mmu_pd *pd;
+       uint32_t index;
+       uint32_t count;
+       struct page *p;
+       uint32_t *v;
+};
+
+struct psb_mmu_pd {
+       struct psb_mmu_driver *driver;
+       int hw_context;
+       struct psb_mmu_pt **tables;
+       struct page *p;
+       struct page *dummy_pt;
+       struct page *dummy_page;
+       uint32_t pd_mask;
+       uint32_t invalid_pde;
+       uint32_t invalid_pte;
+};
+
+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+{
+       return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+}
+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+{
+       return (offset >> PSB_PDE_SHIFT);
+}
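+
+/*
+ * Example (assuming the usual two-level layout, PSB_PTE_SHIFT == 12
+ * and PSB_PDE_SHIFT == 22): the GPU offset 0x00c01000 decodes to
+ * pd index 3 and pt index 1.
+ */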
+
+#if defined(CONFIG_X86)
+static inline void psb_clflush(void *addr)
+{
+       __asm__ __volatile__("clflush (%0)\n"::"r"(addr):"memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
+{
+       if (!driver->has_clflush)
+               return;
+
+       mb();
+       psb_clflush(addr);
+       mb();
+}
+#else
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
+{
+}
+
+#endif
+
+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
+                                uint32_t val, uint32_t offset)
+{
+       iowrite32(val, d->register_map + offset);
+}
+
+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
+                                   uint32_t offset)
+{
+       return ioread32(d->register_map + offset);
+}
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
+{
+       if (atomic_read(&driver->needs_tlbflush) || force) {
+               uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
+               psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
+                             PSB_CR_BIF_CTRL);
+               wmb();
+               psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
+                             PSB_CR_BIF_CTRL);
+               (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
+               if (driver->msvdx_mmu_invaldc)
+                       atomic_set(driver->msvdx_mmu_invaldc, 1);
+       }
+       atomic_set(&driver->needs_tlbflush, 0);
+}
+
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+{
+       down_write(&driver->sem);
+       psb_mmu_flush_pd_locked(driver, force);
+       up_write(&driver->sem);
+}
+
+void psb_mmu_flush(struct psb_mmu_driver *driver)
+{
+       uint32_t val;
+
+       down_write(&driver->sem);
+       val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
+       if (atomic_read(&driver->needs_tlbflush))
+               psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
+                             PSB_CR_BIF_CTRL);
+       else
+               psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
+                             PSB_CR_BIF_CTRL);
+       wmb();
+       psb_iowrite32(driver,
+                     val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
+                     PSB_CR_BIF_CTRL);
+       (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
+       atomic_set(&driver->needs_tlbflush, 0);
+       if (driver->msvdx_mmu_invaldc)
+               atomic_set(driver->msvdx_mmu_invaldc, 1);
+       up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+{
+       uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
+           PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
+
+       drm_ttm_cache_flush();
+       down_write(&pd->driver->sem);
+       psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), offset);
+       wmb();
+       psb_mmu_flush_pd_locked(pd->driver, 1);
+       pd->hw_context = hw_context;
+       up_write(&pd->driver->sem);
+
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+                                           unsigned long end)
+{
+
+       addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+       return (addr < end) ? addr : end;
+}
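+
+/*
+ * Example (assuming PSB_PDE_MASK == 0x3fffff, i.e. 4 MiB per page
+ * directory entry): addr == 0x00401000 with end == 0x01000000 rounds
+ * up to the next directory boundary, 0x00800000.
+ */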
+
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+{
+       uint32_t mask = PSB_PTE_VALID;
+
+       if (type & PSB_MMU_CACHED_MEMORY)
+               mask |= PSB_PTE_CACHED;
+       if (type & PSB_MMU_RO_MEMORY)
+               mask |= PSB_PTE_RO;
+       if (type & PSB_MMU_WO_MEMORY)
+               mask |= PSB_PTE_WO;
+
+       return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+                                   int trap_pagefaults, int invalid_type)
+{
+       struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+       uint32_t *v;
+       int i;
+
+       if (!pd)
+               return NULL;
+
+       pd->p = alloc_page(GFP_DMA32);
+       if (!pd->p)
+               goto out_err1;
+       pd->dummy_pt = alloc_page(GFP_DMA32);
+       if (!pd->dummy_pt)
+               goto out_err2;
+       pd->dummy_page = alloc_page(GFP_DMA32);
+       if (!pd->dummy_page)
+               goto out_err3;
+
+       if (!trap_pagefaults) {
+               pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+                                                  invalid_type);
+               pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+                                                  invalid_type);
+       } else {
+               pd->invalid_pde = 0;
+               pd->invalid_pte = 0;
+       }
+
+       v = kmap(pd->dummy_pt);
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
+               v[i] = pd->invalid_pte;
+       }
+       kunmap(pd->dummy_pt);
+
+       v = kmap(pd->p);
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
+               v[i] = pd->invalid_pde;
+       }
+       kunmap(pd->p);
+
+       clear_page(kmap(pd->dummy_page));
+       kunmap(pd->dummy_page);
+
+       pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+       if (!pd->tables)
+               goto out_err4;
+
+       pd->hw_context = -1;
+       pd->pd_mask = PSB_PTE_VALID;
+       pd->driver = driver;
+
+       return pd;
+
+      out_err4:
+       __free_page(pd->dummy_page);
+      out_err3:
+       __free_page(pd->dummy_pt);
+      out_err2:
+       __free_page(pd->p);
+      out_err1:
+       kfree(pd);
+       return NULL;
+}
+
+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+{
+       __free_page(pt->p);
+       kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+{
+       struct psb_mmu_driver *driver = pd->driver;
+       struct psb_mmu_pt *pt;
+       int i;
+
+       down_write(&driver->sem);
+       if (pd->hw_context != -1) {
+               psb_iowrite32(driver, 0,
+                             PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
+               psb_mmu_flush_pd_locked(driver, 1);
+       }
+
+       /* Should take the spinlock here, but we don't need to do that
+          since we have the semaphore in write mode. */
+
+       for (i = 0; i < 1024; ++i) {
+               pt = pd->tables[i];
+               if (pt)
+                       psb_mmu_free_pt(pt);
+       }
+
+       vfree(pd->tables);
+       __free_page(pd->dummy_page);
+       __free_page(pd->dummy_pt);
+       __free_page(pd->p);
+       kfree(pd);
+       up_write(&driver->sem);
+}
+
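+/*
+ * Allocate a page table and fill it with the directory's invalid PTE.
+ * On x86, when the directory is bound to a hardware context, the fresh
+ * entries are clflushed so the device cannot see stale cachelines.
+ */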
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+{
+       struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+       void *v;
+       uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+       uint32_t clflush_count = PAGE_SIZE / clflush_add;
+       spinlock_t *lock = &pd->driver->lock;
+       uint8_t *clf;
+       uint32_t *ptes;
+       int i;
+
+       if (!pt)
+               return NULL;
+
+       pt->p = alloc_page(GFP_DMA32);
+       if (!pt->p) {
+               kfree(pt);
+               return NULL;
+       }
+
+       spin_lock(lock);
+
+       v = kmap_atomic(pt->p, KM_USER0);
+       clf = (uint8_t *) v;
+       ptes = (uint32_t *) v;
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
+               *ptes++ = pd->invalid_pte;
+       }
+
+#if defined(CONFIG_X86)
+       if (pd->driver->has_clflush && pd->hw_context != -1) {
+               mb();
+               for (i = 0; i < clflush_count; ++i) {
+                       psb_clflush(clf);
+                       clf += clflush_add;
+               }
+               mb();
+       }
+#endif
+       kunmap_atomic(v, KM_USER0);
+       spin_unlock(lock);
+
+       pt->count = 0;
+       pt->pd = pd;
+       pt->index = 0;
+
+       return pt;
+}
+
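+/*
+ * Return the page table covering @addr, allocating and installing one if
+ * necessary; on success the table is kmapped and driver->lock is held.
+ * The lock must be dropped around the allocation, so the slot is
+ * re-checked afterwards and the loser of any race frees its spare table.
+ */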
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+                                            unsigned long addr)
+{
+       uint32_t index = psb_mmu_pd_index(addr);
+       struct psb_mmu_pt *pt;
+       volatile uint32_t *v;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       while (!pt) {
+               spin_unlock(lock);
+               pt = psb_mmu_alloc_pt(pd);
+               if (!pt)
+                       return NULL;
+               spin_lock(lock);
+
+               if (pd->tables[index]) {
+                       spin_unlock(lock);
+                       psb_mmu_free_pt(pt);
+                       spin_lock(lock);
+                       pt = pd->tables[index];
+                       continue;
+               }
+
+               v = kmap_atomic(pd->p, KM_USER0);
+               pd->tables[index] = pt;
+               v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+               pt->index = index;
+               kunmap_atomic((void *)v, KM_USER0);
+
+               if (pd->hw_context != -1) {
+                       psb_mmu_clflush(pd->driver, (void *)&v[index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+       }
+       pt->v = kmap_atomic(pt->p, KM_USER0);
+       return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+                                             unsigned long addr)
+{
+       uint32_t index = psb_mmu_pd_index(addr);
+       struct psb_mmu_pt *pt;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       if (!pt) {
+               spin_unlock(lock);
+               return NULL;
+       }
+       pt->v = kmap_atomic(pt->p, KM_USER0);
+       return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+{
+       struct psb_mmu_pd *pd = pt->pd;
+       volatile uint32_t *v;
+
+       kunmap_atomic(pt->v, KM_USER0);
+       if (pt->count == 0) {
+               v = kmap_atomic(pd->p, KM_USER0);
+               v[pt->index] = pd->invalid_pde;
+               pd->tables[pt->index] = NULL;
+
+               if (pd->hw_context != -1) {
+                       psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+               kunmap_atomic((void *)v, KM_USER0);
+               spin_unlock(&pd->driver->lock);
+               psb_mmu_free_pt(pt);
+               return;
+       }
+       spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
+                                  uint32_t pte)
+{
+       pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+                                         unsigned long addr)
+{
+       pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+#if 0
+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
+                                        uint32_t mmu_offset)
+{
+       uint32_t *v;
+       uint32_t pfn;
+
+       v = kmap_atomic(pd->p, KM_USER0);
+       if (!v) {
+               printk(KERN_INFO "Could not kmap pde page.\n");
+               return 0;
+       }
+       pfn = v[psb_mmu_pd_index(mmu_offset)];
+       //      printk(KERN_INFO "pde is 0x%08x\n",pfn);
+       kunmap_atomic(v, KM_USER0);
+       if (((pfn & 0x0F) != PSB_PTE_VALID)) {
+               printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
+                      mmu_offset, pfn);
+       }
+       v = ioremap(pfn & 0xFFFFF000, 4096);
+       if (!v) {
+               printk(KERN_INFO "Could not kmap pte page.\n");
+               return 0;
+       }
+       pfn = v[psb_mmu_pt_index(mmu_offset)];
+       // printk(KERN_INFO "pte is 0x%08x\n",pfn);
+       iounmap(v);
+       if (((pfn & 0x0F) != PSB_PTE_VALID)) {
+               printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
+                      mmu_offset, pfn);
+       }
+       return pfn >> PAGE_SHIFT;
+}
+
+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
+                                      uint32_t mmu_offset, uint32_t gtt_pages)
+{
+       uint32_t start;
+       uint32_t next;
+
+       printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
+              mmu_offset, gtt_pages);
+       down_read(&pd->driver->sem);
+       start = psb_mmu_check_pte_locked(pd, mmu_offset);
+       mmu_offset += PAGE_SIZE;
+       gtt_pages -= 1;
+       while (gtt_pages--) {
+               next = psb_mmu_check_pte_locked(pd, mmu_offset);
+               if (next != start + 1) {
+                       printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n",
+                              start, next);
+               }
+               start = next;
+               mmu_offset += PAGE_SIZE;
+       }
+       up_read(&pd->driver->sem);
+}
+
+#endif
+
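+/*
+ * Mirror a range of the GTT into this page directory: consecutive PDEs
+ * are pointed at the GTT page-table pages themselves, so one PDE covers
+ * each GTT page of entries and the MMU effectively reuses the GTT
+ * mappings directly.
+ */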
+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+                       uint32_t mmu_offset, uint32_t gtt_start,
+                       uint32_t gtt_pages)
+{
+       uint32_t *v;
+       uint32_t start = psb_mmu_pd_index(mmu_offset);
+       struct psb_mmu_driver *driver = pd->driver;
+
+       down_read(&driver->sem);
+       spin_lock(&driver->lock);
+
+       v = kmap_atomic(pd->p, KM_USER0);
+       v += start;
+
+       while (gtt_pages--) {
+               *v++ = gtt_start | pd->pd_mask;
+               gtt_start += PAGE_SIZE;
+       }
+
+       drm_ttm_cache_flush();
+       kunmap_atomic(v, KM_USER0);
+       spin_unlock(&driver->lock);
+
+       if (pd->hw_context != -1)
+               atomic_set(&pd->driver->needs_tlbflush, 1);
+
+       up_read(&pd->driver->sem);
+       psb_mmu_flush_pd(pd->driver, 0);
+}
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+{
+       struct psb_mmu_pd *pd;
+
+       down_read(&driver->sem);
+       pd = driver->default_pd;
+       up_read(&driver->sem);
+
+       return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+{
+       struct psb_mmu_pd *pd;
+
+       pd = psb_mmu_get_default_pd(driver);
+       return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+{
+       psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
+       psb_mmu_free_pagedir(driver->default_pd);
+       kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+                                          int trap_pagefaults,
+                                          int invalid_type,
+                                          atomic_t *msvdx_mmu_invaldc)
+{
+       struct psb_mmu_driver *driver;
+
+       driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+
+       if (!driver)
+               return NULL;
+
+       driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+                                             invalid_type);
+       if (!driver->default_pd)
+               goto out_err1;
+
+       spin_lock_init(&driver->lock);
+       init_rwsem(&driver->sem);
+       down_write(&driver->sem);
+       driver->register_map = registers;
+       atomic_set(&driver->needs_tlbflush, 1);
+       driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
+
+       driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
+       psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
+                     PSB_CR_BIF_CTRL);
+       psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
+                     PSB_CR_BIF_CTRL);
+
+       driver->has_clflush = 0;
+
+#if defined(CONFIG_X86)
+       if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+               uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+               /*
+                * clflush size is determined at kernel setup for x86_64 but not for
+                * i386. We have to do it here.
+                */
+
+               cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+               clflush_size = ((misc >> 8) & 0xff) * 8;
+               driver->has_clflush = 1;
+               driver->clflush_add =
+                   PAGE_SIZE * clflush_size / sizeof(uint32_t);
+               driver->clflush_mask = driver->clflush_add - 1;
+               driver->clflush_mask = ~driver->clflush_mask;
+       }
+#endif
+
+       up_write(&driver->sem);
+       return driver;
+
+      out_err1:
+       kfree(driver);
+       return NULL;
+}
+
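+/*
+ * Flush the CPU cachelines holding the PTEs of a (possibly tiled) range,
+ * walked as rows of desired_tile_stride pages spaced hw_tile_stride pages
+ * apart, so the device observes the updated entries.  Without clflush
+ * support this falls back to a full drm_ttm_cache_flush().
+ */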
+#if defined(CONFIG_X86)
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+                              uint32_t num_pages, uint32_t desired_tile_stride,
+                              uint32_t hw_tile_stride)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long clflush_add = pd->driver->clflush_add;
+       unsigned long clflush_mask = pd->driver->clflush_mask;
+
+       if (!pd->driver->has_clflush) {
+               drm_ttm_cache_flush();
+               return;
+       }
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+       mb();
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue;
+                       do {
+                               psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
+                       } while (addr += clflush_add,
+                                (addr & clflush_mask) < next);
+
+                       psb_mmu_pt_unmap_unlock(pt);
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       mb();
+}
+#else
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+                              uint32_t num_pages, uint32_t desired_tile_stride,
+                              uint32_t hw_tile_stride)
+{
+       drm_ttm_cache_flush();
+}
+#endif
+
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+                                unsigned long address, uint32_t num_pages)
+{
+       struct psb_mmu_pt *pt;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address;
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = psb_pd_addr_end(addr, end);
+               pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+               if (!pt)
+                       goto out;
+               do {
+                       psb_mmu_invalidate_pte(pt, addr);
+                       --pt->count;
+               } while (addr += PAGE_SIZE, addr < next);
+               psb_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+
+      out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver);
+
+       return;
+}
+
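+/*
+ * Unmap a (possibly tiled) range.  Each invalidated entry drops its page
+ * table's use count, and emptied tables are freed by
+ * psb_mmu_pt_unmap_unlock(); a bound hardware context additionally gets
+ * its PTE cachelines flushed and a TLB flush afterwards.
+ */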
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+                         uint32_t num_pages, uint32_t desired_tile_stride,
+                         uint32_t hw_tile_stride)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address;
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+
+       down_read(&pd->driver->sem);
+
+       /* Make sure we only need to flush this processor's cache */
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue;
+                       do {
+                               psb_mmu_invalidate_pte(pt, addr);
+                               --pt->count;
+
+                       } while (addr += PAGE_SIZE, addr < next);
+                       psb_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver);
+}
+
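+/* Map num_pages physically contiguous page frames starting at start_pfn. */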
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+                               unsigned long address, uint32_t num_pages,
+                               int type)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address;
+       int ret = -ENOMEM;
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = psb_pd_addr_end(addr, end);
+               pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+               if (!pt) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               do {
+                       pte = psb_mmu_mask_pte(start_pfn++, type);
+                       psb_mmu_set_pte(pt, addr, pte);
+                       pt->count++;
+               } while (addr += PAGE_SIZE, addr < next);
+               psb_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+       ret = 0;
+
+      out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver);
+
+       return ret;
+}
+
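+/*
+ * Map an array of pages, optionally as tiles: num_pages must then be a
+ * whole number of rows of desired_tile_stride pages, with consecutive
+ * rows placed hw_tile_stride pages apart in the virtual range.
+ */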
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+                        unsigned long address, uint32_t num_pages,
+                        uint32_t desired_tile_stride, uint32_t hw_tile_stride,
+                        int type)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address;
+       int ret = -ENOMEM;
+
+       if (hw_tile_stride) {
+               if (num_pages % desired_tile_stride != 0)
+                       return -EINVAL;
+               rows = num_pages / desired_tile_stride;
+       } else {
+               desired_tile_stride = num_pages;
+       }
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+
+       down_read(&pd->driver->sem);
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+                       if (!pt)
+                               goto out;
+                       do {
+                               pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
+                                                      type);
+                               psb_mmu_set_pte(pt, addr, pte);
+                               pt->count++;
+                       } while (addr += PAGE_SIZE, addr < next);
+                       psb_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+
+               address += row_add;
+       }
+       ret = 0;
+      out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver);
+
+       return ret;
+}
+
+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
+{
+       mask &= _PSB_MMU_ER_MASK;
+       psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
+                     PSB_CR_BIF_CTRL);
+       (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
+}
+
+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
+{
+       mask &= _PSB_MMU_ER_MASK;
+       psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
+                     PSB_CR_BIF_CTRL);
+       (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
+}
+
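+/*
+ * Translate an MMU virtual address to a page frame number by walking the
+ * software page tables; returns -EINVAL when no valid mapping exists.
+ */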
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+                          unsigned long *pfn)
+{
+       int ret;
+       struct psb_mmu_pt *pt;
+       uint32_t tmp;
+       spinlock_t *lock = &pd->driver->lock;
+
+       down_read(&pd->driver->sem);
+       pt = psb_mmu_pt_map_lock(pd, virtual);
+       if (!pt) {
+               uint32_t *v;
+
+               spin_lock(lock);
+               v = kmap_atomic(pd->p, KM_USER0);
+               tmp = v[psb_mmu_pd_index(virtual)];
+               kunmap_atomic(v, KM_USER0);
+               spin_unlock(lock);
+
+               if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+                   !(pd->invalid_pte & PSB_PTE_VALID)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ret = 0;
+               *pfn = pd->invalid_pte >> PAGE_SHIFT;
+               goto out;
+       }
+       tmp = pt->v[psb_mmu_pt_index(virtual)];
+       if (!(tmp & PSB_PTE_VALID)) {
+               ret = -EINVAL;
+       } else {
+               ret = 0;
+               *pfn = tmp >> PAGE_SHIFT;
+       }
+       psb_mmu_pt_unmap_unlock(pt);
+      out:
+       up_read(&pd->driver->sem);
+       return ret;
+}
+
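+/*
+ * Self-test: map one CPU page at the given MMU offset, then read and
+ * write it through the GART aperture with the host requestor enabled to
+ * verify that translation works.
+ */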
+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
+{
+       struct page *p;
+       unsigned long pfn;
+       int ret = 0;
+       struct psb_mmu_pd *pd;
+       uint32_t *v;
+       uint32_t *vmmu;
+
+       pd = driver->default_pd;
+       if (!pd) {
+               printk(KERN_WARNING "Could not get default pd\n");
+               return;
+       }
+
+       p = alloc_page(GFP_DMA32);
+
+       if (!p) {
+               printk(KERN_WARNING "Failed allocating page\n");
+               return;
+       }
+
+       v = kmap(p);
+       memset(v, 0x67, PAGE_SIZE);
+
+       pfn = (offset >> PAGE_SHIFT);
+
+       ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
+       if (ret) {
+               printk(KERN_WARNING "Failed inserting mmu page\n");
+               goto out_err1;
+       }
+
+       /* Ioremap the page through the GART aperture */
+
+       vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+       if (!vmmu) {
+               printk(KERN_WARNING "Failed ioremapping page\n");
+               goto out_err2;
+       }
+
+       /* Read from the page with mmu disabled. */
+       printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
+
+       /* Enable the mmu for host accesses and read again. */
+       psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
+
+       printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
+              ioread32(vmmu));
+       *v = 0x15243705;
+       printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
+              ioread32(vmmu));
+       iowrite32(0x16243355, vmmu);
+       (void)ioread32(vmmu);
+       printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
+
+       printk(KERN_INFO "Int stat is 0x%08x\n",
+              psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
+       printk(KERN_INFO "Fault is 0x%08x\n",
+              psb_ioread32(driver, PSB_CR_BIF_FAULT));
+
+       /* Disable MMU for host accesses and clear page fault register */
+       psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
+       iounmap(vmmu);
+      out_err2:
+       psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
+      out_err1:
+       kunmap(p);
+       __free_page(p);
+}
diff --git a/psb-kernel-source-4.41.1/psb_msvdx.c b/psb-kernel-source-4.41.1/psb_msvdx.c
new file mode 100644 (file)
index 0000000..74850a5
--- /dev/null
@@ -0,0 +1,681 @@
+/**
+ * file psb_msvdx.c
+ * MSVDX I/O operations and IRQ handling
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "drm_os_linux.h"
+#include "psb_drv.h"
+#include "psb_drm.h"
+#include "psb_msvdx.h"
+
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#ifndef list_first_entry
+#define list_first_entry(ptr, type, member) \
+       list_entry((ptr)->next, type, member)
+#endif
+
+static int psb_msvdx_send (struct drm_device *dev, void *cmd,
+                          unsigned long cmd_size);
+
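+/*
+ * Send the oldest queued command to the hardware; if the queue is empty
+ * the engine is marked idle and -EINVAL is returned.
+ */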
+int
+psb_msvdx_dequeue_send (struct drm_device *dev)
+{
+  struct drm_psb_private *dev_priv = dev->dev_private;
+  struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
+  int ret = 0;
+
+  if (list_empty (&dev_priv->msvdx_queue))
+    {
+      PSB_DEBUG_GENERAL ("MSVDXQUE: msvdx list empty.\n");
+      dev_priv->msvdx_busy = 0;
+      return -EINVAL;
+    }
+  msvdx_cmd =
+    list_first_entry (&dev_priv->msvdx_queue, struct psb_msvdx_cmd_queue,
+                     head);
+  PSB_DEBUG_GENERAL ("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
+  ret = psb_msvdx_send (dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
+  if (ret)
+    {
+      PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
+      ret = -EINVAL;
+    }
+  list_del (&msvdx_cmd->head);
+  kfree (msvdx_cmd->cmd);
+  drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
+  return ret;
+}
+
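+/*
+ * Validate a command stream and patch each render message with its fence
+ * value and the shared MMU page-directory address; the stream is then
+ * either sent immediately or copied into *msvdx_cmd for deferred sending.
+ */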
+int
+psb_msvdx_map_command (struct drm_device *dev,
+                      struct drm_buffer_object *cmd_buffer,
+                      unsigned long cmd_offset, unsigned long cmd_size,
+                      void **msvdx_cmd, uint32_t sequence, int copy_cmd)
+{
+  struct drm_psb_private *dev_priv = dev->dev_private;
+  int ret = 0;
+  unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
+  unsigned long cmd_size_remaining;
+  struct drm_bo_kmap_obj cmd_kmap;
+  void *cmd, *tmp, *cmd_start;
+  int is_iomem;
+
+  /* command buffers may not exceed page boundary */
+  if (cmd_size + cmd_page_offset > PAGE_SIZE)
+    return -EINVAL;
+
+  ret = drm_bo_kmap (cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
+
+  if (ret)
+    {
+      PSB_DEBUG_GENERAL ("MSVDXQUE:ret:%d\n", ret);
+      return ret;
+    }
+
+  cmd_start =
+    (void *) drm_bmo_virtual (&cmd_kmap, &is_iomem) + cmd_page_offset;
+  cmd = cmd_start;
+  cmd_size_remaining = cmd_size;
+
+  while (cmd_size_remaining > 0)
+    {
+      uint32_t mmu_ptd;
+      uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
+      uint32_t cur_cmd_id = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_ID);
+      PSB_DEBUG_GENERAL
+       ("cmd start at %08x cur_cmd_size = %d cur_cmd_id = %02x fence = %08x\n",
+        (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
+      if ((cur_cmd_size % sizeof (uint32_t))
+         || (cur_cmd_size > cmd_size_remaining))
+       {
+         ret = -EINVAL;
+         PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+         goto out;
+       }
+
+      switch (cur_cmd_id)
+       {
+       case VA_MSGID_RENDER:
+         /* Fence ID */
+         MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_FENCE_VALUE, sequence);
+
+         mmu_ptd = psb_get_default_pd_addr (dev_priv->mmu);
+         if (atomic_cmpxchg (&dev_priv->msvdx_mmu_invaldc, 1, 0) == 1)
+           {
+             mmu_ptd |= 1;
+             PSB_DEBUG_GENERAL ("MSVDX: Setting MMU invalidate flag\n");
+           }
+         /* PTD */
+         MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
+         break;
+
+       default:
+         /* Msg not supported */
+         ret = -EINVAL;
+         PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+         goto out;
+       }
+
+      cmd += cur_cmd_size;
+      cmd_size_remaining -= cur_cmd_size;
+    }
+
+  if (copy_cmd)
+    {
+      PSB_DEBUG_GENERAL
+       ("MSVDXQUE: psb_msvdx_map_command copying command...\n");
+      tmp = drm_calloc (1, cmd_size, DRM_MEM_DRIVER);
+      if (tmp == NULL)
+       {
+         ret = -ENOMEM;
+         PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+         goto out;
+       }
+      memcpy (tmp, cmd_start, cmd_size);
+      *msvdx_cmd = tmp;
+    }
+  else
+    {
+      PSB_DEBUG_GENERAL
+       ("MSVDXQUE: psb_msvdx_map_command did NOT copy command...\n");
+      ret = psb_msvdx_send (dev, cmd_start, cmd_size);
+      if (ret)
+       {
+         PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
+         ret = -EINVAL;
+       }
+    }
+
+out:
+  drm_bo_kunmap (&cmd_kmap);
+
+  return ret;
+}
+
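+/*
+ * Submission entry point: reset and reinitialize the core first if it is
+ * flagged as needing a reset, then either send the command stream right
+ * away when the decoder is idle or queue a copy on msvdx_queue for the
+ * interrupt handler to dequeue when the current command completes.
+ */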
+int
+psb_submit_video_cmdbuf (struct drm_device *dev,
+                        struct drm_buffer_object *cmd_buffer,
+                        unsigned long cmd_offset, unsigned long cmd_size,
+                        struct drm_fence_object *fence)
+{
+  struct drm_psb_private *dev_priv = dev->dev_private;
+  uint32_t sequence = fence->sequence;
+  unsigned long irq_flags;
+  int ret = 0;
+
+  mutex_lock (&dev_priv->msvdx_mutex);
+  psb_schedule_watchdog (dev_priv);
+
+  spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
+  dev_priv->msvdx_power_saving = 0;
+
+  if (dev_priv->msvdx_needs_reset)
+    {
+      spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
+      PSB_DEBUG_GENERAL ("MSVDX: Needs reset\n");
+      if (psb_msvdx_reset (dev_priv))
+       {
+         mutex_unlock (&dev_priv->msvdx_mutex);
+         ret = -EBUSY;
+         PSB_DEBUG_GENERAL ("MSVDX: Reset failed\n");
+         return ret;
+       }
+      PSB_DEBUG_GENERAL ("MSVDX: Reset ok\n");
+      dev_priv->msvdx_needs_reset = 0;
+      dev_priv->msvdx_busy = 0;
+      dev_priv->msvdx_start_idle = 0;
+
+      psb_msvdx_init (dev);
+      psb_msvdx_irq_preinstall (dev_priv);
+      psb_msvdx_irq_postinstall (dev_priv);
+      PSB_DEBUG_GENERAL ("MSVDX: Init ok\n");
+      spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
+    }
+
+  if (!dev_priv->msvdx_busy)
+    {
+      dev_priv->msvdx_busy = 1;
+      spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
+      PSB_DEBUG_GENERAL
+       ("MSVDXQUE: nothing in the queue sending sequence:%08x..\n",
+        sequence);
+      ret =
+       psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
+                              NULL, sequence, 0);
+      if (ret)
+       {
+         mutex_unlock (&dev_priv->msvdx_mutex);
+         PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
+         return ret;
+       }
+    }
+  else
+    {
+      struct psb_msvdx_cmd_queue *msvdx_cmd;
+      void *cmd = NULL;
+
+      spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
+      /*queue the command to be sent when the h/w is ready */
+      PSB_DEBUG_GENERAL ("MSVDXQUE: queueing sequence:%08x..\n", sequence);
+      msvdx_cmd =
+       drm_calloc (1, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
+      if (msvdx_cmd == NULL)
+       {
+         mutex_unlock (&dev_priv->msvdx_mutex);
+         PSB_DEBUG_GENERAL ("MSVDXQUE: Out of memory...\n");
+         return -ENOMEM;
+       }
+
+      ret =
+       psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
+                              &cmd, sequence, 1);
+      if (ret)
+       {
+         mutex_unlock (&dev_priv->msvdx_mutex);
+         PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
+         drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue),
+                   DRM_MEM_DRIVER);
+         return ret;
+       }
+      msvdx_cmd->cmd = cmd;
+      msvdx_cmd->cmd_size = cmd_size;
+      msvdx_cmd->sequence = sequence;
+      spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
+      list_add_tail (&msvdx_cmd->head, &dev_priv->msvdx_queue);
+      if (!dev_priv->msvdx_busy)
+       {
+         dev_priv->msvdx_busy = 1;
+         PSB_DEBUG_GENERAL ("MSVDXQUE: Need immediate dequeue\n");
+         psb_msvdx_dequeue_send (dev);
+       }
+      spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
+    }
+  mutex_unlock (&dev_priv->msvdx_mutex);
+  return ret;
+}
+
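+/* Split a stream into individual firmware messages and hand each one to psb_mtx_send(). */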
+int
+psb_msvdx_send (struct drm_device *dev, void *cmd, unsigned long cmd_size)
+{
+  int ret = 0;
+  struct drm_psb_private *dev_priv = dev->dev_private;
+
+  while (cmd_size > 0)
+    {
+      uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
+      if (cur_cmd_size > cmd_size)
+       {
+         ret = -EINVAL;
+         PSB_DEBUG_GENERAL
+           ("MSVDX: cmd_size = %d cur_cmd_size = %d\n",
+            (int) cmd_size, cur_cmd_size);
+         goto out;
+       }
+      /* Send the message to h/w */
+      ret = psb_mtx_send (dev_priv, cmd);
+      if (ret)
+       {
+         PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+         goto out;
+       }
+      cmd += cur_cmd_size;
+      cmd_size -= cur_cmd_size;
+    }
+
+out:
+  PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+  return ret;
+}
+
+/***********************************************************************************
+ * Function Name      : psb_mtx_send
+ * Inputs             : dev_priv, pvMsg - firmware message to copy to the MTX
+ * Outputs            : -
+ * Returns            : 0 on success, -EINVAL for an oversized message or full buffer
+ * Description        : Copies one message into the to-MTX ring in VEC local RAM,
+ *                      sending a padding message first if the real message would
+ *                      wrap the buffer, then kicks the MTX.
+ ************************************************************************************/
+int
+psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg)
+{
+
+  static uint32_t padMessage[FWRK_PADMSG_SIZE];
+
+  const uint32_t *pui32Msg = (uint32_t *) pvMsg;
+  uint32_t msgNumWords, wordsFree, readIndex, writeIndex;
+  int ret = 0;
+
+  PSB_DEBUG_GENERAL ("MSVDX: psb_mtx_send\n");
+
+  /* we need clocks enabled before we touch VEC local ram */
+  PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
+
+  msgNumWords = (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_SIZE) + 3) / 4;
+
+  if (msgNumWords > NUM_WORDS_MTX_BUF)
+    {
+      ret = -EINVAL;
+      PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+      goto out;
+    }
+
+  readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
+  writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+  if (writeIndex + msgNumWords > NUM_WORDS_MTX_BUF)
+    {                          /* message would wrap, need to send a pad message */
+      BUG_ON (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_ID) == FWRK_MSGID_PADDING); /* Shouldn't happen for a PAD message itself */
+      /* If the read pointer is at zero then we must wait for it to change,
+       * otherwise the write pointer will equal the read pointer, which
+       * should only happen when the buffer is empty.
+       *
+       * This will only happen if we try to overfill the queue; queue
+       * management should make sure that never happens in the first place.
+       */
+      BUG_ON (0 == readIndex);
+      if (0 == readIndex)
+       {
+         ret = -EINVAL;
+         PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+         goto out;
+       }
+      /* Send a pad message */
+      MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_SIZE,
+                        (NUM_WORDS_MTX_BUF - writeIndex) << 2);
+      MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_ID, FWRK_MSGID_PADDING);
+      psb_mtx_send (dev_priv, padMessage);
+      writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
+    }
+
+  wordsFree =
+    (writeIndex >=
+     readIndex) ? NUM_WORDS_MTX_BUF - (writeIndex -
+                                      readIndex) : readIndex - writeIndex;
+
+  BUG_ON (msgNumWords > wordsFree);
+  if (msgNumWords > wordsFree)
+    {
+      ret = -EINVAL;
+      PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
+      goto out;
+    }
+
+  while (msgNumWords > 0)
+    {
+      PSB_WMSVDX32 (*pui32Msg++, MSVDX_COMMS_TO_MTX_BUF + (writeIndex << 2));
+      msgNumWords--;
+      writeIndex++;
+      if (NUM_WORDS_MTX_BUF == writeIndex)
+       {
+         writeIndex = 0;
+       }
+    }
+  PSB_WMSVDX32 (writeIndex, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+  /* Make sure clocks are enabled before we kick */
+  PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
+
+  /* signal an interrupt to let the mtx know there is a new message */
+  PSB_WMSVDX32 (1, MSVDX_MTX_KICKI);
+
+out:
+  return ret;
+}
+
+/*
+ * MSVDX MTX interrupt
+ */
+void
+psb_msvdx_mtx_interrupt (struct drm_device *dev)
+{
+  static uint32_t msgBuffer[128];
+  uint32_t readIndex, writeIndex;
+  uint32_t msgNumWords, msgWordOffset;
+  struct drm_psb_private *dev_priv =
+    (struct drm_psb_private *) dev->dev_private;
+
+  /* Are the clocks enabled? If not, enable them before attempting to read from the VLR */
+  if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
+    {
+      PSB_DEBUG_GENERAL
+       ("MSVDX: Warning - Clocks disabled when Interrupt set\n");
+      PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
+    }
+
+  for (;;)
+    {
+      readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_RD_INDEX);
+      writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_WRT_INDEX);
+
+      if (readIndex != writeIndex)
+       {
+         msgWordOffset = 0;
+
+         msgBuffer[msgWordOffset] =
+           PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
+
+         msgNumWords = (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_SIZE) + 3) / 4;       /* round to nearest word */
+
+         /*ASSERT(msgNumWords <= sizeof(msgBuffer) / sizeof(uint32_t)); */
+
+         if (++readIndex >= NUM_WORDS_HOST_BUF)
+           readIndex = 0;
+
+         for (msgWordOffset++; msgWordOffset < msgNumWords; msgWordOffset++)
+           {
+             msgBuffer[msgWordOffset] =
+               PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
+
+             if (++readIndex >= NUM_WORDS_HOST_BUF)
+               {
+                 readIndex = 0;
+               }
+           }
+
+         /* Update the Read index */
+         PSB_WMSVDX32 (readIndex, MSVDX_COMMS_TO_HOST_RD_INDEX);
+
+         if (!dev_priv->msvdx_needs_reset)
+           switch (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID))
+             {
+             case VA_MSGID_CMD_HW_PANIC:
+             case VA_MSGID_CMD_FAILED:
+               {
+                 uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
+                                                        FW_VA_CMD_FAILED_FENCE_VALUE);
+                 uint32_t ui32FaultStatus = MEMIO_READ_FIELD (msgBuffer,
+                                                              FW_VA_CMD_FAILED_IRQSTATUS);
+
+                if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC )
+                 PSB_DEBUG_GENERAL
+                   ("MSVDX: VA_MSGID_CMD_HW_PANIC: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
+                    ui32Fence, ui32FaultStatus);
+                else
+                 PSB_DEBUG_GENERAL
+                   ("MSVDX: VA_MSGID_CMD_FAILED: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
+                    ui32Fence, ui32FaultStatus);
+
+                 dev_priv->msvdx_needs_reset = 1;
+
+                if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC)
+                   {
+                     if (dev_priv->msvdx_current_sequence -
+                         dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
+                       dev_priv->msvdx_current_sequence++;
+                     PSB_DEBUG_GENERAL
+                       ("MSVDX: Fence ID missing, assuming %08x\n",
+                        dev_priv->msvdx_current_sequence);
+                   }
+                else
+                  dev_priv->msvdx_current_sequence = ui32Fence;
+
+                 psb_fence_error (dev,
+                                  PSB_ENGINE_VIDEO,
+                                  dev_priv->
+                                  msvdx_current_sequence,
+                                  DRM_FENCE_TYPE_EXE, DRM_CMD_FAILED);
+
+                 /* Flush the command queue */
+                 psb_msvdx_flush_cmd_queue (dev);
+
+                 goto isrExit;
+                 break;
+               }
+             case VA_MSGID_CMD_COMPLETED:
+               {
+                 uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
+                                                        FW_VA_CMD_COMPLETED_FENCE_VALUE);
+                 uint32_t ui32Flags =
+                   MEMIO_READ_FIELD (msgBuffer, FW_VA_CMD_COMPLETED_FLAGS);
+
+                 PSB_DEBUG_GENERAL
+                   ("msvdx VA_MSGID_CMD_COMPLETED: FenceID: %08x, flags: 0x%x\n",
+                    ui32Fence, ui32Flags);
+                 dev_priv->msvdx_current_sequence = ui32Fence;
+
+                 psb_fence_handler (dev, PSB_ENGINE_VIDEO);
+
+
+                 if (ui32Flags & FW_VA_RENDER_HOST_INT)
+                   {
+                     /*Now send the next command from the msvdx cmd queue */
+                     psb_msvdx_dequeue_send (dev);
+                     goto isrExit;
+                   }
+                 break;
+               }
+             case VA_MSGID_ACK:
+               PSB_DEBUG_GENERAL ("msvdx VA_MSGID_ACK\n");
+               break;
+
+             case VA_MSGID_TEST1:
+               PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST1\n");
+               break;
+
+             case VA_MSGID_TEST2:
+               PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST2\n");
+               break;
+               /* Don't need to do anything with these messages */
+
+             case VA_MSGID_DEBLOCK_REQUIRED:
+               {
+                 uint32_t ui32ContextId = MEMIO_READ_FIELD (msgBuffer,
+                                                            FW_VA_DEBLOCK_REQUIRED_CONTEXT);
+
+                 /* The BE will now be locked. */
+
+                 /* Unblock rendec by reading the mtx2mtx end of slice */
+                 (void) PSB_RMSVDX32 (MSVDX_RENDEC_READ_DATA);
+
+                 PSB_DEBUG_GENERAL
+                   ("msvdx VA_MSGID_DEBLOCK_REQUIRED Context=%08x\n",
+                    ui32ContextId);
+                 goto isrExit;
+                 break;
+               }
+
+             default:
+               {
+                 PSB_DEBUG_GENERAL
+                   ("ERROR: msvdx Unknown message from MTX\n");
+               }
+               break;
+
+             }
+       }
+      else
+       {
+         /* Get out of here if nothing */
+         break;
+       }
+    }
+isrExit:
+
+#if 1
+  if (!dev_priv->msvdx_busy)
+    {
+      uint32_t ui32FWStatus;
+      uint32_t ui32CCBRoff;
+      uint32_t ui32CCBWoff;
+
+      /* check that clocks are enabled before reading VLR */
+      if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
+       PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
+
+      /* If the firmware says the hardware is idle and the CCB is empty
+       * then we can power down */
+      ui32FWStatus = PSB_RMSVDX32 (MSVDX_COMMS_FW_STATUS);
+      ui32CCBRoff = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
+      ui32CCBWoff = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+      if ((ui32FWStatus & MSVDX_FW_STATUS_HW_IDLE) && (ui32CCBRoff == ui32CCBWoff))
+       {
+         PSB_DEBUG_GENERAL ("MSVDX_CLOCK: Setting clock to minimal...\n");
+         PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
+       }
+    }
+#endif
+  DRM_MEMORYBARRIER ();
+}
+
+void
+psb_msvdx_lockup (struct drm_psb_private *dev_priv,
+                 int *msvdx_lockup, int *msvdx_idle)
+{
+  unsigned long irq_flags;
+
+  spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
+  *msvdx_lockup = 0;
+  *msvdx_idle = 1;
+
+  if (!dev_priv->has_msvdx)
+  {
+      spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
+      return;
+  }
+#if 0
+  PSB_DEBUG_GENERAL ("MSVDXTimer: current_sequence:%d "
+                    "last_sequence:%d and last_submitted_sequence :%d\n",
+                    dev_priv->msvdx_current_sequence,
+                    dev_priv->msvdx_last_sequence,
+                    dev_priv->sequence[PSB_ENGINE_VIDEO]);
+#endif
+  if (dev_priv->msvdx_current_sequence -
+      dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
+    {
+
+      if (dev_priv->msvdx_current_sequence == dev_priv->msvdx_last_sequence)
+       {
+         PSB_DEBUG_GENERAL
+           ("MSVDXTimer: msvdx locked-up for sequence:%d\n",
+            dev_priv->msvdx_current_sequence);
+         *msvdx_lockup = 1;
+       }
+      else
+       {
+         PSB_DEBUG_GENERAL ("MSVDXTimer: msvdx responded fine so far...\n");
+         dev_priv->msvdx_last_sequence = dev_priv->msvdx_current_sequence;
+         *msvdx_idle = 0;
+       }
+       if (dev_priv->msvdx_start_idle)
+               dev_priv->msvdx_start_idle = 0;
+    } 
+    else 
+    {
+       //if (dev_priv->msvdx_needs_reset == 0)
+       if (dev_priv->msvdx_power_saving == 0)
+       {
+           if (dev_priv->msvdx_start_idle && (dev_priv->msvdx_finished_sequence == dev_priv->msvdx_current_sequence))
+           {
+               //if (dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME >= jiffies)
+               if (time_after_eq(jiffies, dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME))
+               {
+                   printk(KERN_INFO "MSVDX: setting clock to 0 in %s\n", __func__);
+                   PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
+                   /* MSVDX need not be reset for subsequent commands after pausing and resuming playback. */
+                   //dev_priv->msvdx_needs_reset = 1;
+                   dev_priv->msvdx_power_saving = 1;
+               }
+               else
+               {
+                   *msvdx_idle = 0;
+               }
+           }
+           else
+           {
+               dev_priv->msvdx_start_idle = 1;
+               dev_priv->msvdx_idle_start_jiffies = jiffies;
+               dev_priv->msvdx_finished_sequence = dev_priv->msvdx_current_sequence;
+               *msvdx_idle = 0;
+           }
+       }
+    }
+    spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
+}
diff --git a/psb-kernel-source-4.41.1/psb_msvdx.h b/psb-kernel-source-4.41.1/psb_msvdx.h
new file mode 100644 (file)
index 0000000..455791b
--- /dev/null
@@ -0,0 +1,564 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_MSVDX_H_
+#define _PSB_MSVDX_H_
+
+#define assert(expr) \
+        if(unlikely(!(expr))) {                                   \
+        printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+        #expr,__FILE__,__FUNCTION__,__LINE__);          \
+        }
+
+#define PSB_ASSERT(x) assert (x)
+#define IMG_ASSERT(x) assert (x)
+
+#include "psb_drv.h"
+int
+psb_wait_for_register (struct drm_psb_private *dev_priv,
+                       uint32_t ui32Offset,
+                       uint32_t ui32Value, uint32_t ui32Enable);
+
+void psb_msvdx_mtx_interrupt (struct drm_device *dev);
+int psb_msvdx_init (struct drm_device *dev);
+int psb_msvdx_uninit (struct drm_device *dev);
+int psb_msvdx_reset (struct drm_psb_private *dev_priv);
+uint32_t psb_get_default_pd_addr (struct psb_mmu_driver *driver);
+int psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg);
+void psb_msvdx_irq_preinstall (struct drm_psb_private *dev_priv);
+void psb_msvdx_irq_postinstall (struct drm_psb_private *dev_priv);
+void psb_msvdx_flush_cmd_queue (struct drm_device *dev);
+extern void psb_msvdx_lockup (struct drm_psb_private *dev_priv,
+                             int *msvdx_lockup, int *msvdx_idle);
+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2       /*  Non-Optimal Invalidation is not default */
+#define FW_VA_RENDER_HOST_INT          0x00004000
+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION    0x00000020
+
+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE    0x00000200
+
+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
+                                               | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
+                                                | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
+
+
+#define POULSBO_D0     0x5
+#define POULSBO_D1     0x6
+#define PSB_REVID_OFFSET 0x8
+
+#define MSVDX_FW_STATUS_HW_IDLE        0x00000001 /* There is no work currently underway on the hardware*/
+
+#define clk_enable_all         MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK                  |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK  |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK   |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK                  |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK    |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK    |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
+
+#define clk_enable_minimal     MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
+                               MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
+
+#define clk_enable_auto                MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK  |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK                 |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK   |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK   |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK                  |               \
+                                                       MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
+
+#define msvdx_sw_reset_all     MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK                                    |               \
+                                                       MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK                                 |               \
+                                                       MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK                                 |               \
+                                                       MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK                  |               \
+                                                       MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK
+
+
+#define PCI_PORT5_REG80_FFUSE                           0xD0058000
+#define MTX_CODE_BASE          (0x80900000)
+#define MTX_DATA_BASE          (0x82880000)
+#define PC_START_ADDRESS       (0x80900000)
+
+#define MTX_CORE_CODE_MEM                      (0x10 )
+#define MTX_CORE_DATA_MEM                      (0x18 )
+
+#define MTX_INTERNAL_REG( R_SPECIFIER , U_SPECIFIER )          ( ((R_SPECIFIER)<<4) | (U_SPECIFIER) )
+#define MTX_PC                 MTX_INTERNAL_REG( 0 , 5 )
+
+#define RENDEC_A_SIZE  ( 2 * 1024* 1024 )
+#define RENDEC_B_SIZE  ( RENDEC_A_SIZE / 4 )
+
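+/*
+ * Field accessor macros: the 'field' argument pastes into field##_TYPE,
+ * field##_OFFSET, field##_MASK and field##_SHIFT constants, so e.g.
+ * MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) extracts the size field of a
+ * firmware message held in memory.
+ */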
+#define MEMIO_READ_FIELD(vpMem, field)                                                                                                                                                             \
+       ((uint32_t)(((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & field##_MASK) >> field##_SHIFT))
+
+#define MEMIO_WRITE_FIELD(vpMem, field, ui32Value)                                                                                                             \
+       (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) =                                                                             \
+       ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & (field##_TYPE)~field##_MASK) |             \
+               (field##_TYPE)(( (uint32_t) (ui32Value) << field##_SHIFT) & field##_MASK);
+
+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, ui32Value)                                                                                                        \
+        (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) =                                                                            \
+       ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) |                                            \
+               (field##_TYPE) (( (uint32_t) (ui32Value) << field##_SHIFT)) );
+
+#define REGIO_READ_FIELD(ui32RegValue, reg, field)                                                     \
+       ((ui32RegValue & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
+
+#define REGIO_WRITE_FIELD(ui32RegValue, reg, field, ui32Value)                                 \
+       (ui32RegValue) =                                                                        \
+       ((ui32RegValue) & ~(reg##_##field##_MASK)) |                                            \
+               (((ui32Value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
+
+#define REGIO_WRITE_FIELD_LITE(ui32RegValue, reg, field, ui32Value)                            \
+       (ui32RegValue) =                                                                        \
+       ( (ui32RegValue) | ( (ui32Value) << (reg##_##field##_SHIFT) ) );
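+
+/*
+ * Illustrative use: REGIO_WRITE_FIELD composes a register image from the
+ * reg##_##field##_MASK/_SHIFT pairs defined below, e.g.
+ *
+ *   uint32_t ui32Cmd = 0;
+ *   REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, 1);
+ *
+ * first clears the field, then ORs in the shifted, masked value.  The
+ * _LITE variants skip the clearing step and are only safe when the field
+ * is already zero.
+ */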
+
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK         (0x00000001)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK         (0x00000002)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK          (0x00000004)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK         (0x00000008)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK           (0x00000010)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK           (0x00000020)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK          (0x00000040)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK         (0x00040000)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK                (0x00080000)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK          (0x00100000)
+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK          (0x00200000)
+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK           (0x00000100)
+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK                (0x00010000)
+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK                (0x00100000)
+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK         (0x01000000)
+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK            (0x10000000)
+
+/* MTX registers */
+#define MSVDX_MTX_ENABLE               (0x0000)
+#define MSVDX_MTX_KICKI                        (0x0088)
+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST  (0x00FC)
+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA     (0x00F8)
+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER     (0x0104)
+#define MSVDX_MTX_RAM_ACCESS_CONTROL   (0x0108)
+#define MSVDX_MTX_RAM_ACCESS_STATUS    (0x010C)
+#define MSVDX_MTX_SOFT_RESET           (0x0200)
+
+/* MSVDX registers */
+#define MSVDX_CONTROL                  (0x0600)
+#define MSVDX_INTERRUPT_CLEAR          (0x060C)
+#define MSVDX_INTERRUPT_STATUS         (0x0608)
+#define MSVDX_HOST_INTERRUPT_ENABLE    (0x0610)
+#define MSVDX_MMU_CONTROL0             (0x0680)
+#define MSVDX_MTX_RAM_BANK             (0x06F0)
+#define MSVDX_MAN_CLK_ENABLE           (0x0620)
+
+/* RENDEC registers */
+#define MSVDX_RENDEC_CONTROL0          (0x0868)
+#define MSVDX_RENDEC_CONTROL1          (0x086C)
+#define MSVDX_RENDEC_BUFFER_SIZE       (0x0870)
+#define MSVDX_RENDEC_BASE_ADDR0                (0x0874)
+#define MSVDX_RENDEC_BASE_ADDR1                (0x0878)
+#define MSVDX_RENDEC_READ_DATA         (0x0898)
+#define MSVDX_RENDEC_CONTEXT0          (0x0950)
+#define MSVDX_RENDEC_CONTEXT1          (0x0954)
+#define MSVDX_RENDEC_CONTEXT2          (0x0958)
+#define MSVDX_RENDEC_CONTEXT3          (0x095C)
+#define MSVDX_RENDEC_CONTEXT4          (0x0960)
+#define MSVDX_RENDEC_CONTEXT5          (0x0964)
+
+/*
+ * This defines the MSVDX communication buffer
+ */
+#define MSVDX_COMMS_SIGNATURE_VALUE    (0xA5A5A5A5)    /*!< Signature value */
+#define NUM_WORDS_HOST_BUF             (100)   /*!< Host buffer size (in 32-bit words) */
+#define NUM_WORDS_MTX_BUF              (100)   /*!< MTX buffer size (in 32-bit words) */
+
+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
+
+#define MSVDX_COMMS_FW_STATUS                  (MSVDX_COMMS_AREA_ADDR - 0x10)
+#define        MSVDX_COMMS_SCRATCH                             (MSVDX_COMMS_AREA_ADDR - 0x08)
+#define        MSVDX_COMMS_MSG_COUNTER                 (MSVDX_COMMS_AREA_ADDR - 0x04)
+#define        MSVDX_COMMS_SIGNATURE                   (MSVDX_COMMS_AREA_ADDR + 0x00)
+#define        MSVDX_COMMS_TO_HOST_BUF_SIZE    (MSVDX_COMMS_AREA_ADDR + 0x04)
+#define MSVDX_COMMS_TO_HOST_RD_INDEX   (MSVDX_COMMS_AREA_ADDR + 0x08)
+#define MSVDX_COMMS_TO_HOST_WRT_INDEX  (MSVDX_COMMS_AREA_ADDR + 0x0C)
+#define MSVDX_COMMS_TO_MTX_BUF_SIZE            (MSVDX_COMMS_AREA_ADDR + 0x10)
+#define MSVDX_COMMS_TO_MTX_RD_INDEX            (MSVDX_COMMS_AREA_ADDR + 0x14)
+#define MSVDX_COMMS_OFFSET_FLAGS               (MSVDX_COMMS_AREA_ADDR + 0x18)
+#define MSVDX_COMMS_TO_MTX_WRT_INDEX   (MSVDX_COMMS_AREA_ADDR + 0x1C)
+#define MSVDX_COMMS_TO_HOST_BUF                        (MSVDX_COMMS_AREA_ADDR + 0x20)
+#define MSVDX_COMMS_TO_MTX_BUF                 (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
+
+#define MSVDX_COMMS_AREA_END                   (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
+
+#if (MSVDX_COMMS_AREA_END != 0x03000)
+#error "MSVDX_COMMS_AREA_END must equal 0x03000"
+#endif
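+
+/*
+ * Layout check, derived from the definitions above: the comms area starts
+ * at 0x2CC0, the circular buffers start at +0x20, and each of the two
+ * buffers is 100 words (0x190 bytes), so the area ends at
+ * 0x2CC0 + 0x20 + 2 * 0x190 = 0x3000, which is what the #if verifies.
+ */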
+
+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK          (0x80000000)
+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT         (31)
+
+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK             (0x00010000)
+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT            (16)
+
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK            (0x0FF00000)
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT           (20)
+
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK         (0x000FFFFC)
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT                (2)
+
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK            (0x00000002)
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT           (1)
+
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK             (0x00000001)
+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT            (0)
+
+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK            (0x00000001)
+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT           (0)
+
+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK               (0x00000001)
+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT              (0)
+
+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK         (0x00000100)
+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT                (8)
+
+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK           (0x00000F00)
+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT          (8)
+
+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK         (0x00004000)
+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT                (14)
+
+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK                   (0x00000002)
+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT                  (1)
+
+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK           (0x000F0000)
+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT          (16)
+
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK              (0x0000FFFF)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT             (0)
+
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK              (0xFFFF0000)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT             (16)
+
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK            (0x000000FF)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT           (0)
+
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK         (0x000C0000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT                (18)
+
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK         (0x00030000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT                (16)
+
+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK              (0x01000000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT             (24)
+
+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK           (0x00000001)
+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT          (0)
+
+#define        FWRK_MSGID_START_PSR_HOSTMTX_MSG        (0x80)  /*!< Start of parser specific Host->MTX messages. */
+#define        FWRK_MSGID_START_PSR_MTXHOST_MSG        (0xC0)  /*!< Start of parser specific MTX->Host messages. */
+#define FWRK_MSGID_PADDING                                     ( 0 )
+
+#define FWRK_GENMSG_SIZE_TYPE          uint8_t
+#define FWRK_GENMSG_SIZE_MASK          (0xFF)
+#define FWRK_GENMSG_SIZE_SHIFT         (0)
+#define FWRK_GENMSG_SIZE_OFFSET                (0x0000)
+#define FWRK_GENMSG_ID_TYPE            uint8_t
+#define FWRK_GENMSG_ID_MASK            (0xFF)
+#define FWRK_GENMSG_ID_SHIFT           (0)
+#define FWRK_GENMSG_ID_OFFSET          (0x0001)
+#define FWRK_PADMSG_SIZE               (2)
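+
+/*
+ * Every firmware message therefore starts with a one-byte size at offset
+ * 0 and a one-byte id at offset 1, so the generic fields can be read from
+ * any message, e.g. MEMIO_READ_FIELD (pMsg, FWRK_GENMSG_ID).
+ */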
+
+/*!
+******************************************************************************
+ This type defines the framework specified message ids
+******************************************************************************/
+enum
+{
+  /*! Sent by the DXVA driver on the host to the mtx firmware.
+   */
+  VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
+  VA_MSGID_RENDER,
+  VA_MSGID_DEBLOCK,
+  VA_MSGID_OOLD,
+
+  /* Test Messages */
+  VA_MSGID_TEST1,
+  VA_MSGID_TEST2,
+
+  /*! Sent by the mtx firmware to itself.
+   */
+  VA_MSGID_RENDER_MC_INTERRUPT,
+
+  /*! Sent by the DXVA firmware on the MTX to the host.
+   */
+  VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
+  VA_MSGID_CMD_COMPLETED_BATCH,
+  VA_MSGID_DEBLOCK_REQUIRED,
+  VA_MSGID_TEST_RESPONCE,
+  VA_MSGID_ACK,
+
+  VA_MSGID_CMD_FAILED,
+  VA_MSGID_CMD_UNSUPPORTED,
+  VA_MSGID_CMD_HW_PANIC,
+};
+
+/* MSVDX Firmware interface */
+
+#define FW_VA_RENDER_SIZE              (32)
+
+// FW_VA_RENDER     MSG_SIZE
+#define FW_VA_RENDER_MSG_SIZE_ALIGNMENT                (1)
+#define FW_VA_RENDER_MSG_SIZE_TYPE             uint8_t
+#define FW_VA_RENDER_MSG_SIZE_MASK             (0xFF)
+#define FW_VA_RENDER_MSG_SIZE_LSBMASK          (0xFF)
+#define FW_VA_RENDER_MSG_SIZE_OFFSET           (0x0000)
+#define FW_VA_RENDER_MSG_SIZE_SHIFT            (0)
+
+// FW_VA_RENDER     ID
+#define FW_VA_RENDER_ID_ALIGNMENT              (1)
+#define FW_VA_RENDER_ID_TYPE           uint8_t
+#define FW_VA_RENDER_ID_MASK           (0xFF)
+#define FW_VA_RENDER_ID_LSBMASK                (0xFF)
+#define FW_VA_RENDER_ID_OFFSET         (0x0001)
+#define FW_VA_RENDER_ID_SHIFT          (0)
+
+// FW_VA_RENDER     BUFFER_SIZE
+#define FW_VA_RENDER_BUFFER_SIZE_ALIGNMENT             (2)
+#define FW_VA_RENDER_BUFFER_SIZE_TYPE          uint16_t
+#define FW_VA_RENDER_BUFFER_SIZE_MASK          (0x0FFF)
+#define FW_VA_RENDER_BUFFER_SIZE_LSBMASK               (0x0FFF)
+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET                (0x0002)
+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT         (0)
+
+// FW_VA_RENDER     MMUPTD
+#define FW_VA_RENDER_MMUPTD_ALIGNMENT          (4)
+#define FW_VA_RENDER_MMUPTD_TYPE               uint32_t
+#define FW_VA_RENDER_MMUPTD_MASK               (0xFFFFFFFF)
+#define FW_VA_RENDER_MMUPTD_LSBMASK            (0xFFFFFFFF)
+#define FW_VA_RENDER_MMUPTD_OFFSET             (0x0004)
+#define FW_VA_RENDER_MMUPTD_SHIFT              (0)
+
+// FW_VA_RENDER     LLDMA_ADDRESS
+#define FW_VA_RENDER_LLDMA_ADDRESS_ALIGNMENT           (4)
+#define FW_VA_RENDER_LLDMA_ADDRESS_TYPE                uint32_t
+#define FW_VA_RENDER_LLDMA_ADDRESS_MASK                (0xFFFFFFFF)
+#define FW_VA_RENDER_LLDMA_ADDRESS_LSBMASK             (0xFFFFFFFF)
+#define FW_VA_RENDER_LLDMA_ADDRESS_OFFSET              (0x0008)
+#define FW_VA_RENDER_LLDMA_ADDRESS_SHIFT               (0)
+
+// FW_VA_RENDER     CONTEXT
+#define FW_VA_RENDER_CONTEXT_ALIGNMENT         (4)
+#define FW_VA_RENDER_CONTEXT_TYPE              uint32_t
+#define FW_VA_RENDER_CONTEXT_MASK              (0xFFFFFFFF)
+#define FW_VA_RENDER_CONTEXT_LSBMASK           (0xFFFFFFFF)
+#define FW_VA_RENDER_CONTEXT_OFFSET            (0x000C)
+#define FW_VA_RENDER_CONTEXT_SHIFT             (0)
+
+// FW_VA_RENDER     FENCE_VALUE
+#define FW_VA_RENDER_FENCE_VALUE_ALIGNMENT             (4)
+#define FW_VA_RENDER_FENCE_VALUE_TYPE          uint32_t
+#define FW_VA_RENDER_FENCE_VALUE_MASK          (0xFFFFFFFF)
+#define FW_VA_RENDER_FENCE_VALUE_LSBMASK               (0xFFFFFFFF)
+#define FW_VA_RENDER_FENCE_VALUE_OFFSET                (0x0010)
+#define FW_VA_RENDER_FENCE_VALUE_SHIFT         (0)
+
+// FW_VA_RENDER     OPERATING_MODE
+#define FW_VA_RENDER_OPERATING_MODE_ALIGNMENT          (4)
+#define FW_VA_RENDER_OPERATING_MODE_TYPE               uint32_t
+#define FW_VA_RENDER_OPERATING_MODE_MASK               (0xFFFFFFFF)
+#define FW_VA_RENDER_OPERATING_MODE_LSBMASK            (0xFFFFFFFF)
+#define FW_VA_RENDER_OPERATING_MODE_OFFSET             (0x0014)
+#define FW_VA_RENDER_OPERATING_MODE_SHIFT              (0)
+
+// FW_VA_RENDER     FIRST_MB_IN_SLICE
+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_ALIGNMENT               (2)
+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_TYPE            uint16_t
+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_MASK            (0xFFFF)
+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_LSBMASK         (0xFFFF)
+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_OFFSET          (0x0018)
+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_SHIFT           (0)
+
+// FW_VA_RENDER     LAST_MB_IN_FRAME
+#define FW_VA_RENDER_LAST_MB_IN_FRAME_ALIGNMENT                (2)
+#define FW_VA_RENDER_LAST_MB_IN_FRAME_TYPE             uint16_t
+#define FW_VA_RENDER_LAST_MB_IN_FRAME_MASK             (0xFFFF)
+#define FW_VA_RENDER_LAST_MB_IN_FRAME_LSBMASK          (0xFFFF)
+#define FW_VA_RENDER_LAST_MB_IN_FRAME_OFFSET           (0x001A)
+#define FW_VA_RENDER_LAST_MB_IN_FRAME_SHIFT            (0)
+
+// FW_VA_RENDER     FLAGS
+#define FW_VA_RENDER_FLAGS_ALIGNMENT           (4)
+#define FW_VA_RENDER_FLAGS_TYPE                uint32_t
+#define FW_VA_RENDER_FLAGS_MASK                (0xFFFFFFFF)
+#define FW_VA_RENDER_FLAGS_LSBMASK             (0xFFFFFFFF)
+#define FW_VA_RENDER_FLAGS_OFFSET              (0x001C)
+#define FW_VA_RENDER_FLAGS_SHIFT               (0)
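+
+/*
+ * Illustrative sketch (aui32Msg and ui32Fence are hypothetical locals):
+ * a render message would be assembled with these accessors before being
+ * queued for the MTX:
+ *
+ *   uint32_t aui32Msg[FW_VA_RENDER_SIZE / sizeof (uint32_t)] = { 0 };
+ *   MEMIO_WRITE_FIELD (aui32Msg, FW_VA_RENDER_MSG_SIZE, FW_VA_RENDER_SIZE);
+ *   MEMIO_WRITE_FIELD (aui32Msg, FW_VA_RENDER_ID, VA_MSGID_RENDER);
+ *   MEMIO_WRITE_FIELD (aui32Msg, FW_VA_RENDER_FENCE_VALUE, ui32Fence);
+ */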
+
+#define FW_VA_CMD_COMPLETED_SIZE               (12)
+
+// FW_VA_CMD_COMPLETED     MSG_SIZE
+#define FW_VA_CMD_COMPLETED_MSG_SIZE_ALIGNMENT         (1)
+#define FW_VA_CMD_COMPLETED_MSG_SIZE_TYPE              uint8_t
+#define FW_VA_CMD_COMPLETED_MSG_SIZE_MASK              (0xFF)
+#define FW_VA_CMD_COMPLETED_MSG_SIZE_LSBMASK           (0xFF)
+#define FW_VA_CMD_COMPLETED_MSG_SIZE_OFFSET            (0x0000)
+#define FW_VA_CMD_COMPLETED_MSG_SIZE_SHIFT             (0)
+
+// FW_VA_CMD_COMPLETED     ID
+#define FW_VA_CMD_COMPLETED_ID_ALIGNMENT               (1)
+#define FW_VA_CMD_COMPLETED_ID_TYPE            uint8_t
+#define FW_VA_CMD_COMPLETED_ID_MASK            (0xFF)
+#define FW_VA_CMD_COMPLETED_ID_LSBMASK         (0xFF)
+#define FW_VA_CMD_COMPLETED_ID_OFFSET          (0x0001)
+#define FW_VA_CMD_COMPLETED_ID_SHIFT           (0)
+
+// FW_VA_CMD_COMPLETED     FENCE_VALUE
+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_ALIGNMENT              (4)
+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE           uint32_t
+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK           (0xFFFFFFFF)
+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_LSBMASK                (0xFFFFFFFF)
+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET         (0x0004)
+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT          (0)
+
+// FW_VA_CMD_COMPLETED     FLAGS
+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT            (4)
+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE         uint32_t
+#define FW_VA_CMD_COMPLETED_FLAGS_MASK         (0xFFFFFFFF)
+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK              (0xFFFFFFFF)
+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET               (0x0008)
+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT                (0)
+
+#define FW_VA_CMD_FAILED_SIZE          (12)
+
+// FW_VA_CMD_FAILED     MSG_SIZE
+#define FW_VA_CMD_FAILED_MSG_SIZE_ALIGNMENT            (1)
+#define FW_VA_CMD_FAILED_MSG_SIZE_TYPE         uint8_t
+#define FW_VA_CMD_FAILED_MSG_SIZE_MASK         (0xFF)
+#define FW_VA_CMD_FAILED_MSG_SIZE_LSBMASK              (0xFF)
+#define FW_VA_CMD_FAILED_MSG_SIZE_OFFSET               (0x0000)
+#define FW_VA_CMD_FAILED_MSG_SIZE_SHIFT                (0)
+
+// FW_VA_CMD_FAILED     ID
+#define FW_VA_CMD_FAILED_ID_ALIGNMENT          (1)
+#define FW_VA_CMD_FAILED_ID_TYPE               uint8_t
+#define FW_VA_CMD_FAILED_ID_MASK               (0xFF)
+#define FW_VA_CMD_FAILED_ID_LSBMASK            (0xFF)
+#define FW_VA_CMD_FAILED_ID_OFFSET             (0x0001)
+#define FW_VA_CMD_FAILED_ID_SHIFT              (0)
+
+// FW_VA_CMD_FAILED     FLAGS
+#define FW_VA_CMD_FAILED_FLAGS_ALIGNMENT               (2)
+#define FW_VA_CMD_FAILED_FLAGS_TYPE            uint16_t
+#define FW_VA_CMD_FAILED_FLAGS_MASK            (0xFFFF)
+#define FW_VA_CMD_FAILED_FLAGS_LSBMASK         (0xFFFF)
+#define FW_VA_CMD_FAILED_FLAGS_OFFSET          (0x0002)
+#define FW_VA_CMD_FAILED_FLAGS_SHIFT           (0)
+
+// FW_VA_CMD_FAILED     FENCE_VALUE
+#define FW_VA_CMD_FAILED_FENCE_VALUE_ALIGNMENT         (4)
+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE              uint32_t
+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK              (0xFFFFFFFF)
+#define FW_VA_CMD_FAILED_FENCE_VALUE_LSBMASK           (0xFFFFFFFF)
+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET            (0x0004)
+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT             (0)
+
+// FW_VA_CMD_FAILED     IRQSTATUS
+#define FW_VA_CMD_FAILED_IRQSTATUS_ALIGNMENT           (4)
+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE                uint32_t
+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK                (0xFFFFFFFF)
+#define FW_VA_CMD_FAILED_IRQSTATUS_LSBMASK             (0xFFFFFFFF)
+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET              (0x0008)
+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT               (0)
+
+#define FW_VA_DEBLOCK_REQUIRED_SIZE            (8)
+
+// FW_VA_DEBLOCK_REQUIRED     MSG_SIZE
+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_ALIGNMENT              (1)
+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_TYPE           uint8_t
+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_MASK           (0xFF)
+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_LSBMASK                (0xFF)
+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_OFFSET         (0x0000)
+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_SHIFT          (0)
+
+// FW_VA_DEBLOCK_REQUIRED     ID
+#define FW_VA_DEBLOCK_REQUIRED_ID_ALIGNMENT            (1)
+#define FW_VA_DEBLOCK_REQUIRED_ID_TYPE         uint8_t
+#define FW_VA_DEBLOCK_REQUIRED_ID_MASK         (0xFF)
+#define FW_VA_DEBLOCK_REQUIRED_ID_LSBMASK              (0xFF)
+#define FW_VA_DEBLOCK_REQUIRED_ID_OFFSET               (0x0001)
+#define FW_VA_DEBLOCK_REQUIRED_ID_SHIFT                (0)
+
+// FW_VA_DEBLOCK_REQUIRED     CONTEXT
+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_ALIGNMENT               (4)
+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE            uint32_t
+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK            (0xFFFFFFFF)
+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_LSBMASK         (0xFFFFFFFF)
+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET          (0x0004)
+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT           (0)
+
+#define FW_VA_HW_PANIC_SIZE            (12)
+
+// FW_VA_HW_PANIC     FLAGS
+#define FW_VA_HW_PANIC_FLAGS_ALIGNMENT         (2)
+#define FW_VA_HW_PANIC_FLAGS_TYPE              uint16_t
+#define FW_VA_HW_PANIC_FLAGS_MASK              (0xFFFF)
+#define FW_VA_HW_PANIC_FLAGS_LSBMASK           (0xFFFF)
+#define FW_VA_HW_PANIC_FLAGS_OFFSET            (0x0002)
+#define FW_VA_HW_PANIC_FLAGS_SHIFT             (0)
+
+// FW_VA_HW_PANIC     MSG_SIZE
+#define FW_VA_HW_PANIC_MSG_SIZE_ALIGNMENT              (1)
+#define FW_VA_HW_PANIC_MSG_SIZE_TYPE           uint8_t
+#define FW_VA_HW_PANIC_MSG_SIZE_MASK           (0xFF)
+#define FW_VA_HW_PANIC_MSG_SIZE_LSBMASK                (0xFF)
+#define FW_VA_HW_PANIC_MSG_SIZE_OFFSET         (0x0000)
+#define FW_VA_HW_PANIC_MSG_SIZE_SHIFT          (0)
+
+// FW_VA_HW_PANIC     ID
+#define FW_VA_HW_PANIC_ID_ALIGNMENT            (1)
+#define FW_VA_HW_PANIC_ID_TYPE         uint8_t
+#define FW_VA_HW_PANIC_ID_MASK         (0xFF)
+#define FW_VA_HW_PANIC_ID_LSBMASK              (0xFF)
+#define FW_VA_HW_PANIC_ID_OFFSET               (0x0001)
+#define FW_VA_HW_PANIC_ID_SHIFT                (0)
+
+// FW_VA_HW_PANIC     FENCE_VALUE
+#define FW_VA_HW_PANIC_FENCE_VALUE_ALIGNMENT           (4)
+#define FW_VA_HW_PANIC_FENCE_VALUE_TYPE                uint32_t
+#define FW_VA_HW_PANIC_FENCE_VALUE_MASK                (0xFFFFFFFF)
+#define FW_VA_HW_PANIC_FENCE_VALUE_LSBMASK             (0xFFFFFFFF)
+#define FW_VA_HW_PANIC_FENCE_VALUE_OFFSET              (0x0004)
+#define FW_VA_HW_PANIC_FENCE_VALUE_SHIFT               (0)
+
+// FW_VA_HW_PANIC     IRQSTATUS
+#define FW_VA_HW_PANIC_IRQSTATUS_ALIGNMENT             (4)
+#define FW_VA_HW_PANIC_IRQSTATUS_TYPE          uint32_t
+#define FW_VA_HW_PANIC_IRQSTATUS_MASK          (0xFFFFFFFF)
+#define FW_VA_HW_PANIC_IRQSTATUS_LSBMASK               (0xFFFFFFFF)
+#define FW_VA_HW_PANIC_IRQSTATUS_OFFSET                (0x0008)
+#define FW_VA_HW_PANIC_IRQSTATUS_SHIFT         (0)
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_msvdxinit.c b/psb-kernel-source-4.41.1/psb_msvdxinit.c
new file mode 100644 (file)
index 0000000..d923446
--- /dev/null
@@ -0,0 +1,625 @@
+/**
+ * @file psb_msvdxinit.c
+ * MSVDX initialization and mtx-firmware upload
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "drm.h"
+#include "psb_drv.h"
+#include "psb_msvdx.h"
+#include <linux/firmware.h>
+
+/*MSVDX FW header*/
+struct msvdx_fw
+{
+  uint32_t ver;
+  uint32_t text_size;
+  uint32_t data_size;
+  uint32_t data_location;
+};
+
+int
+psb_wait_for_register (struct drm_psb_private *dev_priv,
+                      uint32_t ui32Offset,
+                      uint32_t ui32Value, uint32_t ui32Enable)
+{
+  uint32_t ui32Temp;
+  uint32_t ui32PollCount = 1000;
+  while (ui32PollCount)
+    {
+      ui32Temp = PSB_RMSVDX32 (ui32Offset);
+      if (ui32Value == (ui32Temp & ui32Enable))        /* All enabled bits match the required value */
+       return 0;               /* so we are done */
+
+      /* Wait a bit */
+      DRM_UDELAY (100);
+      ui32PollCount--;
+    }
+  PSB_DEBUG_GENERAL
+    ("MSVDX: Timeout while waiting for register %08x: expecting %08x (mask %08x), got %08x\n",
+     ui32Offset, ui32Value, ui32Enable, ui32Temp);
+  return 1;
+}
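+
+/*
+ * With 1000 polls of DRM_UDELAY (100) each, psb_wait_for_register gives
+ * a register roughly 100 ms to reach the required value before reporting
+ * a timeout.
+ */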
+
+int
+psb_poll_mtx_irq (struct drm_psb_private *dev_priv)
+{
+  int ret = 0;
+  uint32_t MtxInt = 0;
+  REGIO_WRITE_FIELD_LITE (MtxInt, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
+
+  ret = psb_wait_for_register (dev_priv, MSVDX_INTERRUPT_STATUS, MtxInt,       /* Required value */
+                              MtxInt /* Enabled bits */ );
+  if (ret)
+    {
+      PSB_DEBUG_GENERAL
+       ("MSVDX: Error - MTX did not return an interrupt within a reasonable time\n");
+
+      return ret;
+    }
+
+  PSB_DEBUG_GENERAL ("MSVDX: Got MTX Int\n");
+
+  /* Got it so clear the bit */
+  PSB_WMSVDX32 (MtxInt, MSVDX_INTERRUPT_CLEAR);
+
+  return ret;
+}
+
+void
+psb_write_mtx_core_reg (struct drm_psb_private *dev_priv,
+                       const uint32_t ui32CoreRegister,
+                       const uint32_t ui32Val)
+{
+  uint32_t ui32Reg = 0;
+
+  /* Put data in MTX_RW_DATA */
+  PSB_WMSVDX32 (ui32Val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
+
+  /* DREADY is set to 0 and request a write */
+  ui32Reg = ui32CoreRegister;
+  REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
+                         MTX_RNW, 0);
+  REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
+                         MTX_DREADY, 0);
+  PSB_WMSVDX32 (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
+
+  psb_wait_for_register (dev_priv, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,       /* Required Value */
+                        MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
+}
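+
+/*
+ * The core-register specifier comes from MTX_INTERNAL_REG(); the program
+ * counter written during firmware setup is MTX_PC = MTX_INTERNAL_REG (0, 5)
+ * = (0 << 4) | 5 = 0x05.
+ */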
+
+void
+psb_upload_fw (struct drm_psb_private *dev_priv, const uint32_t ui32DataMem,
+              uint32_t ui32RamBankSize, uint32_t ui32Address,
+              const unsigned int uiWords, const uint32_t * const pui32Data)
+{
+  uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
+    (uint32_t) ~ 0;
+  uint32_t ui32AccessControl;
+
+  /* Save the access control register... */
+  ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
+
+  /* Wait for MCMSTAT to become idle (value 1) */
+  psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1,     /* Required Value */
+                        0xffffffff /* Enables */ );
+
+  for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
+    {
+      ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
+
+      if (ui32RamId != ui32CurrBank)
+       {
+         ui32Addr = ui32Address >> 2;
+
+         ui32Ctrl = 0;
+
+         REGIO_WRITE_FIELD_LITE (ui32Ctrl,
+                                 MSVDX_MTX_RAM_ACCESS_CONTROL,
+                                 MTX_MCMID, ui32RamId);
+         REGIO_WRITE_FIELD_LITE (ui32Ctrl,
+                                 MSVDX_MTX_RAM_ACCESS_CONTROL,
+                                 MTX_MCM_ADDR, ui32Addr);
+         REGIO_WRITE_FIELD_LITE (ui32Ctrl,
+                                 MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
+
+         PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
+
+         ui32CurrBank = ui32RamId;
+       }
+      ui32Address += 4;
+
+      PSB_WMSVDX32 (pui32Data[ui32Loop], MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
+
+      /* Wait for MCMSTAT to become idle (value 1) */
+      psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
+                            0xffffffff /* Enables */ );
+    }
+  PSB_DEBUG_GENERAL ("MSVDX: Upload done\n");
+
+  /* Restore the access control register... */
+  PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
+}
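+
+/*
+ * Note on the upload loop: the RAM bank id is recomputed from the byte
+ * address on every iteration, but because MTX_MCMAI enables address
+ * auto-increment, the access-control register is only rewritten when the
+ * address crosses a bank boundary; within a bank each write to
+ * MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER advances the address automatically.
+ */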
+
+static int
+psb_verify_fw (struct drm_psb_private *dev_priv,
+              const uint32_t ui32RamBankSize,
+              const uint32_t ui32DataMem, uint32_t ui32Address,
+              const uint32_t uiWords, const uint32_t * const pui32Data)
+{
+  uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
+    (uint32_t) ~ 0;
+  uint32_t ui32AccessControl;
+  int ret = 0;
+
+  /* Save the access control register... */
+  ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
+
+  /* Wait for MCMSTAT to become idle (value 1) */
+  psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1,     /* Required Value */
+                        0xffffffff /* Enables */ );
+
+  for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
+    {
+      uint32_t ui32ReadBackVal;
+      ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
+
+      if (ui32RamId != ui32CurrBank)
+       {
+         ui32Addr = ui32Address >> 2;
+         ui32Ctrl = 0;
+         REGIO_WRITE_FIELD_LITE (ui32Ctrl,
+                                 MSVDX_MTX_RAM_ACCESS_CONTROL,
+                                 MTX_MCMID, ui32RamId);
+         REGIO_WRITE_FIELD_LITE (ui32Ctrl,
+                                 MSVDX_MTX_RAM_ACCESS_CONTROL,
+                                 MTX_MCM_ADDR, ui32Addr);
+         REGIO_WRITE_FIELD_LITE (ui32Ctrl,
+                                 MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
+         REGIO_WRITE_FIELD_LITE (ui32Ctrl,
+                                 MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMR, 1);
+
+         PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
+
+         ui32CurrBank = ui32RamId;
+       }
+      ui32Address += 4;
+
+      /* Wait for MCMSTAT to become idle (value 1) */
+      psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
+                            0xffffffff /* Enables */ );
+
+      ui32ReadBackVal = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
+      if (pui32Data[ui32Loop] != ui32ReadBackVal)
+       {
+         DRM_ERROR
+           ("psb: Firmware validation fails at index=%08x\n", ui32Loop);
+         ret = 1;
+         break;
+       }
+    }
+
+  /* Restore the access control register... */
+  PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
+
+  return ret;
+}
+
+static uint32_t *
+msvdx_get_fw (struct drm_device *dev,
+             const struct firmware **raw, const char *name)
+{
+  int rc;
+  uint32_t *ptr = NULL;
+
+  rc = request_firmware (raw, name, &dev->pdev->dev);
+  if (rc < 0)
+    {
+      DRM_ERROR ("MSVDX: %s request_firmware failed: Reason %d\n", name, rc);
+      return NULL;
+    }
+
+  if ((*raw)->size < sizeof (struct msvdx_fw))
+    {
+      PSB_DEBUG_GENERAL ("MSVDX: %s is not the correct size (%zd)\n",
+                        name, (*raw)->size);
+      return NULL;
+    }
+
+  ptr = (uint32_t *) (*raw)->data;
+
+  if (!ptr)
+    {
+      PSB_DEBUG_GENERAL ("MSVDX: Failed to load %s\n", name);
+      return NULL;
+    }
+  /*another sanity check... */
+  if ((*raw)->size !=
+      (sizeof (struct msvdx_fw) +
+       sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
+       sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->data_size))
+    {
+      PSB_DEBUG_GENERAL ("MSVDX: %s is not the correct size (%zd)\n",
+                        name, (*raw)->size);
+      return NULL;
+    }
+  return ptr;
+}
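+
+/*
+ * msvdx_get_fw thus expects the blob to be a struct msvdx_fw header
+ * followed by text_size 32-bit words of code and data_size 32-bit words
+ * of data, and rejects anything whose total size disagrees.
+ */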
+
+static int
+psb_setup_fw (struct drm_device *dev)
+{
+  struct drm_psb_private *dev_priv = dev->dev_private;
+  int ret = 0;
+
+  uint32_t ram_bank_size;
+  struct msvdx_fw *fw;
+  uint32_t *fw_ptr = NULL;
+  uint32_t *text_ptr = NULL;
+  uint32_t *data_ptr = NULL;
+  const struct firmware *raw = NULL;
+  /* TODO: assert the clock is on - if not, turn it on to upload code */
+
+  PSB_DEBUG_GENERAL ("MSVDX: psb_setup_fw\n");
+
+  /* Reset MTX */
+  PSB_WMSVDX32 (MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK, MSVDX_MTX_SOFT_RESET);
+
+  /* Initialise the communication control area to 0 */
+  if(dev_priv->psb_rev_id >= POULSBO_D1)
+   {
+       PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1 or later revision.\n");
+       PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1, MSVDX_COMMS_OFFSET_FLAGS);
+   }
+  else 
+   {
+       PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0 or earlier revision.\n");
+        PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0, MSVDX_COMMS_OFFSET_FLAGS);
+   }
+
+  PSB_WMSVDX32 (0, MSVDX_COMMS_MSG_COUNTER);
+  PSB_WMSVDX32 (0, MSVDX_COMMS_SIGNATURE);
+  PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_RD_INDEX);
+  PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
+  PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_RD_INDEX);
+  PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+  PSB_WMSVDX32 (0, MSVDX_COMMS_FW_STATUS);
+
+  /* read register bank size */
+  {
+    uint32_t ui32BankSize, ui32Reg;
+    ui32Reg = PSB_RMSVDX32 (MSVDX_MTX_RAM_BANK);
+    ui32BankSize =
+      REGIO_READ_FIELD (ui32Reg, MSVDX_MTX_RAM_BANK, CR_MTX_RAM_BANK_SIZE);
+    ram_bank_size = (uint32_t) (1 << (ui32BankSize + 2));
+  }
+
+  PSB_DEBUG_GENERAL ("MSVDX: RAM bank size = %d bytes\n", ram_bank_size);
+
+  fw_ptr = msvdx_get_fw (dev, &raw, "msvdx_fw.bin");
+
+  if (!fw_ptr)
+    {
+      DRM_ERROR ("psb: No valid msvdx_fw.bin firmware found.\n");
+      ret = 1;
+      goto out;
+    }
+
+  fw = (struct msvdx_fw *) fw_ptr;
+  if (fw->ver != 0x02)
+    {
+      DRM_ERROR
+       ("psb: msvdx_fw.bin firmware version mismatch, got version=%02x expected version=%02x\n",
+        fw->ver, 0x02);
+      ret = 1;
+      goto out;
+    }
+
+  text_ptr = (uint32_t *) ((uint8_t *) fw_ptr + sizeof (struct msvdx_fw));
+  data_ptr = text_ptr + fw->text_size;
+
+  PSB_DEBUG_GENERAL ("MSVDX: Retrieved pointers for firmware\n");
+  PSB_DEBUG_GENERAL ("MSVDX: text_size: %d\n", fw->text_size);
+  PSB_DEBUG_GENERAL ("MSVDX: data_size: %d\n", fw->data_size);
+  PSB_DEBUG_GENERAL ("MSVDX: data_location: 0x%x\n", fw->data_location);
+  PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of text: 0x%x\n", *text_ptr);
+  PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of data: 0x%x\n", *data_ptr);
+
+  PSB_DEBUG_GENERAL ("MSVDX: Uploading firmware\n");
+  psb_upload_fw (dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
+                PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr);
+  psb_upload_fw (dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
+                fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr);
+
+  /* TODO: verify the code upload, possibly only in debug builds */
+  if (psb_verify_fw
+      (dev_priv, ram_bank_size, MTX_CORE_CODE_MEM,
+       PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr))
+    {
+      /* Firmware code upload failed */
+      ret = 1;
+      goto out;
+    }
+  if (psb_verify_fw
+      (dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
+       fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr))
+    {
+      /* Firmware data upload failed */
+      ret = 1;
+      goto out;
+    }
+
+  /*      -- Set starting PC address      */
+  psb_write_mtx_core_reg (dev_priv, MTX_PC, PC_START_ADDRESS);
+
+  /*      -- Turn on the thread   */
+  PSB_WMSVDX32 (MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
+
+  /* Wait for the signature value to be written back */
+  ret = psb_wait_for_register (dev_priv, MSVDX_COMMS_SIGNATURE, MSVDX_COMMS_SIGNATURE_VALUE,   /* Required value */
+                              0xffffffff /* Enabled bits */ );
+  if (ret)
+    {
+      DRM_ERROR ("psb: MSVDX firmware fails to initialize.\n");
+      goto out;
+    }
+
+  PSB_DEBUG_GENERAL ("MSVDX: MTX Initial indications OK\n");
+  PSB_DEBUG_GENERAL ("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
+                    MSVDX_COMMS_AREA_ADDR);
+out:
+  if (raw)
+    {
+      PSB_DEBUG_GENERAL ("MSVDX: releasing firmware resources...\n");
+      release_firmware (raw);
+    }
+  return ret;
+}
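+
+/*
+ * Boot sequence summary: reset the MTX, initialise the comms area, upload
+ * and verify the text and data sections, point the MTX program counter at
+ * PC_START_ADDRESS, enable the thread, then wait for the firmware to write
+ * MSVDX_COMMS_SIGNATURE_VALUE back as a liveness check.
+ */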
+
+static void
+psb_free_ccb (struct drm_buffer_object **ccb)
+{
+  drm_bo_usage_deref_unlocked (ccb);
+  *ccb = NULL;
+}
+
+/*******************************************************************************
+
+ @Function     psb_msvdx_reset
+
+ @Description
+
+ Reset chip and disable interrupts.
+
+ @Input dev_priv - private device info structure
+
+ @Return  0 - Success
+         1 - Failure
+
+******************************************************************************/
+int
+psb_msvdx_reset (struct drm_psb_private *dev_priv)
+{
+  int ret = 0;
+
+  /* Issue software reset */
+  PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
+
+  ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0,     /* Required value */
+                              MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK
+                              /* Enabled bits */ );
+
+  if (!ret)
+    {
+      /* Clear interrupt enabled flag */
+      PSB_WMSVDX32 (0, MSVDX_HOST_INTERRUPT_ENABLE);
+
+      /* Clear any pending interrupt flags                                                                                    */
+      PSB_WMSVDX32 (0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
+    }
+  
+  mutex_destroy (&dev_priv->msvdx_mutex);
+
+  return ret;
+}
+
+static int
+psb_allocate_ccb (struct drm_device *dev,
+                 struct drm_buffer_object **ccb,
+                 uint32_t * base_addr, int size)
+{
+  int ret;
+  struct drm_bo_kmap_obj tmp_kmap;
+  int is_iomem;
+
+  ret = drm_buffer_object_create (dev, size,
+                                 drm_bo_type_kernel,
+                                 DRM_BO_FLAG_READ |
+                                 DRM_PSB_FLAG_MEM_KERNEL |
+                                 DRM_BO_FLAG_NO_EVICT,
+                                 DRM_BO_HINT_DONT_FENCE, 0, 0, ccb);
+  if (ret)
+    {
+      PSB_DEBUG_GENERAL ("Failed to allocate CCB.\n");
+      *ccb = NULL;
+      return 1;
+    }
+
+  ret = drm_bo_kmap (*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
+  if (ret)
+    {
+      PSB_DEBUG_GENERAL ("drm_bo_kmap failed ret: %d\n", ret);
+      drm_bo_usage_deref_unlocked (ccb);
+      *ccb = NULL;
+      return 1;
+    }
+
+  memset (drm_bmo_virtual (&tmp_kmap, &is_iomem), 0, size);
+  drm_bo_kunmap (&tmp_kmap);
+
+  *base_addr = (*ccb)->offset;
+  return 0;
+}
+
+int
+psb_msvdx_init (struct drm_device *dev)
+{
+  struct drm_psb_private *dev_priv = dev->dev_private;
+  uint32_t ui32Cmd;
+  int ret;
+
+  PSB_DEBUG_GENERAL ("MSVDX: psb_msvdx_init\n");
+
+  /*Initialize command msvdx queueing */
+  INIT_LIST_HEAD (&dev_priv->msvdx_queue);
+  mutex_init (&dev_priv->msvdx_mutex);
+  spin_lock_init (&dev_priv->msvdx_lock);
+  dev_priv->msvdx_busy = 0;
+
+  /*figure out the stepping*/
+  pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &dev_priv->psb_rev_id );
+
+  /* Enable Clocks */
+  PSB_DEBUG_GENERAL ("Enabling clocks\n");
+  PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
+
+  /* Enable MMU by removing all bypass bits */
+  PSB_WMSVDX32 (0, MSVDX_MMU_CONTROL0);
+
+  PSB_DEBUG_GENERAL ("MSVDX: Setting up RENDEC\n");
+  /* Allocate device virtual memory as required by rendec.... */
+  if (!dev_priv->ccb0)
+    {
+      ret =
+       psb_allocate_ccb (dev, &dev_priv->ccb0,
+                         &dev_priv->base_addr0, RENDEC_A_SIZE);
+      if (ret)
+       goto err_exit;
+    }
+
+  if (!dev_priv->ccb1)
+    {
+      ret =
+       psb_allocate_ccb (dev, &dev_priv->ccb1,
+                         &dev_priv->base_addr1, RENDEC_B_SIZE);
+      if (ret)
+       goto err_exit;
+    }
+
+  PSB_DEBUG_GENERAL ("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
+                    dev_priv->base_addr0, dev_priv->base_addr1);
+
+  PSB_WMSVDX32 (dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
+  PSB_WMSVDX32 (dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
+
+  ui32Cmd = 0;
+  REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
+                    RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
+  REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
+                    RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE);
+
+  ui32Cmd = 0;
+  REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
+                    RENDEC_DECODE_START_SIZE, 0);
+  REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_W, 1);
+  REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_R, 1);
+  REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
+                    RENDEC_EXTERNAL_MEMORY, 1);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL1);
+
+  ui32Cmd = 0x00101010;
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT0);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT1);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT2);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT3);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT4);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT5);
+
+  ui32Cmd = 0;
+  REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, 1);
+  PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL0);
+
+  ret = psb_setup_fw (dev);
+  if (ret)
+    goto err_exit;
+
+  PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
+
+  return 0;
+
+err_exit:
+  if (dev_priv->ccb0)
+    psb_free_ccb (&dev_priv->ccb0);
+  if (dev_priv->ccb1)
+    psb_free_ccb (&dev_priv->ccb1);
+
+  return 1;
+}
+
+int
+psb_msvdx_uninit (struct drm_device *dev)
+{
+  struct drm_psb_private *dev_priv = dev->dev_private;
+
+  /*Reset MSVDX chip */
+  psb_msvdx_reset (dev_priv);
+
+//  PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
+    printk(KERN_INFO "MSVDX: setting the clock to 0 in %s\n", __FUNCTION__);
+    PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
+
+  /*Clean up resources...*/
+  if (dev_priv->ccb0)
+    psb_free_ccb (&dev_priv->ccb0);
+  if (dev_priv->ccb1)
+    psb_free_ccb (&dev_priv->ccb1);
+
+  return 0;
+}
+
+int psb_hw_info_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+    struct drm_psb_private *dev_priv = dev->dev_private;
+    struct drm_psb_hw_info *hw_info = data;
+    struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
+
+    hw_info->rev_id = dev_priv->psb_rev_id;
+   
+    /*read the fuse info to determine the caps*/
+    pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
+    pci_read_config_dword(pci_root, 0xD4, &hw_info->caps);
+
+    PSB_DEBUG_GENERAL("MSVDX: PSB caps: 0x%x\n", hw_info->caps);
+    return 0;
+}
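+
+/*
+ * The fuse read above goes through the config space of the bus 0, slot 0
+ * root device: PCI_PORT5_REG80_FFUSE written to config offset 0xD0 appears
+ * to select the fuse register, and the capability bits are then read back
+ * from offset 0xD4.
+ */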
diff --git a/psb-kernel-source-4.41.1/psb_reg.h b/psb-kernel-source-4.41.1/psb_reg.h
new file mode 100644 (file)
index 0000000..eb8a616
--- /dev/null
@@ -0,0 +1,562 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL                0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG   (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK  (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK  (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT  (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK   (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK  (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK  (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT  (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK   (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED   (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED  (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO      (2)
+
+#define PSB_CR_CORE_ID                   0x0010
+#define _PSB_CC_ID_ID_SHIFT              (16)
+#define _PSB_CC_ID_ID_MASK               (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT          (0)
+#define _PSB_CC_ID_CONFIG_MASK           (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION               0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT    (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK     (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT       (16)
+#define _PSB_CC_REVISION_MAJOR_MASK        (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT       (8)
+#define _PSB_CC_REVISION_MINOR_MASK        (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK  (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1       0x0018
+
+#define PSB_CR_SOFT_RESET                0x0080
+#define _PSB_CS_RESET_TSP_RESET          (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET          (1 << 5)
+#define _PSB_CS_RESET_USE_RESET          (1 << 4)
+#define _PSB_CS_RESET_TA_RESET           (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET          (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET         (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET          (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2       0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2        0x0110
+
+#define PSB_CR_EVENT_STATUS2             0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2         0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT     (1 << 4)
+
+#define PSB_CR_EVENT_STATUS              0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE         0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR          0x0134
+#define _PSB_CE_MASTER_INTERRUPT         (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT             (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE            (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS    (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE          (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER       (1 << 18)
+#define _PSB_CE_SW_EVENT                 (1 << 14)
+#define _PSB_CE_TA_FINISHED              (1 << 13)
+#define _PSB_CE_TA_TERMINATE             (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH   (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL    (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT     (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE          (1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK              0x0007FFFF
+#define PSB_USE_OFFSET_SIZE              (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0            0x0A0C
+#define PSB_CR_USE_CODE_BASE1            0x0A10
+#define PSB_CR_USE_CODE_BASE2            0x0A14
+#define PSB_CR_USE_CODE_BASE3            0x0A18
+#define PSB_CR_USE_CODE_BASE4            0x0A1C
+#define PSB_CR_USE_CODE_BASE5            0x0A20
+#define PSB_CR_USE_CODE_BASE6            0x0A24
+#define PSB_CR_USE_CODE_BASE7            0x0A28
+#define PSB_CR_USE_CODE_BASE8            0x0A2C
+#define PSB_CR_USE_CODE_BASE9            0x0A30
+#define PSB_CR_USE_CODE_BASE10           0x0A34
+#define PSB_CR_USE_CODE_BASE11           0x0A38
+#define PSB_CR_USE_CODE_BASE12           0x0A3C
+#define PSB_CR_USE_CODE_BASE13           0x0A40
+#define PSB_CR_USE_CODE_BASE14           0x0A44
+#define PSB_CR_USE_CODE_BASE15           0x0A48
+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
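+/* PSB_CR_USE_CODE_BASE(_i) computes the same addresses as the sixteen
+ * explicit BASE0..BASE15 definitions above: 0x0A0C + 4 * _i. */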
+#define _PSB_CUC_BASE_DM_SHIFT           (25)
+#define _PSB_CUC_BASE_DM_MASK            (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT         (0) // 1024-bit aligned address?
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT    (7)
+#define _PSB_CUC_BASE_ADDR_MASK          (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX              (0)
+#define _PSB_CUC_DM_PIXEL               (1)
+#define _PSB_CUC_DM_RESERVED            (2)
+#define _PSB_CUC_DM_EDM                         (3)
+
+#define PSB_CR_PDS_EXEC_BASE             0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) // 1MB aligned address
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
+
+#define PSB_CR_EVENT_KICKER              0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT     (4) // 128-bit aligned address
+
+#define PSB_CR_EVENT_KICK                0x0AC8
+#define _PSB_CE_KICK_NOW                 (1 << 0)
+
+
+#define PSB_CR_BIF_DIR_LIST_BASE1        0x0C38
+
+#define PSB_CR_BIF_CTRL                  0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT         (1 << 4)
+#define _PSB_CB_CTRL_INVALDC             (1 << 3)
+#define _PSB_CB_CTRL_FLUSH               (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT              0x0C04
+
+#define PSB_CR_BIF_FAULT                 0x0C08
+#define _PSB_CBI_STAT_PF_N_RW            (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT        (0)
+#define _PSB_CBI_STAT_FAULT_MASK         (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE        (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA           (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM          (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D           (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE          (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP          (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP          (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS      (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST         (1 << 9)
+
+#define PSB_CR_BIF_BANK0                 0x0C78
+
+#define PSB_CR_BIF_BANK1                 0x0C7C
+
+#define PSB_CR_BIF_DIR_LIST_BASE0        0x0C84
+
+#define PSB_CR_BIF_TWOD_REQ_BASE         0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE           0x0CAC
+
+#define PSB_CR_2D_SOCIF                  0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT    (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK     (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY              (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS            0x0E04
+#define _PSB_C2B_STATUS_BUSY             (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT   (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK    (0xFFFFFF << 0)
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define        PSB_2D_CLIP_BH                   (0x00000000)
+#define        PSB_2D_PAT_BH                    (0x10000000)
+#define        PSB_2D_CTRL_BH                   (0x20000000)
+#define        PSB_2D_SRC_OFF_BH                (0x30000000)
+#define        PSB_2D_MASK_OFF_BH               (0x40000000)
+#define        PSB_2D_RESERVED1_BH              (0x50000000)
+#define        PSB_2D_RESERVED2_BH              (0x60000000)
+#define        PSB_2D_FENCE_BH                  (0x70000000)
+#define        PSB_2D_BLIT_BH                   (0x80000000)
+#define        PSB_2D_SRC_SURF_BH               (0x90000000)
+#define        PSB_2D_DST_SURF_BH               (0xA0000000)
+#define        PSB_2D_PAT_SURF_BH               (0xB0000000)
+#define        PSB_2D_SRC_PAL_BH                (0xC0000000)
+#define        PSB_2D_PAT_PAL_BH                (0xD0000000)
+#define        PSB_2D_MASK_SURF_BH              (0xE0000000)
+#define        PSB_2D_FLUSH_BH                  (0xF0000000)
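+
+/*
+ * The top four bits of the first dword of each 2D slave-port block select
+ * the object type above; the remaining 28 bits are type-specific, as laid
+ * out in the per-block definitions that follow.
+ */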
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX             (1)
+#define PSB_2D_CLIPCOUNT_MASK            (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK         (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT           (0)
+// clip rectangle min & max
+#define PSB_2D_CLIP_XMAX_MASK            (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK         (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT           (12)
+#define PSB_2D_CLIP_XMIN_MASK            (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK         (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT           (0)
+// clip rectangle offset
+#define PSB_2D_CLIP_YMAX_MASK            (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK         (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT           (12)
+#define PSB_2D_CLIP_YMIN_MASK            (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK         (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT           (0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK           (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT          (0)
+#define PSB_2D_PAT_WIDTH_MASK            (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT           (5)
+#define PSB_2D_PAT_YSTART_MASK           (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT          (10)
+#define PSB_2D_PAT_XSTART_MASK           (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT          (15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+// Present Flags
+#define PSB_2D_SRCCK_CTRL                (0x00000001)
+#define PSB_2D_DSTCK_CTRL                (0x00000002)
+#define PSB_2D_ALPHA_CTRL                (0x00000004)
+// Colour Key Colour (SRC/DST)
+#define PSB_2D_CK_COL_MASK               (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK            (0x00000000)
+#define PSB_2D_CK_COL_SHIFT              (0)
+// Colour Key Mask (SRC/DST)
+#define PSB_2D_CK_MASK_MASK              (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK           (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT             (0)
+// Alpha Control (Alpha/RGB)
+#define PSB_2D_GBLALPHA_MASK             (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK          (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT            (12)
+#define PSB_2D_SRCALPHA_OP_MASK          (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK       (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT         (20)
+#define PSB_2D_SRCALPHA_OP_ONE           (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC           (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST           (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG            (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG            (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL           (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO          (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT           (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR       (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK          (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK       (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT         (24)
+#define PSB_2D_DSTALPHA_OP_ONE           (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC           (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST           (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG            (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG            (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL           (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO          (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT           (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR       (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE  (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE   (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK  (0xDFFFFFFF)
+
+/*
+ *Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK        ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT       (12)
+#define PSB_2D_SRCOFF_YSTART_MASK        (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT       (0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK       ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT      (12)
+#define PSB_2D_MASKOFF_YSTART_MASK       (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT      (0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ *Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK                  (3<<25)
+#define PSB_2D_ROT_CLRMASK               (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE                  (0<<25)
+#define PSB_2D_ROT_90DEGS                (1<<25)
+#define PSB_2D_ROT_180DEGS               (2<<25)
+#define PSB_2D_ROT_270DEGS               (3<<25)
+
+#define PSB_2D_COPYORDER_MASK            (3<<23)
+#define PSB_2D_COPYORDER_CLRMASK         (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR           (0<<23)
+#define PSB_2D_COPYORDER_BR2TL           (1<<23)
+#define PSB_2D_COPYORDER_TR2BL           (2<<23)
+#define PSB_2D_COPYORDER_BL2TR           (3<<23)
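+
+/*
+ * The copy-order field selects the blit scan direction; it is typically
+ * chosen so that overlapping screen-to-screen blits do not read pixels
+ * that have already been overwritten.
+ */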
+
+#define PSB_2D_DSTCK_CLRMASK             (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE             (0x00000000)
+#define PSB_2D_DSTCK_PASS                (0x00200000)
+#define PSB_2D_DSTCK_REJECT              (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK             (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE             (0x00000000)
+#define PSB_2D_SRCCK_PASS                (0x00080000)
+#define PSB_2D_SRCCK_REJECT              (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE               (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE              (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK               (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK                  (0x00010000)
+#define PSB_2D_USE_PAT                   (0x00010000)
+#define PSB_2D_USE_FILL                  (0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
+#define PSB_2D_ROP3B_MASK                (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK             (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT               (8)
+// rop code A
+#define PSB_2D_ROP3A_MASK                (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK             (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT               (0)
+
+#define PSB_2D_ROP4_MASK                 (0x0000FFFF)
+/*
+ *     DWORD0: (Only pass if Pattern control == Use Fill Colour)
+ *     Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK           (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT          (0)
+/*
+ *     DWORD1: (Always Present)
+ *     X Start (Dest)
+ *     Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK           (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK        (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT          (12)
+#define PSB_2D_DST_YSTART_MASK           (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK        (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT          (0)
+/*
+ *     DWORD2: (Always Present)
+ *     X Size (Dest)
+ *     Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK            (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK         (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT           (12)
+#define PSB_2D_DST_YSIZE_MASK            (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK         (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT           (0)
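+
+/*
+ * Illustrative sketch (not part of the original header): X occupies bits
+ * 23:12 and Y bits 11:0 of each dword, so hypothetical helpers for
+ * DWORD1/DWORD2 could look like
+ *
+ *   #define PSB_2D_DST_START(x, y) \
+ *       ((((x) << PSB_2D_DST_XSTART_SHIFT) & PSB_2D_DST_XSTART_MASK) | \
+ *        (((y) << PSB_2D_DST_YSTART_SHIFT) & PSB_2D_DST_YSTART_MASK))
+ *   #define PSB_2D_DST_SIZE(w, h) \
+ *       ((((w) << PSB_2D_DST_XSIZE_SHIFT) & PSB_2D_DST_XSIZE_MASK) | \
+ *        (((h) << PSB_2D_DST_YSIZE_SHIFT) & PSB_2D_DST_YSIZE_MASK))
+ */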
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ *      WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK           (0x00078000)
+#define PSB_2D_SRC_1_PAL                 (0x00000000)
+#define PSB_2D_SRC_2_PAL                 (0x00008000)
+#define PSB_2D_SRC_4_PAL                 (0x00010000)
+#define PSB_2D_SRC_8_PAL                 (0x00018000)
+#define PSB_2D_SRC_8_ALPHA               (0x00020000)
+#define PSB_2D_SRC_4_ALPHA               (0x00028000)
+#define PSB_2D_SRC_332RGB                (0x00030000)
+#define PSB_2D_SRC_4444ARGB              (0x00038000)
+#define PSB_2D_SRC_555RGB                (0x00040000)
+#define PSB_2D_SRC_1555ARGB              (0x00048000)
+#define PSB_2D_SRC_565RGB                (0x00050000)
+#define PSB_2D_SRC_0888ARGB              (0x00058000)
+#define PSB_2D_SRC_8888ARGB              (0x00060000)
+#define PSB_2D_SRC_8888UYVY              (0x00068000)
+#define PSB_2D_SRC_RESERVED              (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP       (0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK           (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK        (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT          (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK             (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK          (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT            (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT       (2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ *  WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK           (0x00078000)
+#define PSB_2D_PAT_1_PAL                 (0x00000000)
+#define PSB_2D_PAT_2_PAL                 (0x00008000)
+#define PSB_2D_PAT_4_PAL                 (0x00010000)
+#define PSB_2D_PAT_8_PAL                 (0x00018000)
+#define PSB_2D_PAT_8_ALPHA               (0x00020000)
+#define PSB_2D_PAT_4_ALPHA               (0x00028000)
+#define PSB_2D_PAT_332RGB                (0x00030000)
+#define PSB_2D_PAT_4444ARGB              (0x00038000)
+#define PSB_2D_PAT_555RGB                (0x00040000)
+#define PSB_2D_PAT_1555ARGB              (0x00048000)
+#define PSB_2D_PAT_565RGB                (0x00050000)
+#define PSB_2D_PAT_0888ARGB              (0x00058000)
+#define PSB_2D_PAT_8888ARGB              (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK           (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK        (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT          (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK             (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK          (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT            (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT       (2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK           (0x00078000)
+#define PSB_2D_DST_332RGB                (0x00030000)
+#define PSB_2D_DST_4444ARGB              (0x00038000)
+#define PSB_2D_DST_555RGB                (0x00040000)
+#define PSB_2D_DST_1555ARGB              (0x00048000)
+#define PSB_2D_DST_565RGB                (0x00050000)
+#define PSB_2D_DST_0888ARGB              (0x00058000)
+#define PSB_2D_DST_8888ARGB              (0x00060000)
+#define PSB_2D_DST_8888AYUV              (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK           (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK        (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT          (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK             (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK          (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT            (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT       (2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK          (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK       (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT         (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK            (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK         (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT           (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT      (2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT         (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK       (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK          (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN          (1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT         (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK       (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK          (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN          (1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY              (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY              (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS            (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS            (0x0000)
+#define PSB_2D_ROP3_SRC                  (0xCC)
+#define PSB_2D_ROP3_PAT                  (0xF0)
+#define PSB_2D_ROP3_DST                  (0xAA)
+
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE 16
+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES          2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK 0
+#define PSB_RASTER       1
+#define PSB_RETURN       2
+#define PSB_TA           3
+
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_regman.c b/psb-kernel-source-4.41.1/psb_regman.c
new file mode 100644 (file)
index 0000000..5b9be86
--- /dev/null
@@ -0,0 +1,175 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "psb_drv.h"
+
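+/*
+ * Bookkeeping for one hardware USE code base register: the MMU offset it
+ * currently points at, the data master that owns it, and the register
+ * sequence number used to address PSB_CR_USE_CODE_BASE.
+ */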
+struct psb_use_reg {
+       struct drm_reg reg;
+       struct drm_psb_private *dev_priv;
+       uint32_t reg_seq;
+       uint32_t base;
+       uint32_t data_master;
+};
+
+struct psb_use_reg_data {
+       uint32_t base;
+       uint32_t size;
+       uint32_t data_master;
+};
+
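+/*
+ * A register is reusable if it already covers the requested range on
+ * behalf of the same data master.
+ */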
+static int psb_use_reg_reusable(const struct drm_reg *reg, const void *data)
+{
+       struct psb_use_reg *use_reg =
+           container_of(reg, struct psb_use_reg, reg);
+       struct psb_use_reg_data *use_data = (struct psb_use_reg_data *)data;
+
+       return ((use_reg->base <= use_data->base) &&
+               (use_reg->base + PSB_USE_OFFSET_SIZE >
+                use_data->base + use_data->size) &&
+               use_reg->data_master == use_data->data_master);
+}
+
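+/*
+ * Retarget an idle (unfenced) register at the requested base and data
+ * master, writing the new base to the hardware. Fails if the requested
+ * range still doesn't fit within PSB_USE_OFFSET_SIZE of the new base.
+ */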
+static int psb_use_reg_set(struct psb_use_reg *use_reg,
+                          const struct psb_use_reg_data *use_data)
+{
+       struct drm_psb_private *dev_priv = use_reg->dev_priv;
+
+       if (use_reg->reg.fence == NULL)
+               use_reg->data_master = use_data->data_master;
+
+       if (use_reg->reg.fence == NULL &&
+           !psb_use_reg_reusable(&use_reg->reg, (const void *)use_data)) {
+
+               use_reg->base = use_data->base & ~PSB_USE_OFFSET_MASK;
+               use_reg->data_master = use_data->data_master;
+
+               if (!psb_use_reg_reusable(&use_reg->reg,
+                                         (const void *)use_data)) {
+                       DRM_ERROR("USE base mechanism didn't support "
+                                 "buffer size or alignment\n");
+                       return -EINVAL;
+               }
+
+               PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
+                          (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
+                          PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
+       }
+       return 0;
+}
+
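+/*
+ * Allocate (or reuse) a USE base register covering the range
+ * [base, base + size) for the given data master. Returns the register
+ * sequence number in r_reg and the offset of base within the register's
+ * window in r_offset.
+ */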
+int psb_grab_use_base(struct drm_psb_private *dev_priv,
+                     unsigned long base,
+                     unsigned long size,
+                     unsigned int data_master,
+                     uint32_t fence_class,
+                     uint32_t fence_type,
+                     int no_wait,
+                     int interruptible, int *r_reg, uint32_t * r_offset)
+{
+       struct psb_use_reg_data use_data = {
+               .base = base,
+               .size = size,
+               .data_master = data_master
+       };
+       int ret;
+
+       struct drm_reg *reg;
+       struct psb_use_reg *use_reg;
+
+       ret = drm_regs_alloc(&dev_priv->use_manager,
+                            (const void *)&use_data,
+                            fence_class,
+                            fence_type, interruptible, no_wait, &reg);
+       if (ret)
+               return ret;
+
+       use_reg = container_of(reg, struct psb_use_reg, reg);
+       ret = psb_use_reg_set(use_reg, &use_data);
+
+       if (ret)
+               return ret;
+
+       *r_reg = use_reg->reg_seq;
+       *r_offset = base - use_reg->base;
+
+       return 0;
+}
+
+static void psb_use_reg_destroy(struct drm_reg *reg)
+{
+       struct psb_use_reg *use_reg =
+           container_of(reg, struct psb_use_reg, reg);
+       struct drm_psb_private *dev_priv = use_reg->dev_priv;
+
+       PSB_WSGX32(PSB_ALPL(0, _PSB_CUC_BASE_ADDR),
+                  PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
+
+       drm_free(use_reg, sizeof(*use_reg), DRM_MEM_DRIVER);
+}
+
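+/*
+ * Set up the USE register manager and hand registers reg_start ..
+ * reg_start + reg_num - 1 over to it, initially pointing each at offset
+ * 0 for the pixel data master.
+ */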
+int psb_init_use_base(struct drm_psb_private *dev_priv,
+                     unsigned int reg_start, unsigned int reg_num)
+{
+       struct psb_use_reg *use_reg;
+       int i;
+       int ret = 0;
+
+       mutex_lock(&dev_priv->cmdbuf_mutex);
+
+       drm_regs_init(&dev_priv->use_manager,
+                     &psb_use_reg_reusable, &psb_use_reg_destroy);
+
+       for (i = reg_start; i < reg_start + reg_num; ++i) {
+               use_reg = drm_calloc(1, sizeof(*use_reg), DRM_MEM_DRIVER);
+               if (!use_reg) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               use_reg->dev_priv = dev_priv;
+               use_reg->reg_seq = i;
+               use_reg->base = 0;
+               use_reg->data_master = _PSB_CUC_DM_PIXEL;
+
+               PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
+                          (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
+                          PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
+
+               drm_regs_add(&dev_priv->use_manager, &use_reg->reg);
+       }
+      out:
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       return ret;
+}
+
+void psb_takedown_use_base(struct drm_psb_private *dev_priv)
+{
+       mutex_lock(&dev_priv->cmdbuf_mutex);
+       drm_regs_free(&dev_priv->use_manager);
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
diff --git a/psb-kernel-source-4.41.1/psb_reset.c b/psb-kernel-source-4.41.1/psb_reset.c
new file mode 100644 (file)
index 0000000..bfe8c49
--- /dev/null
@@ -0,0 +1,374 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_scene.h"
+#include "psb_msvdx.h"
+
+#define PSB_2D_TIMEOUT_MSEC 100
+
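+/*
+ * Soft-reset the SGX units (optionally including the 2D core) and clear
+ * any pending fault in the bus interface.
+ */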
+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
+{
+       uint32_t val;
+
+       val = _PSB_CS_RESET_BIF_RESET |
+           _PSB_CS_RESET_DPM_RESET |
+           _PSB_CS_RESET_TA_RESET |
+           _PSB_CS_RESET_USE_RESET |
+           _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
+
+       if (reset_2d)
+               val |= _PSB_CS_RESET_TWOD_RESET;
+
+       PSB_WSGX32(val, PSB_CR_SOFT_RESET);
+       (void)PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+       msleep(1);
+
+       PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+       wmb();
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
+       wmb();
+       (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+       msleep(1);
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
+       (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+}
+
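+/*
+ * Decode the bus interface fault status register and log which requestor
+ * faulted and at what MMU address.
+ */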
+void psb_print_pagefault(struct drm_psb_private *dev_priv)
+{
+       uint32_t val;
+       uint32_t addr;
+
+       val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
+       addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
+
+       if (val) {
+               if (val & _PSB_CBI_STAT_PF_N_RW)
+                       DRM_ERROR("Poulsbo MMU page fault:\n");
+               else
+                       DRM_ERROR("Poulsbo MMU read / write "
+                                 "protection fault:\n");
+
+               if (val & _PSB_CBI_STAT_FAULT_CACHE)
+                       DRM_ERROR("\tCache requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_TA)
+                       DRM_ERROR("\tTA requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_VDM)
+                       DRM_ERROR("\tVDM requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_2D)
+                       DRM_ERROR("\t2D requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_PBE)
+                       DRM_ERROR("\tPBE requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_TSP)
+                       DRM_ERROR("\tTSP requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_ISP)
+                       DRM_ERROR("\tISP requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
+                       DRM_ERROR("\tUSSEPDS requestor.\n");
+               if (val & _PSB_CBI_STAT_FAULT_HOST)
+                       DRM_ERROR("\tHost requestor.\n");
+
+               DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
+       }
+}
+
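+/*
+ * Arm the watchdog timer unless it is already pending or has been taken
+ * down.
+ */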
+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
+{
+       struct timer_list *wt = &dev_priv->watchdog_timer;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+       if (dev_priv->timer_available && !timer_pending(wt)) {
+               wt->expires = jiffies + PSB_WATCHDOG_DELAY;
+               add_timer(wt);
+       }
+       spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+}
+
+#if 0
+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
+                               unsigned int engine, int *lockup, int *idle)
+{
+       uint32_t received_seq;
+
+       received_seq = dev_priv->comm[engine << 4];
+       spin_lock(&dev_priv->sequence_lock);
+       *idle = (received_seq == dev_priv->sequence[engine]);
+       spin_unlock(&dev_priv->sequence_lock);
+
+       if (*idle) {
+               dev_priv->idle[engine] = 1;
+               *lockup = 0;
+               return;
+       }
+
+       if (dev_priv->idle[engine]) {
+               dev_priv->idle[engine] = 0;
+               dev_priv->last_sequence[engine] = received_seq;
+               *lockup = 0;
+               return;
+       }
+
+       *lockup = (dev_priv->last_sequence[engine] == received_seq);
+}
+
+#endif
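+
+/*
+ * Timer callback: check the scheduler and the MSVDX engine for lockups,
+ * kick the corresponding reset work queues if one is found, and re-arm
+ * the timer while any engine is still busy.
+ */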
+static void psb_watchdog_func(unsigned long data)
+{
+       struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
+       int lockup;
+       int msvdx_lockup;
+       int msvdx_idle;
+       int lockup_2d;
+       int idle_2d;
+       int idle;
+       unsigned long irq_flags;
+
+       psb_scheduler_lockup(dev_priv, &lockup, &idle);
+       psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
+#if 0
+       psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
+#else
+       lockup_2d = FALSE;
+       idle_2d = TRUE;
+#endif
+       if (lockup || msvdx_lockup || lockup_2d) {
+               spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+               dev_priv->timer_available = 0;
+               spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+               if (lockup) {
+                       psb_print_pagefault(dev_priv);
+                       schedule_work(&dev_priv->watchdog_wq);
+               }
+               if (msvdx_lockup)
+                       schedule_work(&dev_priv->msvdx_watchdog_wq);
+       }
+       if (!idle || !msvdx_idle || !idle_2d)
+               psb_schedule_watchdog(dev_priv);
+}
+
+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_msvdx_cmd_queue *msvdx_cmd;
+       struct list_head *list, *next;
+
+       /* Flush the msvdx cmd queue and signal all fences in the queue. */
+       list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
+               msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
+               PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
+                                 msvdx_cmd->sequence);
+               dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
+               psb_fence_error(dev, PSB_ENGINE_VIDEO,
+                               dev_priv->msvdx_current_sequence,
+                               DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
+               list_del(list);
+               kfree(msvdx_cmd->cmd);
+               drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
+                        DRM_MEM_DRIVER);
+       }
+}
+
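+/*
+ * MSVDX watchdog work: mark the engine for reset, signal the hung fence
+ * with DRM_CMD_HANG, flush the command queue and re-enable the watchdog
+ * timer.
+ */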
+static void psb_msvdx_reset_wq(struct work_struct *work)
+{
+       struct drm_psb_private *dev_priv =
+           container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
+
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+
+       mutex_lock(&dev_priv->msvdx_mutex);
+       dev_priv->msvdx_needs_reset = 1;
+       dev_priv->msvdx_current_sequence++;
+       PSB_DEBUG_GENERAL
+           ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
+            dev_priv->msvdx_current_sequence);
+
+       psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
+                       dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
+                       DRM_CMD_HANG);
+
+       spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+       dev_priv->timer_available = 1;
+       spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+
+       spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
+       psb_msvdx_flush_cmd_queue(scheduler->dev);
+       spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
+
+       psb_schedule_watchdog(dev_priv);
+       mutex_unlock(&dev_priv->msvdx_mutex);
+}
+
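+/*
+ * Reload the default MMU page directory, clear faults and invalidate the
+ * bus interface caches, then call into Xpsb to reset the DPM.
+ */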
+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
+{
+       struct psb_xhw_buf buf;
+       uint32_t bif_ctrl;
+
+       INIT_LIST_HEAD(&buf.head);
+       psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+       bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
+       PSB_WSGX32(bif_ctrl |
+                  _PSB_CB_CTRL_CLEAR_FAULT |
+                  _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+       (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+       msleep(1);
+       PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
+       (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+       return psb_xhw_reset_dpm(dev_priv, &buf);
+}
+
+/*
+ * Block command submission and reset hardware and schedulers.
+ */
+
+static void psb_reset_wq(struct work_struct *work)
+{
+       struct drm_psb_private *dev_priv =
+           container_of(work, struct drm_psb_private, watchdog_wq);
+       int lockup_2d;
+       int idle_2d;
+       unsigned long irq_flags;
+       int ret;
+       int reset_count = 0;
+       struct psb_xhw_buf buf;
+       uint32_t xhw_lockup;
+
+       /*
+        * Block command submission.
+        */
+
+       mutex_lock(&dev_priv->reset_mutex);
+
+       INIT_LIST_HEAD(&buf.head);
+       if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
+               if (xhw_lockup == 0 && psb_extend_raster_timeout(dev_priv) == 0) {
+                       /*
+                        * no lockup, just re-schedule
+                        */
+                       spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+                       dev_priv->timer_available = 1;
+                       spin_unlock_irqrestore(&dev_priv->watchdog_lock,
+                                              irq_flags);
+                       psb_schedule_watchdog(dev_priv);
+                       mutex_unlock(&dev_priv->reset_mutex);
+                       return;
+               }
+       }
+#if 0
+       msleep(PSB_2D_TIMEOUT_MSEC);
+
+       psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
+
+       if (lockup_2d) {
+               uint32_t seq_2d;
+               spin_lock(&dev_priv->sequence_lock);
+               seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
+               spin_unlock(&dev_priv->sequence_lock);
+               psb_fence_error(dev_priv->scheduler.dev,
+                               PSB_ENGINE_2D,
+                               seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
+               DRM_INFO("Resetting 2D engine.\n");
+       }
+
+       psb_reset(dev_priv, lockup_2d);
+#else
+       (void)lockup_2d;
+       (void)idle_2d;
+       psb_reset(dev_priv, 0);
+#endif
+       (void)psb_xhw_mmu_reset(dev_priv);
+       DRM_INFO("Resetting scheduler.\n");
+       psb_scheduler_pause(dev_priv);
+       psb_scheduler_reset(dev_priv, -EBUSY);
+       psb_scheduler_ta_mem_check(dev_priv);
+
+       while (dev_priv->ta_mem &&
+              !dev_priv->force_ta_mem_load && ++reset_count < 10) {
+
+               /*
+                * TA memory is currently fenced so offsets
+                * are valid. Reload offsets into the dpm now.
+                */
+
+               struct psb_xhw_buf buf;
+               INIT_LIST_HEAD(&buf.head);
+
+               msleep(100);
+               DRM_INFO("Trying to reload TA memory.\n");
+               ret = psb_xhw_ta_mem_load(dev_priv, &buf,
+                                         PSB_TA_MEM_FLAG_TA |
+                                         PSB_TA_MEM_FLAG_RASTER |
+                                         PSB_TA_MEM_FLAG_HOSTA |
+                                         PSB_TA_MEM_FLAG_HOSTD |
+                                         PSB_TA_MEM_FLAG_INIT,
+                                         dev_priv->ta_mem->ta_memory->offset,
+                                         dev_priv->ta_mem->hw_data->offset,
+                                         dev_priv->ta_mem->hw_cookie);
+               if (!ret)
+                       break;
+
+               psb_reset(dev_priv, 0);
+               (void)psb_xhw_mmu_reset(dev_priv);
+       }
+
+       psb_scheduler_restart(dev_priv);
+       spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+       dev_priv->timer_available = 1;
+       spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+       mutex_unlock(&dev_priv->reset_mutex);
+}
+
+void psb_watchdog_init(struct drm_psb_private *dev_priv)
+{
+       struct timer_list *wt = &dev_priv->watchdog_timer;
+       unsigned long irq_flags;
+
+       spin_lock_init(&dev_priv->watchdog_lock);
+       spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+       init_timer(wt);
+       INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
+       INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
+       wt->data = (unsigned long)dev_priv;
+       wt->function = &psb_watchdog_func;
+       dev_priv->timer_available = 1;
+       spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+}
+
+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+       dev_priv->timer_available = 0;
+       spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+       (void)del_timer_sync(&dev_priv->watchdog_timer);
+}
diff --git a/psb-kernel-source-4.41.1/psb_scene.c b/psb-kernel-source-4.41.1/psb_scene.c
new file mode 100644 (file)
index 0000000..84335fe
--- /dev/null
@@ -0,0 +1,531 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+
+#include "drmP.h"
+#include "psb_drv.h"
+#include "psb_scene.h"
+
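+/*
+ * Zero the clear region of a scene's hw data buffer page by page, using
+ * atomic kmaps so that this can also run from interrupt context.
+ */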
+void psb_clear_scene_atomic(struct psb_scene *scene)
+{
+       int i;
+       struct page *page;
+       void *v;
+
+       for (i = 0; i < scene->clear_num_pages; ++i) {
+               page = drm_ttm_get_page(scene->hw_data->ttm,
+                                       scene->clear_p_start + i);
+               if (in_irq())
+                       v = kmap_atomic(page, KM_IRQ0);
+               else
+                       v = kmap_atomic(page, KM_USER0);
+
+               memset(v, 0, PAGE_SIZE);
+
+               if (in_irq())
+                       kunmap_atomic(v, KM_IRQ0);
+               else
+                       kunmap_atomic(v, KM_USER0);
+       }
+}
+
+int psb_clear_scene(struct psb_scene *scene)
+{
+       struct drm_bo_kmap_obj bmo;
+       int is_iomem;
+       void *addr;
+
+       int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
+                             scene->clear_num_pages, &bmo);
+
+       PSB_DEBUG_RENDER("Scene clear\n");
+       if (ret)
+               return ret;
+
+       addr = drm_bmo_virtual(&bmo, &is_iomem);
+       BUG_ON(is_iomem);
+       memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
+       drm_bo_kunmap(&bmo);
+
+       return 0;
+}
+
+static void psb_destroy_scene_devlocked(struct psb_scene *scene)
+{
+       if (!scene)
+               return;
+
+       PSB_DEBUG_RENDER("Scene destroy\n");
+       drm_bo_usage_deref_locked(&scene->hw_data);
+       drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
+}
+
+void psb_scene_unref_devlocked(struct psb_scene **scene)
+{
+       struct psb_scene *tmp_scene = *scene;
+
+       PSB_DEBUG_RENDER("Scene unref\n");
+       *scene = NULL;
+       if (atomic_dec_and_test(&tmp_scene->ref_count)) {
+               psb_scheduler_remove_scene_refs(tmp_scene);
+               psb_destroy_scene_devlocked(tmp_scene);
+       }
+}
+
+struct psb_scene *psb_scene_ref(struct psb_scene *src)
+{
+       PSB_DEBUG_RENDER("Scene ref\n");
+       atomic_inc(&src->ref_count);
+       return src;
+}
+
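+/*
+ * Allocate a scene object for a w x h render target: query Xpsb for the
+ * required buffer size and clear region, then create the backing MMU
+ * buffer object.
+ */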
+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
+                                        uint32_t w, uint32_t h)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       int ret = -EINVAL;
+       struct psb_scene *scene;
+       uint32_t bo_size;
+       struct psb_xhw_buf buf;
+
+       PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);
+
+       scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
+
+       if (!scene) {
+               DRM_ERROR("Out of memory allocating scene object.\n");
+               return NULL;
+       }
+
+       scene->dev = dev;
+       scene->w = w;
+       scene->h = h;
+       scene->hw_scene = NULL;
+       atomic_set(&scene->ref_count, 1);
+
+       INIT_LIST_HEAD(&buf.head);
+       ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
+                                scene->hw_cookie, &bo_size,
+                                &scene->clear_p_start,
+                                &scene->clear_num_pages);
+       if (ret)
+               goto out_err;
+
+       ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
+                                      DRM_PSB_FLAG_MEM_MMU |
+                                      DRM_BO_FLAG_READ |
+                                      DRM_BO_FLAG_CACHED |
+                                      PSB_BO_FLAG_SCENE |
+                                      DRM_BO_FLAG_WRITE,
+                                      DRM_BO_HINT_DONT_FENCE,
+                                      0, 0, &scene->hw_data);
+       if (ret)
+               goto out_err;
+
+       return scene;
+      out_err:
+       drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
+       return NULL;
+}
+
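+/*
+ * Validate the pool's current scene for the TA engine: (re)allocate the
+ * scene and the global TA memory as needed, clear scene memory on first
+ * use, and reload the TA memory offsets into the DPM if they have moved.
+ * On the final pass, mark the scene for clearing and advance the pool's
+ * scene counter.
+ */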
+int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
+                           uint64_t mask,
+                           uint32_t hint,
+                           uint32_t w,
+                           uint32_t h,
+                           int final_pass, struct psb_scene **scene_p)
+{
+       struct drm_device *dev = pool->dev;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct psb_scene *scene = pool->scenes[pool->cur_scene];
+       int ret;
+       unsigned long irq_flags;
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       uint32_t bin_pt_offset;
+       uint32_t bin_param_offset;
+
+       PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);
+
+       if (unlikely(!dev_priv->ta_mem)) {
+               dev_priv->ta_mem =
+                   psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
+               if (!dev_priv->ta_mem)
+                       return -ENOMEM;
+
+               bin_pt_offset = ~0;
+               bin_param_offset = ~0;
+       } else {
+               bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
+               bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
+       }
+
+       pool->w = w;
+       pool->h = h;
+       if (scene && (scene->w != pool->w || scene->h != pool->h)) {
+               spin_lock_irqsave(&scheduler->lock, irq_flags);
+               if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
+                       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+                       DRM_ERROR("Trying to resize a dirty scene.\n");
+                       return -EINVAL;
+               }
+               spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+               mutex_lock(&dev->struct_mutex);
+               psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
+               mutex_unlock(&dev->struct_mutex);
+               scene = NULL;
+       }
+
+       if (!scene) {
+               pool->scenes[pool->cur_scene] = scene =
+                   psb_alloc_scene(pool->dev, pool->w, pool->h);
+
+               if (!scene)
+                       return -ENOMEM;
+
+               scene->flags = PSB_SCENE_FLAG_CLEARED;
+       }
+
+       /*
+        * FIXME: We need atomic bit manipulation here for the
+        * scheduler. For now use the spinlock.
+        */
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
+               spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+               PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
+               mutex_lock(&scene->hw_data->mutex);
+               ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
+               mutex_unlock(&scene->hw_data->mutex);
+               if (ret)
+                       return ret;
+
+               ret = psb_clear_scene(scene);
+
+               if (ret)
+                       return ret;
+               spin_lock_irqsave(&scheduler->lock, irq_flags);
+               scene->flags |= PSB_SCENE_FLAG_CLEARED;
+       }
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+
+       ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
+                                PSB_ENGINE_TA, 0, NULL);
+       if (ret)
+               return ret;
+       ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
+                                PSB_ENGINE_TA, 0, NULL);
+       if (ret)
+               return ret;
+       ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
+                                PSB_ENGINE_TA, 0, NULL);
+       if (ret)
+               return ret;
+
+       if (unlikely(bin_param_offset !=
+                    dev_priv->ta_mem->ta_memory->offset ||
+                    bin_pt_offset !=
+                    dev_priv->ta_mem->hw_data->offset ||
+                    dev_priv->force_ta_mem_load)) {
+
+               struct psb_xhw_buf buf;
+
+               INIT_LIST_HEAD(&buf.head);
+               ret = psb_xhw_ta_mem_load(dev_priv, &buf,
+                                         PSB_TA_MEM_FLAG_TA |
+                                         PSB_TA_MEM_FLAG_RASTER |
+                                         PSB_TA_MEM_FLAG_HOSTA |
+                                         PSB_TA_MEM_FLAG_HOSTD |
+                                         PSB_TA_MEM_FLAG_INIT,
+                                         dev_priv->ta_mem->ta_memory->offset,
+                                         dev_priv->ta_mem->hw_data->offset,
+                                         dev_priv->ta_mem->hw_cookie);
+               if (ret)
+                       return ret;
+
+               dev_priv->force_ta_mem_load = 0;
+       }
+
+       if (final_pass) {
+
+               /*
+                * Clear the scene on next use. Advance the scene counter.
+                */
+
+               spin_lock_irqsave(&scheduler->lock, irq_flags);
+               scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
+               spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+               pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
+       }
+
+       *scene_p = psb_scene_ref(scene);
+       return 0;
+}
+
+static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
+{
+       int i;
+
+       if (!pool)
+               return;
+
+       PSB_DEBUG_RENDER("Scene pool destroy.\n");
+       for (i = 0; i < pool->num_scenes; ++i) {
+               PSB_DEBUG_RENDER("scene %d is 0x%08lx\n", i,
+                                (unsigned long)pool->scenes[i]);
+               if (pool->scenes[i])
+                       psb_scene_unref_devlocked(&pool->scenes[i]);
+       }
+       drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
+}
+
+void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
+{
+       struct psb_scene_pool *tmp_pool = *pool;
+       struct drm_device *dev = tmp_pool->dev;
+
+       PSB_DEBUG_RENDER("Scene pool unref\n");
+       (void)dev;
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       *pool = NULL;
+       if (--tmp_pool->ref_count == 0)
+               psb_scene_pool_destroy_devlocked(tmp_pool);
+}
+
+struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
+{
+       ++src->ref_count;
+       return src;
+}
+
+/*
+ * Callback for user object manager.
+ */
+
+static void psb_scene_pool_destroy(struct drm_file *priv,
+                                  struct drm_user_object *base)
+{
+       struct psb_scene_pool *pool =
+           drm_user_object_entry(base, struct psb_scene_pool, user);
+
+       psb_scene_pool_unref_devlocked(&pool);
+}
+
+struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
+                                                      uint32_t handle,
+                                                      int check_owner)
+{
+       struct drm_user_object *uo;
+       struct psb_scene_pool *pool;
+
+       uo = drm_lookup_user_object(priv, handle);
+       if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
+               DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
+               return NULL;
+       }
+
+       if (check_owner && priv != uo->owner) {
+               if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
+                       return NULL;
+       }
+
+       pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
+       return psb_scene_pool_ref_devlocked(pool);
+}
+
+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
+                                           int shareable,
+                                           uint32_t num_scenes,
+                                           uint32_t w, uint32_t h)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct psb_scene_pool *pool;
+       int ret;
+
+       PSB_DEBUG_RENDER("Scene pool alloc\n");
+       pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
+       if (!pool) {
+               DRM_ERROR("Out of memory allocating scene pool object.\n");
+               return NULL;
+       }
+       pool->w = w;
+       pool->h = h;
+       pool->dev = dev;
+       pool->num_scenes = num_scenes;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_add_user_object(priv, &pool->user, shareable);
+       if (ret)
+               goto out_err;
+
+       pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
+       pool->user.remove = &psb_scene_pool_destroy;
+       pool->ref_count = 2;
+       mutex_unlock(&dev->struct_mutex);
+       return pool;
+      out_err:
+       drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
+       return NULL;
+}
+
+/*
+ * Code to support multiple ta memory buffers.
+ */
+
+static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
+{
+       if (!ta_mem)
+               return;
+
+       drm_bo_usage_deref_locked(&ta_mem->hw_data);
+       drm_bo_usage_deref_locked(&ta_mem->ta_memory);
+       drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
+}
+
+void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
+{
+       struct psb_ta_mem *tmp_ta_mem = *ta_mem;
+       struct drm_device *dev = tmp_ta_mem->dev;
+
+       (void)dev;
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       *ta_mem = NULL;
+       if (--tmp_ta_mem->ref_count == 0)
+               psb_destroy_ta_mem_devlocked(tmp_ta_mem);
+}
+
+void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
+{
+       struct drm_device *dev = src->dev;
+
+       (void)dev;
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       *dst = src;
+       ++src->ref_count;
+}
+
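+/*
+ * Allocate the TA (binner) parameter memory: ask Xpsb how large the
+ * buffers need to be for the requested page count, then create the hw
+ * data and raster geometry buffer objects.
+ */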
+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       int ret = -EINVAL;
+       struct psb_ta_mem *ta_mem;
+       uint32_t bo_size;
+       struct psb_xhw_buf buf;
+
+       INIT_LIST_HEAD(&buf.head);
+
+       ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
+
+       if (!ta_mem) {
+               DRM_ERROR("Out of memory allocating parameter memory.\n");
+               return NULL;
+       }
+
+       ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
+                                 ta_mem->hw_cookie, &bo_size);
+       if (ret == -ENOMEM) {
+               DRM_ERROR("Parameter memory size is too small.\n");
+               DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
+                        (unsigned int)(pages * (PAGE_SIZE / 1024)));
+               DRM_INFO("The Xpsb driver thinks this is too small and\n");
+               DRM_INFO("suggests %u kiB. Check the psb DRM\n",
+                        (unsigned int)(bo_size / 1024));
+               DRM_INFO("\"ta_mem_size\" parameter!\n");
+       }
+       if (ret)
+               goto out_err0;
+
+       bo_size = pages * PAGE_SIZE;
+       ta_mem->dev = dev;
+       ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
+                                      DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
+                                      DRM_BO_FLAG_WRITE |
+                                      PSB_BO_FLAG_SCENE,
+                                      DRM_BO_HINT_DONT_FENCE, 0, 0,
+                                      &ta_mem->hw_data);
+       if (ret)
+               goto out_err0;
+
+       ret =
+           drm_buffer_object_create(dev, pages << PAGE_SHIFT,
+                                    drm_bo_type_kernel,
+                                    DRM_PSB_FLAG_MEM_RASTGEOM |
+                                    DRM_BO_FLAG_READ |
+                                    DRM_BO_FLAG_WRITE |
+                                    PSB_BO_FLAG_SCENE,
+                                    DRM_BO_HINT_DONT_FENCE, 0,
+                                    1024 * 1024 >> PAGE_SHIFT,
+                                    &ta_mem->ta_memory);
+       if (ret)
+               goto out_err1;
+
+       ta_mem->ref_count = 1;
+       return ta_mem;
+      out_err1:
+       drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
+      out_err0:
+       drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
+       return NULL;
+}
+
+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
+       struct drm_user_object *uo;
+       struct drm_ref_object *ro;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+       if (!scene->handle_valid)
+               goto out_unlock;
+
+       uo = drm_lookup_user_object(file_priv, scene->handle);
+       if (!uo) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
+               DRM_ERROR("Not a scene pool object.\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       if (uo->owner != file_priv) {
+               DRM_ERROR("Not owner of scene pool object.\n");
+               ret = -EPERM;
+               goto out_unlock;
+       }
+
+       scene->handle_valid = 0;
+       ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
+       BUG_ON(!ro);
+       drm_remove_ref_object(file_priv, ro);
+
+      out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
diff --git a/psb-kernel-source-4.41.1/psb_scene.h b/psb-kernel-source-4.41.1/psb_scene.h
new file mode 100644 (file)
index 0000000..e2c3d05
--- /dev/null
@@ -0,0 +1,112 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+
+#ifndef _PSB_SCENE_H_
+#define _PSB_SCENE_H_
+
+#define PSB_USER_OBJECT_SCENE_POOL    drm_driver_type0
+#define PSB_USER_OBJECT_TA_MEM        drm_driver_type1
+#define PSB_MAX_NUM_SCENES            8
+
+struct psb_hw_scene;
+struct psb_hw_ta_mem;
+
+struct psb_scene_pool {
+       struct drm_device *dev;
+       struct drm_user_object user;
+       uint32_t ref_count;
+       uint32_t w;
+       uint32_t h;
+       uint32_t cur_scene;
+       struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
+       uint32_t num_scenes;
+};
+
+struct psb_scene {
+       struct drm_device *dev;
+       atomic_t ref_count;
+       uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
+       uint32_t bo_size;
+       uint32_t w;
+       uint32_t h;
+       struct psb_ta_mem *ta_mem;
+       struct psb_hw_scene *hw_scene;
+       struct drm_buffer_object *hw_data;
+       uint32_t flags;
+       uint32_t clear_p_start;
+       uint32_t clear_num_pages;
+};
+
+struct psb_scene_entry {
+       struct list_head head;
+       struct psb_scene *scene;
+};
+
+struct psb_user_scene {
+       struct drm_device *dev;
+       struct drm_user_object user;
+};
+
+struct psb_ta_mem {
+       struct drm_device *dev;
+       struct drm_user_object user;
+       uint32_t ref_count;
+       uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
+       uint32_t bo_size;
+       struct drm_buffer_object *ta_memory;
+       struct drm_buffer_object *hw_data;
+       int is_deallocating;
+       int deallocating_scheduled;
+};
+
+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
+                                                  int shareable,
+                                                  uint32_t num_scenes,
+                                                  uint32_t w, uint32_t h);
+extern void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool);
+extern struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file
+                                                             *priv,
+                                                             uint32_t handle,
+                                                             int check_owner);
+extern int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
+                                  uint64_t mask, uint32_t hint, uint32_t w,
+                                  uint32_t h, int final_pass,
+                                  struct psb_scene **scene_p);
+extern void psb_scene_unref_devlocked(struct psb_scene **scene);
+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
+                                    void *data, struct drm_file *file_priv);
+
+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
+{
+       return pool->user.hash.key;
+}
+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
+                                          uint32_t pages);
+extern void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst,
+                                    struct psb_ta_mem *src);
+extern void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem);
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_schedule.c b/psb-kernel-source-4.41.1/psb_schedule.c
new file mode 100644 (file)
index 0000000..959f8f9
--- /dev/null
@@ -0,0 +1,1465 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+
+#include "drmP.h"
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_scene.h"
+
+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
+#define PSB_RASTER_TIMEOUT (DRM_HZ / 2)
+#define PSB_TA_TIMEOUT (DRM_HZ / 5)
+
+#undef PSB_SOFTWARE_WORKAHEAD
+
+#ifdef PSB_STABLE_SETTING
+
+/*
+ * Software blocks completely while the engines are working so there can be no
+ * overlap.
+ */
+
+#define PSB_WAIT_FOR_RASTER_COMPLETION
+#define PSB_WAIT_FOR_TA_COMPLETION
+
+#elif defined(PSB_PARANOID_SETTING)
+/*
+ * Software blocks "almost" completely while the engines are working, so
+ * there can be almost no overlap.
+ */
+
+#define PSB_WAIT_FOR_RASTER_COMPLETION
+#define PSB_WAIT_FOR_TA_COMPLETION
+#define PSB_BE_PARANOID
+
+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
+/*
+ * Software leaps ahead while the rasterizer is running and prepares
+ * a new ta job that can be scheduled before the rasterizer has
+ * finished.
+ */
+
+#define PSB_WAIT_FOR_TA_COMPLETION
+
+#elif defined(PSB_SOFTWARE_WORKAHEAD)
+/*
+ * Don't sync; allow software to work ahead and queue a number of jobs,
+ * but block overlapping in the scheduler.
+ */
+
+#define PSB_BLOCK_OVERLAP
+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
+
+#endif
+
+/*
+ * Avoid pixelbe pagefaults on C0.
+ */
+#if 0
+#define PSB_BLOCK_OVERLAP
+#endif
+
+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
+                           struct psb_scheduler *scheduler,
+                           uint32_t reply_flag);
+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
+                               struct psb_scheduler *scheduler,
+                               uint32_t reply_flag);
+
+#ifdef FIX_TG_16
+
+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
+static int psb_2d_trylock(struct drm_psb_private *dev_priv);
+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
+
+#endif
+
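+/*
+ * Report whether the ta or the rasterizer has run past its deadline
+ * (lockup) and whether the scheduler is currently idle.
+ */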
+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
+                         int *lockup, int *idle)
+{
+       unsigned long irq_flags;
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+
+       *lockup = 0;
+       *idle = 1;
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+
+       if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
+           time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
+               *lockup = 1;
+       }
+       if (!*lockup
+           && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
+           && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
+               *lockup = 1;
+       }
+       if (!*lockup)
+               *idle = scheduler->idle;
+
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+static inline void psb_set_idle(struct psb_scheduler *scheduler)
+{
+       scheduler->idle =
+           (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
+           (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
+       if (scheduler->idle)
+               wake_up(&scheduler->idle_queue);
+}
+
+/*
+ * Call with the scheduler spinlock held.
+ * Assigns a scene context to either the ta or the rasterizer,
+ * flushing out other scenes to memory if necessary.
+ */
+
+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
+                             struct psb_scene *scene,
+                             int engine, struct psb_task *task)
+{
+       uint32_t flags = 0;
+       struct psb_hw_scene *hw_scene;
+       struct drm_device *dev = scene->dev;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       hw_scene = scene->hw_scene;
+       if (hw_scene && hw_scene->last_scene == scene) {
+
+               /*
+                * Reuse the last hw scene context and delete it from the
+                * free list.
+                */
+
+               PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
+                                hw_scene->context_number);
+               if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
+
+                       /*
+                        * No hw context initialization to be done.
+                        */
+
+                       flags |= PSB_SCENE_FLAG_SETUP_ONLY;
+               }
+
+               list_del_init(&hw_scene->head);
+
+       } else {
+               struct list_head *list;
+               hw_scene = NULL;
+
+               /*
+                * Grab a new hw scene context.
+                */
+
+               list_for_each(list, &scheduler->hw_scenes) {
+                       hw_scene = list_entry(list, struct psb_hw_scene, head);
+                       break;
+               }
+               BUG_ON(!hw_scene);
+               PSB_DEBUG_RENDER("New hw scene %d.\n",
+                                hw_scene->context_number);
+
+               list_del_init(list);
+       }
+       scene->hw_scene = hw_scene;
+       hw_scene->last_scene = scene;
+
+       flags |= PSB_SCENE_FLAG_SETUP;
+
+       /*
+        * Switch context and setup the engine.
+        */
+
+       return psb_xhw_scene_bind_fire(dev_priv,
+                                      &task->buf,
+                                      task->flags,
+                                      hw_scene->context_number,
+                                      scene->hw_cookie,
+                                      task->oom_cmds,
+                                      task->oom_cmd_size,
+                                      scene->hw_data->offset,
+                                      engine, flags | scene->flags);
+}
+
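+/*
+ * Record the most recent sequence number for a fence type and optionally
+ * call the fence handler to signal waiters.
+ */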
+static inline void psb_report_fence(struct psb_scheduler *scheduler,
+                                   uint32_t class,
+                                   uint32_t sequence,
+                                   uint32_t type, int call_handler)
+{
+       struct psb_scheduler_seq *seq = &scheduler->seq[type];
+
+       seq->sequence = sequence;
+       seq->reported = 0;
+       if (call_handler)
+               psb_fence_handler(scheduler->dev, class);
+}
+
+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
+                               struct psb_scheduler *scheduler);
+
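+/*
+ * Take the first task from the ta queue and fire the ta, first moving
+ * any leading rasterization-only tasks over to the raster queue so that
+ * rasterization order is preserved, subject to the overlap and
+ * workahead settings above.
+ */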
+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
+                           struct psb_scheduler *scheduler)
+{
+       struct psb_task *task = NULL;
+       struct list_head *list, *next;
+       int pushed_raster_task = 0;
+
+       PSB_DEBUG_RENDER("schedule ta\n");
+
+       if (scheduler->idle_count != 0)
+               return;
+
+       if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
+               return;
+
+       if (scheduler->ta_state)
+               return;
+
+       /*
+        * Skip the ta stage for rasterization-only
+        * tasks. They arrive here to make sure we're rasterizing
+        * tasks in the correct order.
+        */
+
+       list_for_each_safe(list, next, &scheduler->ta_queue) {
+               task = list_entry(list, struct psb_task, head);
+               if (task->task_type != psb_raster_task)
+                       break;
+
+               list_del_init(list);
+               list_add_tail(list, &scheduler->raster_queue);
+               psb_report_fence(scheduler, task->engine, task->sequence,
+                                _PSB_FENCE_TA_DONE_SHIFT, 1);
+               task = NULL;
+               pushed_raster_task = 1;
+       }
+
+       if (pushed_raster_task)
+               psb_schedule_raster(dev_priv, scheduler);
+
+       if (!task)
+               return;
+
+       /*
+        * Still waiting for a vistest?
+        */
+
+       if (scheduler->feedback_task == task)
+               return;
+
+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
+
+       /*
+        * Block ta from trying to use both hardware contexts
+        * without the rasterizer starting to render from one of them.
+        */
+
+       if (!list_empty(&scheduler->raster_queue)) {
+               return;
+       }
+#endif
+
+#ifdef PSB_BLOCK_OVERLAP
+       /*
+        * Make sure rasterizer isn't doing anything.
+        */
+       if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
+               return;
+#endif
+       if (list_empty(&scheduler->hw_scenes))
+               return;
+
+#ifdef FIX_TG_16
+       if (psb_check_2d_idle(dev_priv))
+               return;
+#endif
+
+       list_del_init(&task->head);
+       if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
+               scheduler->ta_state = 1;
+
+       scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
+       scheduler->idle = 0;
+       scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
+
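+       /*
+        * Tasks resubmitted after an out-of-memory condition must wait
+        * for the xhw fire reply before any TA events are handled;
+        * ordinary tasks have PSB_RF_FIRE_TA preset so their events can
+        * be processed as soon as they arrive.
+        */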
+       task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
+           0x00000000 : PSB_RF_FIRE_TA;
+
+       (void)psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
+       psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, task);
+       psb_schedule_watchdog(dev_priv);
+}
+
+static int psb_fire_raster(struct psb_scheduler *scheduler,
+                          struct psb_task *task)
+{
+       struct drm_device *dev = scheduler->dev;
+       struct drm_psb_private *dev_priv = (struct drm_psb_private *)
+           dev->dev_private;
+
+       PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
+
+       return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
+}
+
+/*
+ * Take the first rasterization task from the hp raster queue or from the
+ * raster queue and fire the rasterizer.
+ */
+
+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
+                               struct psb_scheduler *scheduler)
+{
+       struct psb_task *task;
+       struct list_head *list;
+
+       if (scheduler->idle_count != 0)
+               return;
+
+       if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
+               PSB_DEBUG_RENDER("Raster busy.\n");
+               return;
+       }
+/* #ifdef PSB_BLOCK_OVERLAP */
+#if 1                         /* overlap blocking currently forced on */
+       if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
+               PSB_DEBUG_RENDER("TA busy.\n");
+               return;
+       }
+#endif
+
+       if (!list_empty(&scheduler->hp_raster_queue))
+               list = scheduler->hp_raster_queue.next;
+       else if (!list_empty(&scheduler->raster_queue))
+               list = scheduler->raster_queue.next;
+       else {
+               PSB_DEBUG_RENDER("Nothing in list\n");
+               return;
+       }
+
+       task = list_entry(list, struct psb_task, head);
+
+       /*
+        * Sometimes changing ZLS format requires an ISP reset.
+        * Doesn't seem to consume too much time.
+        */
+
+       if (task->scene)
+               PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
+
+       scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
+
+       list_del_init(list);
+       scheduler->idle = 0;
+       scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
+       scheduler->total_raster_jiffies = 0;
+
+       if (task->scene)
+               PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+
+       (void)psb_reg_submit(dev_priv, task->raster_cmds,
+                            task->raster_cmd_size);
+
+       if (task->scene) {
+               task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
+                   0x00000000 : PSB_RF_FIRE_RASTER;
+               psb_set_scene_fire(scheduler,
+                                  task->scene, PSB_SCENE_ENGINE_RASTER, task);
+       } else {
+               task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
+               psb_fire_raster(scheduler, task);
+       }
+       psb_schedule_watchdog(dev_priv);
+}
+
+int psb_extend_raster_timeout(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+       int ret;
+
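+       /*
+        * Grant the rasterizer another PSB_RASTER_TIMEOUT, accounting
+        * the time already consumed; fail with -EBUSY once the total
+        * exceeds PSB_ALLOWED_RASTER_RUNTIME.
+        */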
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       scheduler->total_raster_jiffies +=
+           jiffies - scheduler->raster_end_jiffies + PSB_RASTER_TIMEOUT;
+       scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
+       ret = (scheduler->total_raster_jiffies > PSB_ALLOWED_RASTER_RUNTIME) ?
+           -EBUSY : 0;
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+       return ret;
+}
+
+/*
+ * TA done handler.
+ */
+
+static void psb_ta_done(struct drm_psb_private *dev_priv,
+                       struct psb_scheduler *scheduler)
+{
+       struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
+       struct psb_scene *scene = task->scene;
+
+       PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
+
+       switch (task->ta_complete_action) {
+       case PSB_RASTER_BLOCK:
+               scheduler->ta_state = 1;
+               scene->flags |=
+                   (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
+               list_add_tail(&task->head, &scheduler->raster_queue);
+               break;
+       case PSB_RASTER:
+               scene->flags |=
+                   (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
+               list_add_tail(&task->head, &scheduler->raster_queue);
+               break;
+       case PSB_RETURN:
+               scheduler->ta_state = 0;
+               scene->flags |= PSB_SCENE_FLAG_DIRTY;
+               list_add_tail(&scene->hw_scene->head, &scheduler->hw_scenes);
+
+               break;
+       }
+
+       scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
+
+#ifdef FIX_TG_16
+       psb_2d_atomic_unlock(dev_priv);
+#endif
+
+       if (task->ta_complete_action != PSB_RASTER_BLOCK)
+               psb_report_fence(scheduler, task->engine, task->sequence,
+                                _PSB_FENCE_TA_DONE_SHIFT, 1);
+
+       psb_schedule_raster(dev_priv, scheduler);
+       psb_schedule_ta(dev_priv, scheduler);
+       psb_set_idle(scheduler);
+
+       if (task->ta_complete_action != PSB_RETURN)
+               return;
+
+       list_add_tail(&task->head, &scheduler->task_done_queue);
+       schedule_delayed_work(&scheduler->wq, 1);
+}
+
+/*
+ * Rasterizer done handler.
+ */
+
+static void psb_raster_done(struct drm_psb_private *dev_priv,
+                           struct psb_scheduler *scheduler)
+{
+       struct psb_task *task =
+           scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
+       struct psb_scene *scene = task->scene;
+       uint32_t complete_action = task->raster_complete_action;
+
+       PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
+
+       scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
+
+       if (complete_action != PSB_RASTER)
+               psb_schedule_raster(dev_priv, scheduler);
+
+       if (scene) {
+               if (task->feedback.page) {
+                       if (unlikely(scheduler->feedback_task)) {
+                               /*
+                                * This should never happen, since the previous
+                                * feedback query will return before the next
+                                * raster task is fired.
+                                */
+                               DRM_ERROR("Feedback task busy.\n");
+                       }
+                       scheduler->feedback_task = task;
+                       psb_xhw_vistest(dev_priv, &task->buf);
+               }
+               switch (complete_action) {
+               case PSB_RETURN:
+                       scene->flags &=
+                           ~(PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
+                       list_add_tail(&scene->hw_scene->head,
+                                     &scheduler->hw_scenes);
+                       psb_report_fence(scheduler, task->engine,
+                                        task->sequence,
+                                        _PSB_FENCE_SCENE_DONE_SHIFT, 1);
+                       if (task->flags & PSB_FIRE_FLAG_XHW_OOM) {
+                               scheduler->ta_state = 0;
+                       }
+                       break;
+               case PSB_RASTER:
+                       list_add(&task->head, &scheduler->raster_queue);
+                       task->raster_complete_action = PSB_RETURN;
+                       psb_schedule_raster(dev_priv, scheduler);
+                       break;
+               case PSB_TA:
+                       list_add(&task->head, &scheduler->ta_queue);
+                       scheduler->ta_state = 0;
+                       task->raster_complete_action = PSB_RETURN;
+                       task->ta_complete_action = PSB_RASTER;
+                       break;
+
+               }
+       }
+       psb_schedule_ta(dev_priv, scheduler);
+       psb_set_idle(scheduler);
+
+       if (complete_action == PSB_RETURN) {
+               if (task->scene == NULL) {
+                       psb_report_fence(scheduler, task->engine,
+                                        task->sequence,
+                                        _PSB_FENCE_RASTER_DONE_SHIFT, 1);
+               }
+               if (!task->feedback.page) {
+                       list_add_tail(&task->head, &scheduler->task_done_queue);
+                       schedule_delayed_work(&scheduler->wq, 1);
+               }
+       }
+
+}
+
+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       scheduler->idle_count++;
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       if (--scheduler->idle_count == 0) {
+               psb_schedule_ta(dev_priv, scheduler);
+               psb_schedule_raster(dev_priv, scheduler);
+       }
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+       int ret;
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       ret = scheduler->idle_count != 0 && scheduler->idle;
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+       return ret;
+}
+
+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+       int ret;
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       ret = (scheduler->idle &&
+              list_empty(&scheduler->raster_queue) &&
+              list_empty(&scheduler->ta_queue) &&
+              list_empty(&scheduler->hp_raster_queue));
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+       return ret;
+}
+
+static void psb_ta_oom(struct drm_psb_private *dev_priv,
+                      struct psb_scheduler *scheduler)
+{
+
+       struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
+       if (!task)
+               return;
+
+       if (task->aborting)
+               return;
+       task->aborting = 1;
+
+       DRM_INFO("Info: TA out of parameter memory.\n");
+
+       (void)psb_xhw_ta_oom(dev_priv, &task->buf, task->scene->hw_cookie);
+}
+
+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
+                            struct psb_scheduler *scheduler)
+{
+
+       struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
+       uint32_t flags;
+       if (!task)
+               return;
+
+       psb_xhw_ta_oom_reply(dev_priv, &task->buf,
+                            task->scene->hw_cookie,
+                            &task->ta_complete_action,
+                            &task->raster_complete_action, &flags);
+       task->flags |= flags;
+       task->aborting = 0;
+       psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
+}
+
+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
+                                 struct psb_scheduler *scheduler)
+{
+       DRM_ERROR("TA hw scene freed.\n");
+}
+
+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
+                             struct psb_scheduler *scheduler)
+{
+       struct psb_task *task = scheduler->feedback_task;
+       uint8_t *feedback_map;
+       uint32_t add;
+       uint32_t cur;
+       struct drm_psb_vistest *vistest;
+       int i;
+
+       scheduler->feedback_task = NULL;
+       if (!task) {
+               DRM_ERROR("No Poulsbo feedback task.\n");
+               return;
+       }
+       if (!task->feedback.page) {
+               DRM_ERROR("No Poulsbo feedback page.\n");
+               goto out;
+       }
+
+       if (in_irq())
+               feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
+       else
+               feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
+
+       /*
+        * Loop over all requested vistest components here.
+        * Only one (vistest) currently.
+        */
+
+       vistest = (struct drm_psb_vistest *)
+           (feedback_map + task->feedback.offset);
+
+       for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
+               add = task->buf.arg.arg.feedback[i];
+               cur = vistest->vt[i];
+
+               /*
+                * Vistest saturates.
+                */
+
+               vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
+       }
+       if (in_irq())
+               kunmap_atomic(feedback_map, KM_IRQ0);
+       else
+               kunmap_atomic(feedback_map, KM_USER0);
+      out:
+       psb_report_fence(scheduler, task->engine, task->sequence,
+                        _PSB_FENCE_FEEDBACK_SHIFT, 1);
+
+       if (list_empty(&task->head)) {
+               list_add_tail(&task->head, &scheduler->task_done_queue);
+               schedule_delayed_work(&scheduler->wq, 1);
+       } else
+               psb_schedule_ta(dev_priv, scheduler);
+}
+
+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
+                             struct psb_scheduler *scheduler)
+{
+       struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
+
+       psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
+
+       psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
+}
+
+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
+                                 struct psb_scheduler *scheduler)
+{
+       struct psb_task *task =
+           scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
+       uint32_t reply_flags;
+
+       if (!task) {
+               DRM_ERROR("Null task.\n");
+               return;
+       }
+
+       task->raster_complete_action = task->buf.arg.arg.sb.rca;
+       psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
+
+       reply_flags = PSB_RF_FIRE_RASTER;
+       if (task->raster_complete_action == PSB_RASTER)
+               reply_flags |= PSB_RF_DEALLOC;
+
+       psb_dispatch_raster(dev_priv, scheduler, reply_flags);
+}
+
+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
+                             struct psb_scheduler *scheduler)
+{
+       uint32_t type;
+       int ret;
+       unsigned long irq_flags;
+
+       /*
+        * The xhw helper cannot write directly to the comm page, so
+        * mirror its reply into the page here; firmware would have
+        * written it directly.
+        */
+
+       ret = psb_xhw_handler(dev_priv);
+       if (unlikely(ret))
+               return ret;
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       type = dev_priv->comm[PSB_COMM_USER_IRQ];
+       dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
+       if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
+               dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
+               DRM_ERROR("Lost Poulsbo hardware event.\n");
+       }
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+
+       if (type == 0)
+               return 0;
+
+       switch (type) {
+       case PSB_UIRQ_VISTEST:
+               psb_vistest_reply(dev_priv, scheduler);
+               break;
+       case PSB_UIRQ_OOM_REPLY:
+               psb_ta_oom_reply(dev_priv, scheduler);
+               break;
+       case PSB_UIRQ_FIRE_TA_REPLY:
+               psb_ta_fire_reply(dev_priv, scheduler);
+               break;
+       case PSB_UIRQ_FIRE_RASTER_REPLY:
+               psb_raster_fire_reply(dev_priv, scheduler);
+               break;
+       default:
+               DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
+       }
+       return 0;
+}
+
+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+       int ret;
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       ret = psb_user_interrupt(dev_priv, scheduler);
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+       return ret;
+}
+
+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
+                           struct psb_scheduler *scheduler,
+                           uint32_t reply_flag)
+{
+       struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
+       uint32_t flags;
+       uint32_t mask;
+
+       task->reply_flags |= reply_flag;
+       flags = task->reply_flags;
+       mask = PSB_RF_FIRE_TA;
+
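+       /*
+        * Nothing is handled for this task until the fire gate is open,
+        * i.e. PSB_RF_FIRE_TA was preset or the fire reply has arrived.
+        */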
+       if (!(flags & mask))
+               return;
+
+       mask = PSB_RF_TA_DONE;
+       if ((flags & mask) == mask) {
+               task->reply_flags &= ~mask;
+               psb_ta_done(dev_priv, scheduler);
+       }
+
+       mask = PSB_RF_OOM;
+       if ((flags & mask) == mask) {
+               task->reply_flags &= ~mask;
+               psb_ta_oom(dev_priv, scheduler);
+       }
+
+       mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
+       if ((flags & mask) == mask) {
+               task->reply_flags &= ~mask;
+               psb_ta_done(dev_priv, scheduler);
+       }
+}
+
+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
+                               struct psb_scheduler *scheduler,
+                               uint32_t reply_flag)
+{
+       struct psb_task *task =
+           scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
+       uint32_t flags;
+       uint32_t mask;
+
+       task->reply_flags |= reply_flag;
+       flags = task->reply_flags;
+       mask = PSB_RF_FIRE_RASTER;
+
+       if (!(flags & mask))
+               return;
+
+       /*
+        * For rasterizer-only tasks, don't report fence done here,
+        * as this is time consuming and the rasterizer wants a new
+        * task immediately. For other tasks, the hardware is probably
+        * still busy deallocating TA memory, so we can report
+        * fence done in parallel.
+        */
+
+       if (task->raster_complete_action == PSB_RETURN &&
+           (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
+               psb_report_fence(scheduler, task->engine, task->sequence,
+                                _PSB_FENCE_RASTER_DONE_SHIFT, 1);
+       }
+
+       mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
+       if ((flags & mask) == mask) {
+               task->reply_flags &= ~mask;
+               psb_raster_done(dev_priv, scheduler);
+       }
+}
+
+void psb_scheduler_handler(struct drm_psb_private *dev_priv, uint32_t status)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+
+       spin_lock(&scheduler->lock);
+
+       if (status & _PSB_CE_PIXELBE_END_RENDER) {
+               psb_dispatch_raster(dev_priv, scheduler, PSB_RF_RASTER_DONE);
+       }
+       if (status & _PSB_CE_DPM_3D_MEM_FREE) {
+               psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
+       }
+       if (status & _PSB_CE_TA_FINISHED) {
+               psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
+       }
+       if (status & _PSB_CE_TA_TERMINATE) {
+               psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
+       }
+       if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
+                     _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
+                     _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
+               psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
+       }
+       if (status & _PSB_CE_DPM_TA_MEM_FREE) {
+               psb_ta_hw_scene_freed(dev_priv, scheduler);
+       }
+       if (status & _PSB_CE_SW_EVENT) {
+               psb_user_interrupt(dev_priv, scheduler);
+       }
+       spin_unlock(&scheduler->lock);
+}
+
+static void psb_free_task_wq(struct work_struct *work)
+{
+       struct psb_scheduler *scheduler =
+           container_of(work, struct psb_scheduler, wq.work);
+
+       struct drm_device *dev = scheduler->dev;
+       struct list_head *list, *next;
+       unsigned long irq_flags;
+       struct psb_task *task;
+
+       if (!mutex_trylock(&scheduler->task_wq_mutex))
+               return;
+
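+       /*
+        * Walk the done queue with the scheduler lock dropped around the
+        * sleeping unref calls; tasks whose xhw buffer is still pending
+        * are put back on the queue and the work is rescheduled.
+        */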
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       list_for_each_safe(list, next, &scheduler->task_done_queue) {
+               task = list_entry(list, struct psb_task, head);
+               list_del_init(list);
+               spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+
+               PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
+                                "Feedback bo 0x%08lx, done %d\n",
+                                task->sequence, (unsigned long)task->scene,
+                                (unsigned long)task->feedback.bo,
+                                atomic_read(&task->buf.done));
+
+               if (task->scene) {
+                       mutex_lock(&dev->struct_mutex);
+                       PSB_DEBUG_RENDER("Unref scene %d\n", task->sequence);
+                       psb_scene_unref_devlocked(&task->scene);
+                       if (task->feedback.bo) {
+                               PSB_DEBUG_RENDER("Unref feedback bo %d\n",
+                                                task->sequence);
+                               drm_bo_usage_deref_locked(&task->feedback.bo);
+                       }
+                       mutex_unlock(&dev->struct_mutex);
+               }
+
+               if (atomic_read(&task->buf.done)) {
+                       PSB_DEBUG_RENDER("Deleting task %d\n", task->sequence);
+                       drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
+                       task = NULL;
+               }
+               spin_lock_irqsave(&scheduler->lock, irq_flags);
+               if (task != NULL)
+                       list_add(list, &scheduler->task_done_queue);
+       }
+       if (!list_empty(&scheduler->task_done_queue)) {
+               PSB_DEBUG_RENDER("Rescheduling wq\n");
+               schedule_delayed_work(&scheduler->wq, 1);
+       }
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+
+       mutex_unlock(&scheduler->task_wq_mutex);
+}
+
+/*
+ * Check if any of the tasks in the queues is using a scene.
+ * In that case we know the TA memory buffer objects are
+ * fenced and will not be evicted until that fence is signaled.
+ */
+
+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+       struct psb_task *task;
+       struct psb_task *next_task;
+
+       dev_priv->force_ta_mem_load = 1;
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, head) {
+               if (task->scene) {
+                       dev_priv->force_ta_mem_load = 0;
+                       break;
+               }
+       }
+       list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
+                                head) {
+               if (task->scene) {
+                       dev_priv->force_ta_mem_load = 0;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+void psb_scheduler_reset(struct drm_psb_private *dev_priv, int error_condition)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long wait_jiffies;
+       unsigned long cur_jiffies;
+       struct psb_task *task;
+       struct psb_task *next_task;
+       unsigned long irq_flags;
+
+       psb_scheduler_pause(dev_priv);
+       if (!psb_scheduler_idle(dev_priv)) {
+               spin_lock_irqsave(&scheduler->lock, irq_flags);
+
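+               /*
+                * Give the engines until their pending timeouts expire
+                * before declaring a lockup.
+                */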
+               cur_jiffies = jiffies;
+               wait_jiffies = cur_jiffies;
+               if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
+                   time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
+                       wait_jiffies = scheduler->ta_end_jiffies;
+               if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
+                   time_after_eq(scheduler->raster_end_jiffies, wait_jiffies))
+                       wait_jiffies = scheduler->raster_end_jiffies;
+
+               wait_jiffies -= cur_jiffies;
+               spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+
+               (void)wait_event_timeout(scheduler->idle_queue,
+                                        psb_scheduler_idle(dev_priv),
+                                        wait_jiffies);
+       }
+
+       if (!psb_scheduler_idle(dev_priv)) {
+               spin_lock_irqsave(&scheduler->lock, irq_flags);
+               task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
+               if (task) {
+                       DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
+                       if (task->engine == PSB_ENGINE_HPRAST) {
+                               psb_fence_error(scheduler->dev,
+                                               PSB_ENGINE_HPRAST,
+                                               task->sequence,
+                                               _PSB_FENCE_TYPE_RASTER_DONE,
+                                               error_condition);
+
+                               list_del(&task->head);
+                               psb_xhw_clean_buf(dev_priv, &task->buf);
+                               list_add_tail(&task->head,
+                                             &scheduler->task_done_queue);
+                       } else {
+                               list_add(&task->head, &scheduler->raster_queue);
+                       }
+               }
+               scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
+               task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
+               if (task) {
+                       DRM_ERROR("Detected Poulsbo ta lockup.\n");
+                       list_add_tail(&task->head, &scheduler->raster_queue);
+#ifdef FIX_TG_16
+                       psb_2d_atomic_unlock(dev_priv);
+#endif
+               }
+               scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
+               scheduler->ta_state = 0;
+
+#ifdef FIX_TG_16
+               atomic_set(&dev_priv->ta_wait_2d, 0);
+               atomic_set(&dev_priv->ta_wait_2d_irq, 0);
+               wake_up(&dev_priv->queue_2d);
+#endif
+               spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+       }
+
+       /*
+        * Empty raster queue.
+        */
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
+                                head) {
+               struct psb_scene *scene = task->scene;
+
+               psb_fence_error(scheduler->dev,
+                               task->engine,
+                               task->sequence,
+                               _PSB_FENCE_TYPE_TA_DONE |
+                               _PSB_FENCE_TYPE_RASTER_DONE |
+                               _PSB_FENCE_TYPE_SCENE_DONE |
+                               _PSB_FENCE_TYPE_FEEDBACK, error_condition);
+               if (scene) {
+                       scene->flags = 0;
+                       if (scene->hw_scene) {
+                               list_add_tail(&scene->hw_scene->head,
+                                             &scheduler->hw_scenes);
+                               scene->hw_scene = NULL;
+                       }
+               }
+
+               psb_xhw_clean_buf(dev_priv, &task->buf);
+               list_del(&task->head);
+               list_add_tail(&task->head, &scheduler->task_done_queue);
+       }
+
+       schedule_delayed_work(&scheduler->wq, 1);
+       scheduler->idle = 1;
+       wake_up(&scheduler->idle_queue);
+
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+       psb_scheduler_restart(dev_priv);
+
+}
+
+int psb_scheduler_init(struct drm_device *dev, struct psb_scheduler *scheduler)
+{
+       struct psb_hw_scene *hw_scene;
+       int i;
+
+       memset(scheduler, 0, sizeof(*scheduler));
+       scheduler->dev = dev;
+       mutex_init(&scheduler->task_wq_mutex);
+       spin_lock_init(&scheduler->lock);
+       scheduler->idle = 1;
+
+       INIT_LIST_HEAD(&scheduler->ta_queue);
+       INIT_LIST_HEAD(&scheduler->raster_queue);
+       INIT_LIST_HEAD(&scheduler->hp_raster_queue);
+       INIT_LIST_HEAD(&scheduler->hw_scenes);
+       INIT_LIST_HEAD(&scheduler->task_done_queue);
+       INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
+       init_waitqueue_head(&scheduler->idle_queue);
+
+       for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
+               hw_scene = &scheduler->hs[i];
+               hw_scene->context_number = i;
+               list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
+       }
+
+       for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
+               scheduler->seq[i].reported = 0;
+       }
+
+       return 0;
+}
+
+/*
+ * Scene references maintained by the scheduler are not refcounted.
+ * Remove all references to a particular scene here.
+ */
+
+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)scene->dev->dev_private;
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       struct psb_hw_scene *hw_scene;
+       unsigned long irq_flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
+               hw_scene = &scheduler->hs[i];
+               if (hw_scene->last_scene == scene) {
+                       BUG_ON(list_empty(&hw_scene->head));
+                       hw_scene->last_scene = NULL;
+               }
+       }
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
+{
+       flush_scheduled_work();
+}
+
+static int psb_setup_task_devlocked(struct drm_device *dev,
+                                   struct drm_psb_cmdbuf_arg *arg,
+                                   struct drm_buffer_object *raster_cmd_buffer,
+                                   struct drm_buffer_object *ta_cmd_buffer,
+                                   struct drm_buffer_object *oom_cmd_buffer,
+                                   struct psb_scene *scene,
+                                   enum psb_task_type task_type,
+                                   uint32_t engine,
+                                   uint32_t flags, struct psb_task **task_p)
+{
+       struct psb_task *task;
+       int ret;
+
+       if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
+               DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
+               return -EINVAL;
+       }
+       if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
+               DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
+               return -EINVAL;
+       }
+       if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
+               DRM_ERROR("Too many raster cmds %d.\n", arg->oom_size);
+               return -EINVAL;
+       }
+
+       task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
+       if (!task)
+               return -ENOMEM;
+
+       atomic_set(&task->buf.done, 1);
+       task->engine = engine;
+       INIT_LIST_HEAD(&task->head);
+       INIT_LIST_HEAD(&task->buf.head);
+       if (ta_cmd_buffer && arg->ta_size != 0) {
+               task->ta_cmd_size = arg->ta_size;
+               ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
+                                            arg->ta_offset,
+                                            arg->ta_size,
+                                            PSB_ENGINE_TA, task->ta_cmds);
+               if (ret)
+                       goto out_err;
+       }
+       if (raster_cmd_buffer) {
+               task->raster_cmd_size = arg->cmdbuf_size;
+               ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
+                                            arg->cmdbuf_offset,
+                                            arg->cmdbuf_size,
+                                            PSB_ENGINE_TA, task->raster_cmds);
+               if (ret)
+                       goto out_err;
+       }
+       if (oom_cmd_buffer && arg->oom_size != 0) {
+               task->oom_cmd_size = arg->oom_size;
+               ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
+                                            arg->oom_offset,
+                                            arg->oom_size,
+                                            PSB_ENGINE_TA, task->oom_cmds);
+               if (ret)
+                       goto out_err;
+       }
+       task->task_type = task_type;
+       task->flags = flags;
+       if (scene)
+               task->scene = psb_scene_ref(scene);
+
+#ifdef PSB_DETEAR
+       if (arg->sVideoInfo.flag == PSB_VIDEO_BLIT) {
+               task->bVideoFlag = PSB_VIDEO_BLIT;
+               task->x = arg->sVideoInfo.x;
+               task->y = arg->sVideoInfo.y;
+               task->w = arg->sVideoInfo.w;
+               task->h = arg->sVideoInfo.h;
+               task->pFBBOHandle = arg->sVideoInfo.pFBBOHandle;
+               task->pFBVirtAddr = arg->sVideoInfo.pFBVirtAddr;
+       }
+#endif
+
+       *task_p = task;
+       return 0;
+      out_err:
+       drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
+       *task_p = NULL;
+       return ret;
+}
+
+int psb_cmdbuf_ta(struct drm_file *priv,
+                 struct drm_psb_cmdbuf_arg *arg,
+                 struct drm_buffer_object *cmd_buffer,
+                 struct drm_buffer_object *ta_buffer,
+                 struct drm_buffer_object *oom_buffer,
+                 struct psb_scene *scene,
+                 struct psb_feedback_info *feedback,
+                 struct drm_fence_arg *fence_arg)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_fence_object *fence = NULL;
+       struct psb_task *task = NULL;
+       uint32_t sequence;
+       int ret;
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+
+       PSB_DEBUG_RENDER("Cmdbuf ta\n");
+
+       ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
+       if (ret)
+               return -EAGAIN;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, ta_buffer,
+                                      oom_buffer, scene,
+                                      psb_ta_task, PSB_ENGINE_TA,
+                                      PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (ret)
+               goto out_err;
+
+       task->feedback = *feedback;
+
+       /*
+        * Hand the task over to the scheduler.
+        */
+
+       spin_lock_irq(&scheduler->lock);
+       task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
+       sequence = task->sequence; /* the task may be freed once the lock is dropped */
+
+       task->ta_complete_action = PSB_RASTER;
+       task->raster_complete_action = PSB_RETURN;
+
+       list_add_tail(&task->head, &scheduler->ta_queue);
+       PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
+
+       psb_schedule_ta(dev_priv, scheduler);
+       spin_unlock_irq(&scheduler->lock);
+
+       psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
+       drm_regs_fence(&dev_priv->use_manager, fence);
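+       /*
+        * The EXE fence type only tracks command submission, which has
+        * completed by now, so report it signaled immediately.
+        */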
+       if (fence) {
+               spin_lock_irq(&scheduler->lock);
+               psb_report_fence(scheduler, PSB_ENGINE_TA, sequence, 0, 1);
+               spin_unlock_irq(&scheduler->lock);
+               fence_arg->signaled |= DRM_FENCE_TYPE_EXE;
+       }
+
+      out_err:
+       if (ret && ret != -EAGAIN)
+               DRM_ERROR("TA task queue job failed.\n");
+
+       if (fence) {
+#ifdef PSB_WAIT_FOR_TA_COMPLETION
+               drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
+                                     _PSB_FENCE_TYPE_TA_DONE);
+#ifdef PSB_BE_PARANOID
+               drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
+                                     _PSB_FENCE_TYPE_SCENE_DONE);
+#endif
+#endif
+               drm_fence_usage_deref_unlocked(&fence);
+       }
+       mutex_unlock(&dev_priv->reset_mutex);
+
+       return ret;
+}
+
+int psb_cmdbuf_raster(struct drm_file *priv,
+                     struct drm_psb_cmdbuf_arg *arg,
+                     struct drm_buffer_object *cmd_buffer,
+                     struct drm_fence_arg *fence_arg)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_fence_object *fence = NULL;
+       struct psb_task *task = NULL;
+       int ret;
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       uint32_t sequence_temp;
+
+       PSB_DEBUG_RENDER("Cmdbuf Raster\n");
+
+       ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
+       if (ret)
+               return -EAGAIN;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, NULL, NULL,
+                                      NULL, psb_raster_task,
+                                      PSB_ENGINE_TA, 0, &task);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (ret)
+               goto out_err;
+
+       /*
+        * Hand the task over to the scheduler.
+        */
+
+       spin_lock_irq(&scheduler->lock);
+       task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
+       sequence_temp = task->sequence; /* the task may be freed once the lock is dropped */
+       task->ta_complete_action = PSB_RASTER;
+       task->raster_complete_action = PSB_RETURN;
+
+       list_add_tail(&task->head, &scheduler->ta_queue);
+       PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
+       psb_schedule_ta(dev_priv, scheduler);
+       spin_unlock_irq(&scheduler->lock);
+
+       psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
+       drm_regs_fence(&dev_priv->use_manager, fence);
+       if (fence) {
+               spin_lock_irq(&scheduler->lock);
+               psb_report_fence(scheduler, PSB_ENGINE_TA, sequence_temp, 0, 1);
+               spin_unlock_irq(&scheduler->lock);
+               fence_arg->signaled |= DRM_FENCE_TYPE_EXE;
+       }
+      out_err:
+       if (ret && ret != -EAGAIN)
+               DRM_ERROR("Raster task queue job failed.\n");
+
+       if (fence) {
+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
+               drm_fence_object_wait(fence, 1, 1, fence->type);
+#endif
+               drm_fence_usage_deref_unlocked(&fence);
+       }
+
+       mutex_unlock(&dev_priv->reset_mutex);
+
+       return ret;
+}
+
+#ifdef FIX_TG_16
+
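+/*
+ * FIX_TG_16 workaround: the TA must not be fired while the 2D engine is
+ * busy. Take the 2D lock if the pipe is idle; otherwise queue a fence
+ * and flush, arm the 2D interrupt and return -EBUSY so scheduling is
+ * retried once the 2D pipe drains.
+ */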
+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
+{
+       if (psb_2d_trylock(dev_priv)) {
+               if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+                   !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+                      _PSB_C2B_STATUS_BUSY))) {
+                       return 0;
+               }
+               if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
+                       psb_2D_irq_on(dev_priv);
+
+               PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
+               PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
+               (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
+
+               psb_2d_atomic_unlock(dev_priv);
+       }
+
+       atomic_set(&dev_priv->ta_wait_2d, 1);
+       return -EBUSY;
+}
+
+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+
+       if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
+               psb_schedule_ta(dev_priv, scheduler);
+               if (atomic_read(&dev_priv->waiters_2d) != 0)
+                       wake_up(&dev_priv->queue_2d);
+       }
+}
+
+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
+               atomic_set(&dev_priv->ta_wait_2d, 0);
+               psb_2D_irq_off(dev_priv);
+               psb_schedule_ta(dev_priv, scheduler);
+               if (atomic_read(&dev_priv->waiters_2d) != 0)
+                       wake_up(&dev_priv->queue_2d);
+       }
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+/*
+ * 2D locking functions. Can't use a mutex since the trylock() and
+ * unlock() methods need to be accessible from interrupt context.
+ */
+
+static int psb_2d_trylock(struct drm_psb_private *dev_priv)
+{
+       return (atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0);
+}
+
+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
+{
+       atomic_set(&dev_priv->lock_2d, 0);
+       if (atomic_read(&dev_priv->waiters_2d) != 0)
+               wake_up(&dev_priv->queue_2d);
+}
+
+void psb_2d_unlock(struct drm_psb_private *dev_priv)
+{
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&scheduler->lock, irq_flags);
+       psb_2d_atomic_unlock(dev_priv);
+       if (atomic_read(&dev_priv->ta_wait_2d) != 0)
+               psb_atomic_resume_ta_2d_idle(dev_priv);
+       spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+void psb_2d_lock(struct drm_psb_private *dev_priv)
+{
+       atomic_inc(&dev_priv->waiters_2d);
+       wait_event(dev_priv->queue_2d, atomic_read(&dev_priv->ta_wait_2d) == 0);
+       wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
+       atomic_dec(&dev_priv->waiters_2d);
+}
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_schedule.h b/psb-kernel-source-4.41.1/psb_schedule.h
new file mode 100644 (file)
index 0000000..b8bb428
--- /dev/null
@@ -0,0 +1,177 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+
+#ifndef _PSB_SCHEDULE_H_
+#define _PSB_SCHEDULE_H_
+
+#include "drmP.h"
+
+enum psb_task_type {
+       psb_ta_midscene_task,
+       psb_ta_task,
+       psb_raster_task,
+       psb_freescene_task
+};
+
+#define PSB_MAX_TA_CMDS 60
+#define PSB_MAX_RASTER_CMDS 60
+#define PSB_MAX_OOM_CMDS 6
+
+struct psb_xhw_buf {
+       struct list_head head;
+       int copy_back;
+       atomic_t done;
+       struct drm_psb_xhw_arg arg;
+};
+
+struct psb_feedback_info {
+       struct drm_buffer_object *bo;
+       struct page *page;
+       uint32_t offset;
+};
+
+struct psb_task {
+       struct list_head head;
+       struct psb_scene *scene;
+       struct psb_feedback_info feedback;
+       enum psb_task_type task_type;
+       uint32_t engine;
+       uint32_t sequence;
+       uint32_t ta_cmds[PSB_MAX_TA_CMDS];
+       uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
+       uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
+       uint32_t ta_cmd_size;
+       uint32_t raster_cmd_size;
+       uint32_t oom_cmd_size;
+       uint32_t feedback_offset;
+       uint32_t ta_complete_action;
+       uint32_t raster_complete_action;
+       uint32_t hw_cookie;
+       uint32_t flags;
+       uint32_t reply_flags;
+       uint32_t aborting;
+       struct psb_xhw_buf buf;
+
+#ifdef PSB_DETEAR
+       uint32_t bVideoFlag;
+       uint32_t x, y, w, h;
+       uint32_t pFBBOHandle;
+       void *pFBVirtAddr;
+#endif
+};
+
+struct psb_hw_scene {
+       struct list_head head;
+       uint32_t context_number;
+
+       /*
+        * This pointer does not refcount the last_scene_buffer,
+        * so we must make sure it is set to NULL before destroying
+        * the corresponding task.
+        */
+
+       struct psb_scene *last_scene;
+};
+
+struct psb_scene;
+struct drm_psb_private;
+
+struct psb_scheduler_seq {
+       uint32_t sequence;
+       int reported;
+};
+
+struct psb_scheduler {
+       struct drm_device *dev;
+       struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
+       struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
+       struct mutex task_wq_mutex;
+       spinlock_t lock;
+       struct list_head hw_scenes;
+       struct list_head ta_queue;
+       struct list_head raster_queue;
+       struct list_head hp_raster_queue;
+       struct list_head task_done_queue;
+       struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
+       struct psb_task *feedback_task;
+       int ta_state;
+       struct psb_hw_scene *pending_hw_scene;
+       uint32_t pending_hw_scene_seq;
+       struct delayed_work wq;
+       struct psb_scene_pool *pool;
+       uint32_t idle_count;
+       int idle;
+       wait_queue_head_t idle_queue;
+       unsigned long ta_end_jiffies;
+       unsigned long raster_end_jiffies;
+       unsigned long total_raster_jiffies;
+};
+
+#define PSB_RF_FIRE_TA       (1 << 0)
+#define PSB_RF_OOM           (1 << 1)
+#define PSB_RF_OOM_REPLY     (1 << 2)
+#define PSB_RF_TERMINATE     (1 << 3)
+#define PSB_RF_TA_DONE       (1 << 4)
+#define PSB_RF_FIRE_RASTER   (1 << 5)
+#define PSB_RF_RASTER_DONE   (1 << 6)
+#define PSB_RF_DEALLOC       (1 << 7)
+
+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
+                                                  int shareable, uint32_t w,
+                                                  uint32_t h);
+extern uint32_t psb_scene_handle(struct psb_scene *scene);
+extern int psb_scheduler_init(struct drm_device *dev,
+                             struct psb_scheduler *scheduler);
+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
+extern int psb_cmdbuf_ta(struct drm_file *priv,
+                        struct drm_psb_cmdbuf_arg *arg,
+                        struct drm_buffer_object *cmd_buffer,
+                        struct drm_buffer_object *ta_buffer,
+                        struct drm_buffer_object *oom_buffer,
+                        struct psb_scene *scene,
+                        struct psb_feedback_info *feedback,
+                        struct drm_fence_arg *fence_arg);
+extern int psb_cmdbuf_raster(struct drm_file *priv,
+                            struct drm_psb_cmdbuf_arg *arg,
+                            struct drm_buffer_object *cmd_buffer,
+                            struct drm_fence_arg *fence_arg);
+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
+                                 uint32_t status);
+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
+
+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
+                                int *lockup, int *idle);
+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
+                               int error_condition);
+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
+extern int psb_extend_raster_timeout(struct drm_psb_private *dev_priv);
+
+#endif
diff --git a/psb-kernel-source-4.41.1/psb_setup.c b/psb-kernel-source-4.41.1/psb_setup.c
new file mode 100644 (file)
index 0000000..c480e0e
--- /dev/null
@@ -0,0 +1,17 @@
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "psb_drv.h"
+#include "i915_reg.h"
+#include "intel_crt.c"
+
+/* Fixed name */
+#define ACPI_EDID_LCD  "\\_SB_.PCI0.GFX0.DD04._DDC"
+#define ACPI_DOD       "\\_SB_.PCI0.GFX0._DOD"
+
+#include "intel_lvds.c"
+#include "intel_sdvo.c"
+#include "intel_display.c"
+#include "intel_modes.c"
diff --git a/psb-kernel-source-4.41.1/psb_sgx.c b/psb-kernel-source-4.41.1/psb_sgx.c
new file mode 100644 (file)
index 0000000..3027113
--- /dev/null
@@ -0,0 +1,1454 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+
+#include "drmP.h"
+#include "psb_drv.h"
+#include "psb_drm.h"
+#include "psb_reg.h"
+#include "psb_scene.h"
+#include "psb_detear.h"
+
+#include "psb_msvdx.h"
+
+int psb_submit_video_cmdbuf(struct drm_device *dev,
+                           struct drm_buffer_object *cmd_buffer,
+                           unsigned long cmd_offset, unsigned long cmd_size,
+                           struct drm_fence_object *fence);
+
+struct psb_dstbuf_cache {
+       unsigned int dst;
+       uint32_t *use_page;
+       unsigned int use_index;
+       uint32_t use_background;
+       struct drm_buffer_object *dst_buf;
+       unsigned long dst_offset;
+       uint32_t *dst_page;
+       unsigned int dst_page_offset;
+       struct drm_bo_kmap_obj dst_kmap;
+       int dst_is_iomem;
+};
+
+struct psb_buflist_item {
+       struct drm_buffer_object *bo;
+       void __user *data;
+       int ret;
+       int presumed_offset_correct;
+};
+
+
+#define PSB_REG_GRAN_SHIFT 2
+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
+#define PSB_MAX_REG 0x1000
+
+static const uint32_t disallowed_ranges[][2] = {
+       {0x0000, 0x0200},
+       {0x0208, 0x0214},
+       {0x021C, 0x0224},
+       {0x0230, 0x0234},
+       {0x0248, 0x024C},
+       {0x0254, 0x0358},
+       {0x0428, 0x0428},
+       {0x0430, 0x043C},
+       {0x0498, 0x04B4},
+       {0x04CC, 0x04D8},
+       {0x04E0, 0x07FC},
+       {0x0804, 0x0A58},
+       {0x0A68, 0x0A80},
+       {0x0AA0, 0x0B1C},
+       {0x0B2C, 0x0CAC},
+       {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
+};
+
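+/*
+ * Bitmap with one bit per register slot (4-byte granularity): bit
+ * (reg >> PSB_REG_GRAN_SHIFT) is set for registers that user command
+ * buffers must not touch.
+ */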
+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
+                                   (PSB_REG_GRANULARITY *
+                                    (sizeof(uint32_t) << 3))];
+
+static inline int psb_disallowed(uint32_t reg)
+{
+       reg >>= PSB_REG_GRAN_SHIFT;
+       return ((psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0);
+}
+
+void psb_init_disallowed(void)
+{
+       int i;
+       uint32_t reg, tmp;
+       static int initialized = 0;
+
+       if (initialized)
+               return;
+
+       initialized = 1;
+       memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
+
+       for (i = 0; i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
+            ++i) {
+               for (reg = disallowed_ranges[i][0];
+                    reg <= disallowed_ranges[i][1]; reg += 4) {
+                       tmp = reg >> 2;
+                       psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
+               }
+       }
+}
+
+static int psb_memcpy_check(uint32_t * dst, const uint32_t * src, uint32_t size)
+{
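+       /*
+        * The command buffer is a list of (register, value) pairs;
+        * size is in bytes, so there are size >> 3 pairs to check.
+        */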
+       size >>= 3;
+       while (size--) {
+               if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
+                       DRM_ERROR("Forbidden SGX register access: "
+                                 "0x%04x.\n", *src);
+                       return -EPERM;
+               }
+               *dst++ = *src++;
+               *dst++ = *src++;
+       }
+       return 0;
+}
+
+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+                                unsigned size)
+{
+       uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+       int ret = 0;
+
+      retry:
+       if (avail < size) {
+#if 0
+               /* We'd ideally
+                * like to have an IRQ-driven event here.
+                */
+
+               psb_2D_irq_on(dev_priv);
+               DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
+                           ((avail = PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
+               psb_2D_irq_off(dev_priv);
+               if (ret == 0)
+                       return 0;
+               if (ret == -EINTR) {
+                       ret = 0;
+                       goto retry;
+               }
+#else
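+               /* No IRQ path yet: busy-poll the 2D FIFO space counter. */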
+               avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+               goto retry;
+#endif
+       }
+       return ret;
+}
+
+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t * cmdbuf,
+                 unsigned size)
+{
+       int ret = 0;
+       int i;
+       unsigned submit_size;
+
+       while (size > 0) {
+               submit_size = (size < 0x60) ? size : 0x60;
+               size -= submit_size;
+               ret = psb_2d_wait_available(dev_priv, submit_size);
+               if (ret)
+                       return ret;
+
+               submit_size <<= 2;
+
+#ifdef PSB_DETEAR
+               /*
+                * Delayed 2D blit tasks are not executed immediately;
+                * save a copy of the command so it can be replayed later.
+                */
+               if (dev_priv->blit_2d) {
+                       /*
+                        * FIXME: dev_priv->blit_2d is a crude way to mark
+                        * delayed 2D blit tasks; use a better mechanism.
+                        */
+                       dev_priv->blit_2d = 0;
+                       memcpy(psb_blit_info.cmdbuf, cmdbuf, 10*4);
+               } else
+#endif /* PSB_DETEAR */
+               {
+                       for (i = 0; i < submit_size; i += 4) {
+                               PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+                       }
+                       (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+               }
+       }
+       return 0;
+}
+
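+/*
+ * Emit a software fence on the 2D engine: a 1x1 solid fill whose fill
+ * colour is the sequence number, written into the communication page
+ * and followed by a flush, presumably so the last completed sequence
+ * can be read back from that page.
+ */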
+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
+{
+       uint32_t buffer[8];
+       uint32_t *bufp = buffer;
+       int ret;
+
+       *bufp++ = PSB_2D_FENCE_BH;
+
+       *bufp++ = PSB_2D_DST_SURF_BH |
+           PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
+       *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
+
+       *bufp++ = PSB_2D_BLIT_BH |
+           PSB_2D_ROT_NONE |
+           PSB_2D_COPYORDER_TL2BR |
+           PSB_2D_DSTCK_DISABLE |
+           PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
+
+       *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
+       *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
+           (0 << PSB_2D_DST_YSTART_SHIFT);
+       *bufp++ = (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
+
+       *bufp++ = PSB_2D_FLUSH_BH;
+
+       psb_2d_lock(dev_priv);
+       ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
+       psb_2d_unlock(dev_priv);
+
+       if (!ret)
+               psb_schedule_watchdog(dev_priv);
+       return ret;
+}
+
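+/*
+ * Copy whole pages with the 2D blitter: each page is treated as one
+ * scanline of PAGE_SIZE / 4 ARGB pixels, and up to 2048 pages go into
+ * a single blit. A non-zero direction copies bottom-up, which keeps
+ * overlapping copies towards higher addresses correct.
+ */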
+int psb_emit_2d_copy_blit(struct drm_device *dev,
+                         uint32_t src_offset,
+                         uint32_t dst_offset, uint32_t pages, int direction)
+{
+       uint32_t cur_pages;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       uint32_t buf[10];
+       uint32_t *bufp;
+       uint32_t xstart;
+       uint32_t ystart;
+       uint32_t blit_cmd;
+       uint32_t pg_add;
+       int ret = 0;
+
+       if (!dev_priv)
+               return 0;
+
+       if (direction) {
+               pg_add = (pages - 1) << PAGE_SHIFT;
+               src_offset += pg_add;
+               dst_offset += pg_add;
+       }
+
+       blit_cmd = PSB_2D_BLIT_BH |
+           PSB_2D_ROT_NONE |
+           PSB_2D_DSTCK_DISABLE |
+           PSB_2D_SRCCK_DISABLE |
+           PSB_2D_USE_PAT |
+           PSB_2D_ROP3_SRCCOPY |
+           (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
+       xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
+
+       psb_2d_lock(dev_priv);
+       while (pages > 0) {
+               cur_pages = pages;
+               if (cur_pages > 2048)
+                       cur_pages = 2048;
+               pages -= cur_pages;
+               ystart = (direction) ? cur_pages - 1 : 0;
+
+               bufp = buf;
+               *bufp++ = PSB_2D_FENCE_BH;
+
+               *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
+                   (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
+               *bufp++ = dst_offset;
+               *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
+                   (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
+               *bufp++ = src_offset;
+               *bufp++ =
+                   PSB_2D_SRC_OFF_BH | (xstart << PSB_2D_SRCOFF_XSTART_SHIFT) |
+                   (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
+               *bufp++ = blit_cmd;
+               *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
+                   (ystart << PSB_2D_DST_YSTART_SHIFT);
+               *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
+                   (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
+
+               ret = psb_2d_submit(dev_priv, buf, bufp - buf);
+               if (ret)
+                       goto out;
+               pg_add = (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
+               src_offset += pg_add;
+               dst_offset += pg_add;
+       }
+      out:
+       psb_2d_unlock(dev_priv);
+       return ret;
+}
+
+void psb_init_2d(struct drm_psb_private *dev_priv)
+{
+       dev_priv->sequence_lock = SPIN_LOCK_UNLOCKED;
+       psb_reset(dev_priv, 1);
+       dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
+       PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
+       (void)PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
+int psb_idle_2d(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long _end = jiffies + DRM_HZ;
+       int busy = 0;
+
+       /*
+        * First idle the 2D engine.
+        */
+
+       if (dev_priv->engine_lockup_2d)
+               return -EBUSY;
+
+       if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+           ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+               goto out;
+
+       do {
+               busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+       } while (busy && !time_after_eq(jiffies, _end));
+
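+       /* Sample once more after the deadline, so a long preemption just
+        * before the check is not mistaken for a lockup. */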
+       if (busy)
+               busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+       if (busy)
+               goto out;
+
+       do {
+               busy =
+                   ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
+                    != 0);
+       } while (busy && !time_after_eq(jiffies, _end));
+       if (busy)
+               busy =
+                   ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
+                    != 0);
+
+      out:
+       if (busy)
+               dev_priv->engine_lockup_2d = 1;
+
+       return (busy) ? -EBUSY : 0;
+}
+
+int psb_idle_3d(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_scheduler *scheduler = &dev_priv->scheduler;
+       int ret;
+
+       ret = wait_event_timeout(scheduler->idle_queue,
+                                psb_scheduler_finished(dev_priv), DRM_HZ * 10);
+
+       return (ret < 1) ? -EBUSY : 0;
+}
+
+static void psb_dereference_buffers_locked(struct psb_buflist_item *buffers,
+                                          unsigned num_buffers)
+{
+       while (num_buffers--)
+               drm_bo_usage_deref_locked(&((buffers++)->bo));
+
+}
+
+static int psb_check_presumed(struct drm_bo_op_arg *arg,
+                             struct drm_buffer_object *bo,
+                             uint32_t __user * data, int *presumed_ok)
+{
+       struct drm_bo_op_req *req = &arg->d.req;
+       uint32_t hint_offset;
+       uint32_t hint = req->bo_req.hint;
+
+       *presumed_ok = 0;
+
+       if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
+               return 0;
+       if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
+               *presumed_ok = 1;
+               return 0;
+       }
+       if (bo->offset == req->bo_req.presumed_offset) {
+               *presumed_ok = 1;
+               return 0;
+       }
+
+       /*
+        * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
+        * the user-space IOCTL argument list, since the buffer has moved,
+        * we're about to apply relocations and we might subsequently
+        * hit an -EAGAIN. In that case the argument list will be reused by
+        * user-space, but the presumed offset is no longer valid.
+        *
+        * Needless to say, this is a bit ugly.
+        */
+
+       hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
+       hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
+       return __put_user(hint, data + hint_offset);
+}
+
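+/*
+ * Walk the user-space validate list (a chain of drm_bo_op_arg structs
+ * linked through arg.next), validating each buffer object and recording
+ * it in the buffers array for later relocation and fence handling.
+ */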
+static int psb_validate_buffer_list(struct drm_file *file_priv,
+                                   unsigned fence_class,
+                                   unsigned long data,
+                                   struct psb_buflist_item *buffers,
+                                   unsigned *num_buffers)
+{
+       struct drm_bo_op_arg arg;
+       struct drm_bo_op_req *req = &arg.d.req;
+       int ret = 0;
+       unsigned buf_count = 0;
+       struct psb_buflist_item *item = buffers;
+
+       do {
+               if (buf_count >= *num_buffers) {
+                       DRM_ERROR("Buffer count exceeded %u.\n", *num_buffers);
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+               item = buffers + buf_count;
+               item->bo = NULL;
+
+               if (copy_from_user(&arg, (void __user *)data, sizeof(arg))) {
+                       ret = -EFAULT;
+                       DRM_ERROR("Error copying validate list.\n"
+                                 "\tbuffer %u, user addr 0x%08lx %zu\n",
+                                 buf_count, (unsigned long)data, sizeof(arg));
+                       goto out_err;
+               }
+
+               ret = 0;
+               if (req->op != drm_bo_validate) {
+                       DRM_ERROR
+                           ("Buffer object operation wasn't \"validate\".\n");
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+
+               item->ret = 0;
+               item->data = (void __user *)data;
+               ret = drm_bo_handle_validate(file_priv,
+                                            req->bo_req.handle,
+                                            fence_class,
+                                            req->bo_req.flags,
+                                            req->bo_req.mask,
+                                            req->bo_req.hint,
+                                            0, NULL, &item->bo);
+               if (ret)
+                       goto out_err;
+
+               PSB_DEBUG_GENERAL("Validated buffer at 0x%08lx\n",
+                                 buffers[buf_count].bo->offset);
+
+               buf_count++;
+
+
+               ret = psb_check_presumed(&arg, item->bo,
+                                        (uint32_t __user *)
+                                        (unsigned long) data,
+                                        &item->presumed_offset_correct);
+
+               if (ret)
+                       goto out_err;
+
+               data = arg.next;
+       } while (data);
+
+       *num_buffers = buf_count;
+
+       return 0;
+      out_err:
+
+       *num_buffers = buf_count;
+       item->ret = (ret != -EAGAIN) ? ret : 0;
+       return ret;
+}
+
+int
+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
+              unsigned int cmds)
+{
+       int i;
+
+       /*
+        * cmds counts 32-bit words; each command is a (register, value)
+        * pair, hence the shift below.
+        */
+
+       cmds >>= 1;
+       for (i = 0; i < cmds; ++i) {
+               PSB_WSGX32(regs[1], regs[0]);
+               regs += 2;
+       }
+       wmb();
+       return 0;
+}
+
+/*
+ * Security: Block user-space writes to the MMU mapping registers.
+ * This is important for security and brings Poulsbo DRM up to par
+ * with the other DRM drivers. With this in place, user-space cannot
+ * map arbitrary memory pages into graphics memory, although every
+ * user-space process still has access to all buffer objects that are
+ * mapped to graphics memory.
+ */
+
+int
+psb_submit_copy_cmdbuf(struct drm_device *dev,
+                      struct drm_buffer_object *cmd_buffer,
+                      unsigned long cmd_offset,
+                      unsigned long cmd_size,
+                      int engine, uint32_t * copy_buffer)
+{
+       unsigned long cmd_end = cmd_offset + (cmd_size << 2);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long cmd_page_offset = cmd_offset - (cmd_offset & PAGE_MASK);
+       unsigned long cmd_next;
+       struct drm_bo_kmap_obj cmd_kmap;
+       uint32_t *cmd_page;
+       unsigned cmds;
+       int is_iomem;
+       int ret = 0;
+
+       if (cmd_size == 0)
+               return 0;
+
+       if (engine == PSB_ENGINE_2D)
+               psb_2d_lock(dev_priv);
+
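+       /*
+        * Map the command buffer one page at a time and hand each
+        * page-sized span to the engine-specific submit path; the 3D
+        * engines only get their register writes copied (and vetted)
+        * into copy_buffer.
+        */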
+       do {
+               cmd_next = drm_bo_offset_end(cmd_offset, cmd_end);
+               ret = drm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
+                                 1, &cmd_kmap);
+
+               if (ret)
+                       return ret;
+               cmd_page = drm_bmo_virtual(&cmd_kmap, &is_iomem);
+               cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
+               cmds = (cmd_next - cmd_offset) >> 2;
+
+               switch (engine) {
+               case PSB_ENGINE_2D:
+                       ret =
+                           psb_2d_submit(dev_priv, cmd_page + cmd_page_offset,
+                                         cmds);
+                       break;
+               case PSB_ENGINE_RASTERIZER:
+               case PSB_ENGINE_TA:
+               case PSB_ENGINE_HPRAST:
+                       PSB_DEBUG_GENERAL("Reg copy.\n");
+                       ret = psb_memcpy_check(copy_buffer,
+                                              cmd_page + cmd_page_offset,
+                                              cmds * sizeof(uint32_t));
+                       copy_buffer += cmds;
+                       break;
+               default:
+                       ret = -EINVAL;
+               }
+               drm_bo_kunmap(&cmd_kmap);
+               if (ret)
+                       break;
+       } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
+
+       if (engine == PSB_ENGINE_2D)
+               psb_2d_unlock(dev_priv);
+
+       return ret;
+}
+
+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
+{
+       if (dst_cache->dst_page) {
+               drm_bo_kunmap(&dst_cache->dst_kmap);
+               dst_cache->dst_page = NULL;
+       }
+       dst_cache->dst_buf = NULL;
+       dst_cache->dst = ~0;
+       dst_cache->use_page = NULL;
+}
+
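+/*
+ * Keep the destination page of the most recent relocation kmapped, so
+ * consecutive relocations into the same page skip the map/unmap cycle.
+ */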
+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
+                                  struct psb_buflist_item *buffers,
+                                  unsigned int dst, unsigned long dst_offset)
+{
+       int ret;
+
+       PSB_DEBUG_RELOC("Destination buffer is %d.\n", dst);
+
+       if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
+               psb_clear_dstbuf_cache(dst_cache);
+               dst_cache->dst = dst;
+               dst_cache->dst_buf = buffers[dst].bo;
+       }
+
+       if (unlikely(dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
+               DRM_ERROR("Relocation destination out of bounds.\n");
+               return -EINVAL;
+       }
+
+       if (!drm_bo_same_page(dst_cache->dst_offset, dst_offset) ||
+           NULL == dst_cache->dst_page) {
+               if (NULL != dst_cache->dst_page) {
+                       drm_bo_kunmap(&dst_cache->dst_kmap);
+                       dst_cache->dst_page = NULL;
+               }
+
+               ret = drm_bo_kmap(dst_cache->dst_buf, dst_offset >> PAGE_SHIFT,
+                                 1, &dst_cache->dst_kmap);
+               if (ret) {
+                       DRM_ERROR("Could not map destination buffer for "
+                                 "relocation.\n");
+                       return ret;
+               }
+
+               dst_cache->dst_page = drm_bmo_virtual(&dst_cache->dst_kmap,
+                                                     &dst_cache->dst_is_iomem);
+               dst_cache->dst_offset = dst_offset & PAGE_MASK;
+               dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
+       }
+       return 0;
+}
+
+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
+                          uint32_t fence_class,
+                          const struct drm_psb_reloc *reloc,
+                          struct psb_buflist_item *buffers,
+                          int num_buffers,
+                          struct psb_dstbuf_cache *dst_cache,
+                          int no_wait, int interruptible)
+{
+       int reg;
+       uint32_t val;
+       uint32_t background;
+       unsigned int index;
+       int ret;
+       unsigned int shift;
+       unsigned int align_shift;
+       uint32_t fence_type;
+       struct drm_buffer_object *reloc_bo;
+
+       PSB_DEBUG_RELOC("Reloc type %d\n"
+                         "\t where 0x%04x\n"
+                         "\t buffer 0x%04x\n"
+                         "\t mask 0x%08x\n"
+                         "\t shift 0x%08x\n"
+                         "\t pre_add 0x%08x\n"
+                         "\t background 0x%08x\n"
+                         "\t dst_buffer 0x%08x\n"
+                         "\t arg0 0x%08x\n"
+                         "\t arg1 0x%08x\n",
+                         reloc->reloc_op,
+                         reloc->where,
+                         reloc->buffer,
+                         reloc->mask,
+                         reloc->shift,
+                         reloc->pre_add,
+                         reloc->background,
+                         reloc->dst_buffer, reloc->arg0, reloc->arg1);
+
+       if (unlikely(reloc->buffer >= num_buffers)) {
+               DRM_ERROR("Illegal relocation buffer %d.\n", reloc->buffer);
+               return -EINVAL;
+       }
+
+       if (buffers[reloc->buffer].presumed_offset_correct)
+               return 0;
+
+       if (unlikely(reloc->dst_buffer >= num_buffers)) {
+               DRM_ERROR("Illegal destination buffer for relocation %d.\n",
+                         reloc->dst_buffer);
+               return -EINVAL;
+       }
+
+       ret = psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
+                                     reloc->where << 2);
+       if (ret)
+               return ret;
+
+       reloc_bo = buffers[reloc->buffer].bo;
+
+       if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
+               DRM_ERROR("Illegal relocation offset add.\n");
+               return -EINVAL;
+       }
+
+       switch (reloc->reloc_op) {
+       case PSB_RELOC_OP_OFFSET:
+               val = reloc_bo->offset + reloc->pre_add;
+               break;
+       case PSB_RELOC_OP_2D_OFFSET:
+               val = reloc_bo->offset + reloc->pre_add -
+                   dev_priv->mmu_2d_offset;
+               if (unlikely(val >= PSB_2D_SIZE)) {
+                       DRM_ERROR("2D relocation out of bounds\n");
+                       return -EINVAL;
+               }
+               break;
+       case PSB_RELOC_OP_PDS_OFFSET:
+               val = reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
+               if (unlikely(val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
+                       DRM_ERROR("PDS relocation out of bounds\n");
+                       return -EINVAL;
+               }
+               break;
+       case PSB_RELOC_OP_USE_OFFSET:
+       case PSB_RELOC_OP_USE_REG:
+
+               /*
+                * Security:
+                * Only allow VERTEX or PIXEL data masters, as
+                * shaders run under other data masters may in theory
+                * alter MMU mappings.
+                */
+
+               if (unlikely(reloc->arg1 != _PSB_CUC_DM_PIXEL &&
+                            reloc->arg1 != _PSB_CUC_DM_VERTEX)) {
+                       DRM_ERROR("Invalid data master in relocation. %d\n",
+                                 reloc->arg1);
+                       return -EPERM;
+               }
+
+               fence_type = reloc_bo->fence_type;
+               ret = psb_grab_use_base(dev_priv,
+                                       reloc_bo->offset +
+                                       reloc->pre_add, reloc->arg0,
+                                       reloc->arg1, fence_class,
+                                       fence_type, no_wait,
+                                       interruptible, &reg, &val);
+               if (ret)
+                       return ret;
+
+               val = (reloc->reloc_op == PSB_RELOC_OP_USE_REG) ? reg : val;
+               break;
+       default:
+               DRM_ERROR("Unimplemented relocation.\n");
+               return -EINVAL;
+       }
+
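+       /*
+        * Strip the alignment bits from the computed value, shift it into
+        * its destination field, and merge with the background under the
+        * relocation mask.
+        */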
+       shift = (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
+       align_shift = (reloc->shift & PSB_RELOC_ALSHIFT_MASK) >>
+           PSB_RELOC_ALSHIFT_SHIFT;
+
+       val = ((val >> align_shift) << shift);
+       index = reloc->where - dst_cache->dst_page_offset;
+
+       background = reloc->background;
+
+       if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET) {
+               if (dst_cache->use_page == dst_cache->dst_page &&
+                   dst_cache->use_index == index)
+                       background = dst_cache->use_background;
+               else
+                       background = dst_cache->dst_page[index];
+       }
+#if 0
+       if (dst_cache->dst_page[index] != PSB_RELOC_MAGIC &&
+           reloc->reloc_op != PSB_RELOC_OP_USE_OFFSET)
+               DRM_ERROR("Inconsistent relocation 0x%08lx.\n",
+                         (unsigned long)dst_cache->dst_page[index]);
+#endif
+
+       val = (background & ~reloc->mask) | (val & reloc->mask);
+       dst_cache->dst_page[index] = val;
+
+       if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET ||
+           reloc->reloc_op == PSB_RELOC_OP_USE_REG) {
+               dst_cache->use_page = dst_cache->dst_page;
+               dst_cache->use_index = index;
+               dst_cache->use_background = val;
+       }
+
+       PSB_DEBUG_RELOC("Reloc buffer %d index 0x%08x, value 0x%08x\n",
+                         reloc->dst_buffer, index, dst_cache->dst_page[index]);
+
+       return 0;
+}
+
+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
+                              unsigned int num_pages)
+{
+       int ret = 0;
+
+       spin_lock(&dev_priv->reloc_lock);
+       if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
+               dev_priv->rel_mapped_pages += num_pages;
+               ret = 1;
+       }
+       spin_unlock(&dev_priv->reloc_lock);
+       return ret;
+}
+
+static int psb_fixup_relocs(struct drm_file *file_priv,
+                           uint32_t fence_class,
+                           unsigned int num_relocs,
+                           unsigned int reloc_offset,
+                           uint32_t reloc_handle,
+                           struct psb_buflist_item *buffers,
+                           unsigned int num_buffers,
+                           int no_wait, int interruptible)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_buffer_object *reloc_buffer = NULL;
+       unsigned int reloc_num_pages;
+       unsigned int reloc_first_page;
+       unsigned int reloc_last_page;
+       struct psb_dstbuf_cache dst_cache;
+       struct drm_psb_reloc *reloc;
+       struct drm_bo_kmap_obj reloc_kmap;
+       int reloc_is_iomem;
+       int count;
+       int ret = 0;
+       int registered = 0;
+       int short_circuit = 1;
+       int i;
+
+       if (num_relocs == 0)
+               return 0;
+
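+       /*
+        * If every buffer still sits at its presumed offset, the
+        * relocations written by user-space are already valid and the
+        * whole fixup pass can be skipped.
+        */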
+       for (i = 0; i < num_buffers; ++i) {
+               if (!buffers[i].presumed_offset_correct) {
+                       short_circuit = 0;
+                       break;
+               }
+       }
+
+       if (short_circuit)
+               return 0;
+
+       memset(&dst_cache, 0, sizeof(dst_cache));
+       memset(&reloc_kmap, 0, sizeof(reloc_kmap));
+
+       mutex_lock(&dev->struct_mutex);
+       reloc_buffer = drm_lookup_buffer_object(file_priv, reloc_handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+       if (!reloc_buffer)
+               goto out;
+
+       reloc_first_page = reloc_offset >> PAGE_SHIFT;
+       reloc_last_page =
+           (reloc_offset +
+            num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
+       reloc_num_pages = reloc_last_page - reloc_first_page + 1;
+       reloc_offset &= ~PAGE_MASK;
+
+       if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
+               DRM_ERROR("Relocation buffer is too large\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
+                   (registered =
+                    psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
+
+       if (ret == -EINTR) {
+               ret = -EAGAIN;
+               goto out;
+       }
+       if (ret) {
+               DRM_ERROR("Error waiting for space to map "
+                         "relocation buffer.\n");
+               goto out;
+       }
+
+       ret = drm_bo_kmap(reloc_buffer, reloc_first_page,
+                         reloc_num_pages, &reloc_kmap);
+
+       if (ret) {
+               DRM_ERROR("Could not map relocation buffer.\n"
+                         "\tReloc buffer id 0x%08x.\n"
+                         "\tReloc first page %d.\n"
+                         "\tReloc num pages %d.\n",
+                         reloc_handle, reloc_first_page, reloc_num_pages);
+               goto out;
+       }
+
+       reloc = (struct drm_psb_reloc *)
+           ((unsigned long)drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem) +
+            reloc_offset);
+
+       for (count = 0; count < num_relocs; ++count) {
+               ret = psb_apply_reloc(dev_priv, fence_class,
+                                     reloc, buffers,
+                                     num_buffers, &dst_cache,
+                                     no_wait, interruptible);
+               if (ret)
+                       goto out1;
+               reloc++;
+       }
+
+      out1:
+       drm_bo_kunmap(&reloc_kmap);
+      out:
+       if (registered) {
+               spin_lock(&dev_priv->reloc_lock);
+               dev_priv->rel_mapped_pages -= reloc_num_pages;
+               spin_unlock(&dev_priv->reloc_lock);
+               DRM_WAKEUP(&dev_priv->rel_mapped_queue);
+       }
+
+       psb_clear_dstbuf_cache(&dst_cache);
+       if (reloc_buffer)
+               drm_bo_usage_deref_unlocked(&reloc_buffer);
+       return ret;
+}
+
+static int psb_cmdbuf_2d(struct drm_file *priv,
+               struct drm_psb_cmdbuf_arg *arg,
+               struct drm_buffer_object *cmd_buffer,
+               struct drm_fence_arg *fence_arg)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_psb_private *dev_priv =
+               (struct drm_psb_private *)dev->dev_private;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
+       if (ret)
+               return -EAGAIN;
+
+#ifdef PSB_DETEAR
+       if (arg->sVideoInfo.flag == PSB_DELAYED_2D_BLIT) {
+               dev_priv->blit_2d = 1;
+       }
+#endif /* PSB_DETEAR */
+
+       ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
+                       arg->cmdbuf_size, PSB_ENGINE_2D, NULL);
+       if (ret)
+               goto out_unlock;
+
+#ifdef PSB_DETEAR
+       if (arg->sVideoInfo.flag == PSB_DELAYED_2D_BLIT) {
+               arg->sVideoInfo.flag = 0;
+               clear_bit(0, &psb_blit_info.vdc_bit);
+               psb_blit_info.cmd_ready = 1;
+               /* Block until the delayed 2D blit task finishes
+                  execution. */
+               while (test_bit(0, &psb_blit_info.vdc_bit) == 0)
+                       schedule();
+       }
+#endif /* PSB_DETEAR */
+
+       psb_fence_or_sync(priv, PSB_ENGINE_2D, arg, fence_arg, NULL);
+
+       mutex_lock(&cmd_buffer->mutex);
+       if (cmd_buffer->fence != NULL)
+               drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
+       mutex_unlock(&cmd_buffer->mutex);
+out_unlock:
+       mutex_unlock(&dev_priv->reset_mutex);
+       return ret;
+}
+
+#if 0
+static int psb_dump_page(struct drm_buffer_object *bo,
+                        unsigned int page_offset, unsigned int num)
+{
+       struct drm_bo_kmap_obj kmobj;
+       int is_iomem;
+       uint32_t *p;
+       int ret;
+       unsigned int i;
+
+       ret = drm_bo_kmap(bo, page_offset, 1, &kmobj);
+       if (ret)
+               return ret;
+
+       p = drm_bmo_virtual(&kmobj, &is_iomem);
+       for (i = 0; i < num; ++i)
+               PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
+
+       drm_bo_kunmap(&kmobj);
+       return 0;
+}
+#endif
+
+static void psb_idle_engine(struct drm_device *dev, int engine)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       uint32_t dummy;
+
+       switch (engine) {
+       case PSB_ENGINE_2D:
+
+               /*
+                * Make sure we flush 2D properly using a dummy
+                * fence sequence emit.
+                */
+
+               (void)psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
+                                             &dummy, &dummy);
+               psb_2d_lock(dev_priv);
+               (void)psb_idle_2d(dev);
+               psb_2d_unlock(dev_priv);
+               break;
+       case PSB_ENGINE_TA:
+       case PSB_ENGINE_RASTERIZER:
+       case PSB_ENGINE_HPRAST:
+               (void)psb_idle_3d(dev);
+               break;
+       default:
+
+               /*
+                * FIXME: Insert video engine idle command here.
+                */
+
+               break;
+       }
+}
+
+void psb_fence_or_sync(struct drm_file *priv,
+                      int engine,
+                      struct drm_psb_cmdbuf_arg *arg,
+                      struct drm_fence_arg *fence_arg,
+                      struct drm_fence_object **fence_p)
+{
+       struct drm_device *dev = priv->head->dev;
+       int ret;
+       struct drm_fence_object *fence;
+
+       ret = drm_fence_buffer_objects(dev, NULL, arg->fence_flags,
+                                      NULL, &fence);
+
+       if (ret) {
+
+               /*
+                * Fence creation failed.
+                * Fall back to synchronous operation and idle the engine.
+                */
+
+               psb_idle_engine(dev, engine);
+               if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+
+                       /*
+                        * Communicate to user-space that
+                        * fence creation has failed and that
+                        * the engine is idle.
+                        */
+
+                       fence_arg->handle = ~0;
+                       fence_arg->error = ret;
+               }
+
+               drm_putback_buffer_objects(dev);
+               if (fence_p)
+                       *fence_p = NULL;
+               return;
+       }
+
+       if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+
+               ret = drm_fence_add_user_object(priv, fence,
+                                               arg->fence_flags &
+                                               DRM_FENCE_FLAG_SHAREABLE);
+               if (!ret)
+                       drm_fence_fill_arg(fence, fence_arg);
+               else {
+                       /*
+                        * Fence user object creation failed.
+                        * We must idle the engine here as well, as user-
+                        * space expects a fence object to wait on. Since we
+                        * have a fence object we wait for it to signal
+                        * to indicate engine "sufficiently" idle.
+                        */
+
+                       (void)drm_fence_object_wait(fence, 0, 1, fence->type);
+                       drm_fence_usage_deref_unlocked(&fence);
+                       fence_arg->handle = ~0;
+                       fence_arg->error = ret;
+               }
+       }
+
+       if (fence_p)
+               *fence_p = fence;
+       else if (fence)
+               drm_fence_usage_deref_unlocked(&fence);
+}
+
+int psb_handle_copyback(struct drm_device *dev,
+                       struct psb_buflist_item *buffers,
+                       unsigned int num_buffers, int ret, void *data)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       struct drm_bo_op_arg arg;
+       struct psb_buflist_item *item = buffers;
+       struct drm_buffer_object *bo;
+       int err = ret;
+       int i;
+
+       /*
+        * Clear the unfenced use base register lists and buffer lists.
+        */
+
+       if (ret) {
+               drm_regs_fence(&dev_priv->use_manager, NULL);
+               drm_putback_buffer_objects(dev);
+       }
+
+       if (ret != -EAGAIN) {
+               for (i = 0; i < num_buffers; ++i) {
+                       arg.handled = 1;
+                       arg.d.rep.ret = item->ret;
+                       bo = item->bo;
+                       mutex_lock(&bo->mutex);
+                       drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
+                       mutex_unlock(&bo->mutex);
+                       if (copy_to_user(item->data, &arg, sizeof(arg)))
+                               err = -EFAULT;
+                       ++item;
+               }
+       }
+
+       return err;
+}
+
+static int psb_cmdbuf_video(struct drm_file *priv,
+                           struct drm_psb_cmdbuf_arg *arg,
+                           unsigned int num_buffers,
+                           struct drm_buffer_object *cmd_buffer,
+                           struct drm_fence_arg *fence_arg)
+{
+       struct drm_device *dev = priv->head->dev;
+       struct drm_fence_object *fence;
+       int ret;
+
+       /*
+        * FIXME: Check this; it doesn't seem right. Fencing should be done
+        * AFTER command submission, and drm_psb_idle should be made to idle
+        * the MSVDX completely.
+        */
+
+       psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, arg, fence_arg, &fence);
+       ret = psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
+                                     arg->cmdbuf_size, fence);
+
+       if (ret)
+               return ret;
+
+       drm_fence_usage_deref_unlocked(&fence);
+       mutex_lock(&cmd_buffer->mutex);
+       if (cmd_buffer->fence != NULL)
+               drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
+       mutex_unlock(&cmd_buffer->mutex);
+       return 0;
+}
+
+int psb_feedback_buf(struct drm_file *file_priv,
+                    uint32_t feedback_ops,
+                    uint32_t handle,
+                    uint32_t offset,
+                    uint32_t feedback_breakpoints,
+                    uint32_t feedback_size, struct psb_feedback_info *feedback)
+{
+       struct drm_buffer_object *bo;
+       struct page *page;
+       uint32_t page_no;
+       uint32_t page_offset;
+       int ret;
+
+       if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
+               DRM_ERROR("Illegal feedback op.\n");
+               return -EINVAL;
+       }
+
+       if (feedback_breakpoints != 0) {
+               DRM_ERROR("Feedback breakpoints not implemented yet.\n");
+               return -EINVAL;
+       }
+
+       if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
+               DRM_ERROR("Feedback buffer size too small.\n");
+               return -EINVAL;
+       }
+
+       page_offset = offset & ~PAGE_MASK;
+       if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
+           < page_offset) {
+               DRM_ERROR("Illegal feedback buffer alignment.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_handle_validate(file_priv,
+                                    handle,
+                                    PSB_ENGINE_TA,
+                                    DRM_BO_FLAG_MEM_LOCAL |
+                                    DRM_BO_FLAG_CACHED |
+                                    DRM_BO_FLAG_WRITE |
+                                    PSB_BO_FLAG_FEEDBACK,
+                                    DRM_BO_MASK_MEM |
+                                    DRM_BO_FLAG_CACHED |
+                                    DRM_BO_FLAG_WRITE |
+                                    PSB_BO_FLAG_FEEDBACK, 0, 0, NULL, &bo);
+       if (ret)
+               return ret;
+
+       page_no = offset >> PAGE_SHIFT;
+       if (page_no >= bo->num_pages) {
+               ret = -EINVAL;
+               DRM_ERROR("Illegal feedback buffer offset.\n");
+               goto out_unref;
+       }
+
+       if (bo->ttm == NULL) {
+               ret = -EINVAL;
+               DRM_ERROR("Vistest buffer without TTM.\n");
+               goto out_unref;
+       }
+
+       page = drm_ttm_get_page(bo->ttm, page_no);
+       if (!page) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       feedback->page = page;
+       feedback->bo = bo;
+       feedback->offset = page_offset;
+       return 0;
+
+      out_unref:
+       drm_bo_usage_deref_unlocked(&bo);
+       return ret;
+}
+
+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       drm_psb_cmdbuf_arg_t *arg = data;
+       int ret = 0;
+       unsigned num_buffers;
+       struct drm_buffer_object *cmd_buffer = NULL;
+       struct drm_buffer_object *ta_buffer = NULL;
+       struct drm_buffer_object *oom_buffer = NULL;
+       struct drm_fence_arg fence_arg;
+       struct drm_psb_scene user_scene;
+       struct psb_scene_pool *pool = NULL;
+       struct psb_scene *scene = NULL;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)file_priv->head->dev->dev_private;
+       int engine;
+       struct psb_feedback_info feedback;
+
+       if (!dev_priv)
+               return -EINVAL;
+
+       ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+       if (ret)
+               return ret;
+
+       num_buffers = PSB_NUM_VALIDATE_BUFFERS;
+
+       ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+       if (ret) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return -EAGAIN;
+       }
+       if (unlikely(dev_priv->buffers == NULL)) {
+               dev_priv->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
+                                           sizeof(*dev_priv->buffers));
+               if (dev_priv->buffers == NULL) {
+                       /* Don't leak cmdbuf_mutex on the error path. */
+                       mutex_unlock(&dev_priv->cmdbuf_mutex);
+                       drm_bo_read_unlock(&dev->bm.bm_lock);
+                       return -ENOMEM;
+               }
+       }
+
+
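+       /*
+        * Rasterizer submissions are validated against the TA engine's
+        * fence class; presumably the two engines share fences.
+        */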
+       engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
+           PSB_ENGINE_TA : arg->engine;
+
+       ret =
+           psb_validate_buffer_list(file_priv, engine,
+                                    (unsigned long)arg->buffer_list,
+                                    dev_priv->buffers, &num_buffers);
+       if (ret)
+               goto out_err0;
+
+       ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
+                              arg->reloc_offset, arg->reloc_handle,
+                              dev_priv->buffers, num_buffers, 0, 1);
+       if (ret)
+               goto out_err0;
+
+       mutex_lock(&dev->struct_mutex);
+       cmd_buffer = drm_lookup_buffer_object(file_priv, arg->cmdbuf_handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+       if (!cmd_buffer) {
+               ret = -EINVAL;
+               goto out_err0;
+       }
+
+       switch (arg->engine) {
+       case PSB_ENGINE_2D:
+               ret = psb_cmdbuf_2d(file_priv, arg, cmd_buffer, &fence_arg);
+               if (ret)
+                       goto out_err0;
+               break;
+       case PSB_ENGINE_VIDEO:
+               ret =
+                   psb_cmdbuf_video(file_priv, arg, num_buffers, cmd_buffer,
+                                    &fence_arg);
+               if (ret)
+                       goto out_err0;
+               break;
+       case PSB_ENGINE_RASTERIZER:
+               ret = psb_cmdbuf_raster(file_priv, arg, cmd_buffer, &fence_arg);
+               if (ret)
+                       goto out_err0;
+               break;
+       case PSB_ENGINE_TA:
+               if (arg->ta_handle == arg->cmdbuf_handle) {
+                       mutex_lock(&dev->struct_mutex);
+                       atomic_inc(&cmd_buffer->usage);
+                       ta_buffer = cmd_buffer;
+                       mutex_unlock(&dev->struct_mutex);
+               } else {
+                       mutex_lock(&dev->struct_mutex);
+                       ta_buffer =
+                           drm_lookup_buffer_object(file_priv,
+                                                    arg->ta_handle, 1);
+                       mutex_unlock(&dev->struct_mutex);
+                       if (!ta_buffer) {
+                               ret = -EINVAL;
+                               goto out_err0;
+                       }
+               }
+               if (arg->oom_size != 0) {
+                       if (arg->oom_handle == arg->cmdbuf_handle) {
+                               mutex_lock(&dev->struct_mutex);
+                               atomic_inc(&cmd_buffer->usage);
+                               oom_buffer = cmd_buffer;
+                               mutex_unlock(&dev->struct_mutex);
+                       } else {
+                               mutex_lock(&dev->struct_mutex);
+                               oom_buffer =
+                                   drm_lookup_buffer_object(file_priv,
+                                                            arg->oom_handle,
+                                                            1);
+                               mutex_unlock(&dev->struct_mutex);
+                               if (!oom_buffer) {
+                                       ret = -EINVAL;
+                                       goto out_err0;
+                               }
+                       }
+               }
+
+               /* copy_from_user() returns the number of bytes left, not
+                * an errno; convert to -EFAULT. */
+               if (copy_from_user(&user_scene, (void __user *)
+                                  ((unsigned long)arg->scene_arg),
+                                  sizeof(user_scene))) {
+                       ret = -EFAULT;
+                       goto out_err0;
+               }
+
+               if (!user_scene.handle_valid) {
+                       pool = psb_scene_pool_alloc(file_priv, 0,
+                                                   user_scene.num_buffers,
+                                                   user_scene.w, user_scene.h);
+                       if (!pool) {
+                               ret = -ENOMEM;
+                               goto out_err0;
+                       }
+
+                       user_scene.handle = psb_scene_pool_handle(pool);
+                       user_scene.handle_valid = 1;
+                       if (copy_to_user((void __user *)
+                                        ((unsigned long)arg->scene_arg),
+                                        &user_scene,
+                                        sizeof(user_scene))) {
+                               ret = -EFAULT;
+                               goto out_err0;
+                       }
+               } else {
+                       mutex_lock(&dev->struct_mutex);
+                       pool = psb_scene_pool_lookup_devlocked(file_priv,
+                                                              user_scene.handle,
+                                                              1);
+                       mutex_unlock(&dev->struct_mutex);
+                       if (!pool) {
+                               ret = -EINVAL;
+                               goto out_err0;
+                       }
+               }
+
+               mutex_lock(&dev_priv->reset_mutex);
+               ret = psb_validate_scene_pool(pool, 0, 0, 0,
+                                             user_scene.w,
+                                             user_scene.h,
+                                             arg->ta_flags &
+                                             PSB_TA_FLAG_LASTPASS, &scene);
+               mutex_unlock(&dev_priv->reset_mutex);
+
+               if (ret)
+                       goto out_err0;
+
+               memset(&feedback, 0, sizeof(feedback));
+               if (arg->feedback_ops) {
+                       ret = psb_feedback_buf(file_priv,
+                                              arg->feedback_ops,
+                                              arg->feedback_handle,
+                                              arg->feedback_offset,
+                                              arg->feedback_breakpoints,
+                                              arg->feedback_size, &feedback);
+                       if (ret)
+                               goto out_err0;
+               }
+               ret = psb_cmdbuf_ta(file_priv, arg, cmd_buffer, ta_buffer,
+                                   oom_buffer, scene, &feedback, &fence_arg);
+               if (ret)
+                       goto out_err0;
+               break;
+       default:
+               DRM_ERROR("Unimplemented command submission mechanism (%x).\n",
+                         arg->engine);
+               ret = -EINVAL;
+               goto out_err0;
+       }
+
+       if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+               if (copy_to_user((void __user *)
+                                ((unsigned long)arg->fence_arg),
+                                &fence_arg, sizeof(fence_arg)))
+                       ret = -EFAULT;
+       }
+
+      out_err0:
+       ret =
+           psb_handle_copyback(dev, dev_priv->buffers, num_buffers, ret, data);
+       mutex_lock(&dev->struct_mutex);
+       if (scene)
+               psb_scene_unref_devlocked(&scene);
+       if (pool)
+               psb_scene_pool_unref_devlocked(&pool);
+       if (cmd_buffer)
+               drm_bo_usage_deref_locked(&cmd_buffer);
+       if (ta_buffer)
+               drm_bo_usage_deref_locked(&ta_buffer);
+       if (oom_buffer)
+               drm_bo_usage_deref_locked(&oom_buffer);
+
+       psb_dereference_buffers_locked(dev_priv->buffers, num_buffers);
+       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return ret;
+}
diff --git a/psb-kernel-source-4.41.1/psb_xhw.c b/psb-kernel-source-4.41.1/psb_xhw.c
new file mode 100644 (file)
index 0000000..6470ec0
--- /dev/null
+++ b/psb-kernel-source-4.41.1/psb_xhw.c
@@ -0,0 +1,629 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Make calls into closed source X server code.
+ */
+
+#include "drmP.h"
+#include "psb_drv.h"
+
+void
+psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       list_del_init(&buf->head);
+       if (dev_priv->xhw_cur_buf == buf)
+               dev_priv->xhw_cur_buf = NULL;
+       atomic_set(&buf->done, 1);
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+}
+
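+/*
+ * Queue a buffer for the closed source Xpsb code in the X server:
+ * entries go on the xhw_in list and xhw_queue is woken; callers that
+ * need a reply then sleep on xhw_caller_queue until buf->done is set.
+ */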
+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
+                             struct psb_xhw_buf *buf)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       atomic_set(&buf->done, 0);
+       if (unlikely(!dev_priv->xhw_submit_ok)) {
+               spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+               DRM_ERROR("No Xpsb 3D extension available.\n");
+               return -EINVAL;
+       }
+       if (!list_empty(&buf->head)) {
+               DRM_ERROR("Recursive list adding.\n");
+               goto out;
+       }
+       list_add_tail(&buf->head, &dev_priv->xhw_in);
+       wake_up_interruptible(&dev_priv->xhw_queue);
+      out:
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+       return 0;
+}
+
+int psb_xhw_hotplug(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+       int ret;
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_HOTPLUG;
+       xa->issue_irq = 0;
+       xa->irq_op = 0;
+
+       ret = psb_xhw_add(dev_priv, buf);
+       return ret;
+}
+
+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
+                      struct psb_xhw_buf *buf,
+                      uint32_t w,
+                      uint32_t h,
+                      uint32_t * hw_cookie,
+                      uint32_t * bo_size,
+                      uint32_t * clear_p_start, uint32_t * clear_num_pages)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+       int ret;
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_SCENE_INFO;
+       xa->irq_op = 0;
+       xa->issue_irq = 0;
+       xa->arg.si.w = w;
+       xa->arg.si.h = h;
+
+       ret = psb_xhw_add(dev_priv, buf);
+       if (ret)
+               return ret;
+
+       (void)wait_event_timeout(dev_priv->xhw_caller_queue,
+                                atomic_read(&buf->done), DRM_HZ);
+
+       if (!atomic_read(&buf->done)) {
+               psb_xhw_clean_buf(dev_priv, buf);
+               return -EBUSY;
+       }
+
+       if (!xa->ret) {
+               memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
+               *bo_size = xa->arg.si.size;
+               *clear_p_start = xa->arg.si.clear_p_start;
+               *clear_num_pages = xa->arg.si.clear_num_pages;
+       }
+       return xa->ret;
+}
+
+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
+                       struct psb_xhw_buf *buf, uint32_t fire_flags)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+
+       buf->copy_back = 0;
+       xa->op = PSB_XHW_FIRE_RASTER;
+       xa->issue_irq = 0;
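+       /* Note: the fire_flags argument is not forwarded; a plain fire
+          always submits with fire_flags == 0. */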
+       xa->arg.sb.fire_flags = 0;
+
+       return psb_xhw_add(dev_priv, buf);
+}
+
+int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_VISTEST;
+       /*
+        * Could perhaps decrease latency somewhat by
+        * issuing an irq in this case.
+        */
+       xa->issue_irq = 0;
+       xa->irq_op = PSB_UIRQ_VISTEST;
+       return psb_xhw_add(dev_priv, buf);
+}
+
+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
+                           struct psb_xhw_buf *buf,
+                           uint32_t fire_flags,
+                           uint32_t hw_context,
+                           uint32_t * cookie,
+                           uint32_t * oom_cmds,
+                           uint32_t num_oom_cmds,
+                           uint32_t offset, uint32_t engine, uint32_t flags)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+
+       buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
+       xa->op = PSB_XHW_SCENE_BIND_FIRE;
+       xa->issue_irq = (buf->copy_back) ? 1 : 0;
+       if (unlikely(buf->copy_back))
+               xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
+                   PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
+       else
+               xa->irq_op = 0;
+       xa->arg.sb.fire_flags = fire_flags;
+       xa->arg.sb.hw_context = hw_context;
+       xa->arg.sb.offset = offset;
+       xa->arg.sb.engine = engine;
+       xa->arg.sb.flags = flags;
+       xa->arg.sb.num_oom_cmds = num_oom_cmds;
+       memcpy(xa->cookie, cookie, sizeof(xa->cookie));
+       if (num_oom_cmds)
+               memcpy(xa->arg.sb.oom_cmds, oom_cmds,
+                      sizeof(uint32_t) * num_oom_cmds);
+       return psb_xhw_add(dev_priv, buf);
+}
+
+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+       int ret;
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_RESET_DPM;
+       xa->issue_irq = 0;
+       xa->irq_op = 0;
+
+       ret = psb_xhw_add(dev_priv, buf);
+       if (ret)
+               return ret;
+
+       (void)wait_event_timeout(dev_priv->xhw_caller_queue,
+                                atomic_read(&buf->done), 3 * DRM_HZ);
+
+       if (!atomic_read(&buf->done)) {
+               psb_xhw_clean_buf(dev_priv, buf);
+               return -EBUSY;
+       }
+
+       return xa->ret;
+}
+
+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
+                        struct psb_xhw_buf *buf, uint32_t * value)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+       int ret;
+
+       *value = 0;
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_CHECK_LOCKUP;
+       xa->issue_irq = 0;
+       xa->irq_op = 0;
+
+       ret = psb_xhw_add(dev_priv, buf);
+       if (ret)
+               return ret;
+
+       (void)wait_event_timeout(dev_priv->xhw_caller_queue,
+                                atomic_read(&buf->done), DRM_HZ * 3);
+
+       if (!atomic_read(&buf->done)) {
+               psb_xhw_clean_buf(dev_priv, buf);
+               return -EBUSY;
+       }
+
+       if (!xa->ret)
+               *value = xa->arg.cl.value;
+
+       return xa->ret;
+}
+
+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
+                            struct psb_xhw_buf *buf)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+       unsigned long irq_flags;
+
+       buf->copy_back = 0;
+       xa->op = PSB_XHW_TERMINATE;
+       xa->issue_irq = 0;
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       dev_priv->xhw_submit_ok = 0;
+       atomic_set(&buf->done, 0);
+       if (!list_empty(&buf->head)) {
+               DRM_ERROR("Recursive list adding.\n");
+               goto out;
+       }
+       list_add_tail(&buf->head, &dev_priv->xhw_in);
+      out:
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+       wake_up_interruptible(&dev_priv->xhw_queue);
+
+       (void)wait_event_timeout(dev_priv->xhw_caller_queue,
+                                atomic_read(&buf->done), DRM_HZ / 10);
+
+       if (!atomic_read(&buf->done)) {
+               DRM_ERROR("Xpsb terminate timeout.\n");
+               psb_xhw_clean_buf(dev_priv, buf);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
+                       struct psb_xhw_buf *buf,
+                       uint32_t pages, uint32_t * hw_cookie, uint32_t * size)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+       int ret;
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_TA_MEM_INFO;
+       xa->issue_irq = 0;
+       xa->irq_op = 0;
+       xa->arg.bi.pages = pages;
+
+       ret = psb_xhw_add(dev_priv, buf);
+       if (ret)
+               return ret;
+
+       (void)wait_event_timeout(dev_priv->xhw_caller_queue,
+                                atomic_read(&buf->done), DRM_HZ);
+
+       if (!atomic_read(&buf->done)) {
+               psb_xhw_clean_buf(dev_priv, buf);
+               return -EBUSY;
+       }
+
+       if (!xa->ret)
+               memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
+
+       *size = xa->arg.bi.size;
+       return xa->ret;
+}
+
+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
+                       struct psb_xhw_buf *buf,
+                       uint32_t flags,
+                       uint32_t param_offset,
+                       uint32_t pt_offset, uint32_t * hw_cookie)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+       int ret;
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_TA_MEM_LOAD;
+       xa->issue_irq = 0;
+       xa->irq_op = 0;
+       xa->arg.bl.flags = flags;
+       xa->arg.bl.param_offset = param_offset;
+       xa->arg.bl.pt_offset = pt_offset;
+       memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
+
+       ret = psb_xhw_add(dev_priv, buf);
+       if (ret)
+               return ret;
+
+       (void)wait_event_timeout(dev_priv->xhw_caller_queue,
+                                atomic_read(&buf->done), 3 * DRM_HZ);
+
+       if (!atomic_read(&buf->done)) {
+               psb_xhw_clean_buf(dev_priv, buf);
+               return -EBUSY;
+       }
+
+       if (!xa->ret)
+               memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
+
+       return xa->ret;
+}
+
+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
+                  struct psb_xhw_buf *buf, uint32_t * cookie)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+
+       /*
+        * This calls the extensive closed source
+        * OOM handler, which resolves the condition and
+        * sends a reply telling the scheduler what to do
+        * with the task.
+        */
+
+       buf->copy_back = 1;
+       xa->op = PSB_XHW_OOM;
+       xa->issue_irq = 1;
+       xa->irq_op = PSB_UIRQ_OOM_REPLY;
+       memcpy(xa->cookie, cookie, sizeof(xa->cookie));
+
+       return psb_xhw_add(dev_priv, buf);
+}
+
+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
+                         struct psb_xhw_buf *buf,
+                         uint32_t * cookie,
+                         uint32_t * bca, uint32_t * rca, uint32_t * flags)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+
+       /*
+        * Get info about how to schedule an OOM task.
+        */
+
+       memcpy(cookie, xa->cookie, sizeof(xa->cookie));
+       *bca = xa->arg.oom.bca;
+       *rca = xa->arg.oom.rca;
+       *flags = xa->arg.oom.flags;
+}
+
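+/*
+ * Copy the hardware cookie out of a completed fire reply back to the
+ * caller.
+ */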
+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
+                       struct psb_xhw_buf *buf, uint32_t * cookie)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+
+       memcpy(cookie, xa->cookie, sizeof(xa->cookie));
+}
+
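+/*
+ * Queue a resume request. No reply is copied back, so this is
+ * fire-and-forget from the caller's point of view.
+ */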
+int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
+{
+       struct drm_psb_xhw_arg *xa = &buf->arg;
+
+       buf->copy_back = 0;
+       xa->op = PSB_XHW_RESUME;
+       xa->issue_irq = 0;
+       xa->irq_op = 0;
+       return psb_xhw_add(dev_priv, buf);
+}
+
+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
+{
+}
+
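+/*
+ * One-time setup of the xhw channel: the request list, its lock, the
+ * client counter and the two wait queues (one for the X server reader,
+ * one for in-kernel callers waiting on replies).
+ */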
+int psb_xhw_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       unsigned long irq_flags;
+
+       INIT_LIST_HEAD(&dev_priv->xhw_in);
+       spin_lock_init(&dev_priv->xhw_lock);

+       atomic_set(&dev_priv->xhw_client, 0);
+       init_waitqueue_head(&dev_priv->xhw_queue);
+       init_waitqueue_head(&dev_priv->xhw_caller_queue);
+       mutex_init(&dev_priv->xhw_mutex);
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       dev_priv->xhw_on = 0;
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+
+       return 0;
+}
+
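+/*
+ * PSB_XHW_INIT: map the buffer object handed in by the X server as the
+ * shared communications page and mark the channel ready for submissions.
+ * Only one client may be registered at a time.
+ */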
+static int psb_xhw_init_init(struct drm_device *dev,
+                            struct drm_file *file_priv,
+                            struct drm_psb_xhw_init_arg *arg)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       int ret;
+       int is_iomem;
+
+       if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
+               unsigned long irq_flags;
+
+               mutex_lock(&dev->struct_mutex);
+               dev_priv->xhw_bo =
+                   drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
+               mutex_unlock(&dev->struct_mutex);
+               if (!dev_priv->xhw_bo) {
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+               ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
+                                 dev_priv->xhw_bo->num_pages,
+                                 &dev_priv->xhw_kmap);
+               if (ret) {
+                       DRM_ERROR("Failed mapping X server "
+                                 "communications buffer.\n");
+                       goto out_err0;
+               }
+               dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
+               if (is_iomem) {
+                       DRM_ERROR("X server communications buffer"
+                                 "is in device memory.\n");
+                       ret = -EINVAL;
+                       goto out_err1;
+               }
+               dev_priv->xhw_file = file_priv;
+
+               spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+               dev_priv->xhw_on = 1;
+               dev_priv->xhw_submit_ok = 1;
+               spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+
+               return 0;
+       } else {
+               DRM_ERROR("Xhw is already initialized.\n");
+               return -EBUSY;
+       }
+      out_err1:
+       dev_priv->xhw = NULL;
+       drm_bo_kunmap(&dev_priv->xhw_kmap);
+      out_err0:
+       drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
+      out_err:
+       atomic_dec(&dev_priv->xhw_client);
+       return ret;
+}
+
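+/*
+ * Fail everything still queued: block further submissions, complete each
+ * pending buffer (with -EINVAL where a reply was expected) and wake the
+ * waiting callers.
+ */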
+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
+{
+       struct psb_xhw_buf *cur_buf, *next;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       dev_priv->xhw_submit_ok = 0;
+
+       list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
+               list_del_init(&cur_buf->head);
+               if (cur_buf->copy_back) {
+                       cur_buf->arg.ret = -EINVAL;
+               }
+               atomic_set(&cur_buf->done, 1);
+       }
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+       wake_up(&dev_priv->xhw_caller_queue);
+}
+
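+/*
+ * Undo psb_xhw_init_init(). Unless the file is being closed, a terminate
+ * request is sent first so the X server side can wind down before the
+ * queue is flushed and the communications buffer is unmapped.
+ */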
+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
+                          struct drm_file *file_priv, int closing)
+{
+
+       if (dev_priv->xhw_file == file_priv &&
+           atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
+
+               if (closing)
+                       psb_xhw_queue_empty(dev_priv);
+               else {
+                       struct psb_xhw_buf buf;
+                       INIT_LIST_HEAD(&buf.head);
+
+                       psb_xhw_terminate(dev_priv, &buf);
+                       psb_xhw_queue_empty(dev_priv);
+               }
+
+               dev_priv->xhw = NULL;
+               drm_bo_kunmap(&dev_priv->xhw_kmap);
+               drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
+               dev_priv->xhw_file = NULL;
+       }
+}
+
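+/*
+ * User-space entry point for setting up or tearing down the channel.
+ */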
+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+
+       switch (arg->operation) {
+       case PSB_XHW_INIT:
+               return psb_xhw_init_init(dev, file_priv, arg);
+       case PSB_XHW_TAKEDOWN:
+               psb_xhw_init_takedown(dev_priv, file_priv, 0);
+       }
+       return 0;
+}
+
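+/*
+ * Locked test for an empty request queue.
+ */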
+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
+{
+       int empty;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       empty = list_empty(&dev_priv->xhw_in);
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+       return empty;
+}
+
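+/*
+ * Completion path: when the X server signals that the current request is
+ * done, copy the reply out of the shared page, latch any user-interrupt
+ * opcode and wake the waiting caller.
+ */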
+int psb_xhw_handler(struct drm_psb_private *dev_priv)
+{
+       unsigned long irq_flags;
+       struct drm_psb_xhw_arg *xa;
+       struct psb_xhw_buf *buf;
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+
+       if (!dev_priv->xhw_on) {
+               spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+               return -EINVAL;
+       }
+
+       buf = dev_priv->xhw_cur_buf;
+       if (buf && buf->copy_back) {
+               xa = &buf->arg;
+               memcpy(xa, dev_priv->xhw, sizeof(*xa));
+               dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
+               atomic_set(&buf->done, 1);
+               wake_up(&dev_priv->xhw_caller_queue);
+       } else {
+               dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
+       }
+
+       dev_priv->xhw_cur_buf = NULL;
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+       return 0;
+}
+
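+/*
+ * The X server side of the channel: block until a request is queued,
+ * copy it into the shared communications page and, if a reply is
+ * expected, remember the buffer so psb_xhw_handler() can complete it.
+ */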
+int psb_xhw_ioctl(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *)dev->dev_private;
+       unsigned long irq_flags;
+       struct drm_psb_xhw_arg *xa;
+       int ret;
+       struct list_head *list;
+       struct psb_xhw_buf *buf;
+
+       if (!dev_priv)
+               return -EINVAL;
+
+       if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
+               return -EAGAIN;
+
+       if (psb_forced_user_interrupt(dev_priv)) {
+               mutex_unlock(&dev_priv->xhw_mutex);
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       while (list_empty(&dev_priv->xhw_in)) {
+               spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+               ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
+                                                      !psb_xhw_in_empty(dev_priv),
+                                                      DRM_HZ);
+               if (ret == -ERESTARTSYS || ret == 0) {
+                       mutex_unlock(&dev_priv->xhw_mutex);
+                       return -EAGAIN;
+               }
+               spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
+       }
+
+       list = dev_priv->xhw_in.next;
+       list_del_init(list);
+
+       buf = list_entry(list, struct psb_xhw_buf, head);
+       xa = &buf->arg;
+       memcpy(dev_priv->xhw, xa, sizeof(*xa));
+
+       if (unlikely(buf->copy_back))
+               dev_priv->xhw_cur_buf = buf;
+       else {
+               atomic_set(&buf->done, 1);
+               dev_priv->xhw_cur_buf = NULL;
+       }
+
+       if (xa->op == PSB_XHW_TERMINATE) {
+               dev_priv->xhw_on = 0;
+               wake_up(&dev_priv->xhw_caller_queue);
+       }
+       spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
+
+       mutex_unlock(&dev_priv->xhw_mutex);
+
+       return 0;
+}
diff --git a/s5.mk b/s5.mk
new file mode 100644 (file)
index 0000000..61ba4d2
--- /dev/null
+++ b/s5.mk
@@ -0,0 +1,7 @@
+PRODUCT_PACKAGES := $(THIRD_PARTY_APPS)
+
+$(call inherit-product,$(SRC_TARGET_DIR)/product/generic_x86.mk)
+
+PRODUCT_NAME := s5
+PRODUCT_DEVICE := s5
+
diff --git a/s5_defconfig b/s5_defconfig
new file mode 100644 (file)
index 0000000..849409d
--- /dev/null
+++ b/s5_defconfig
@@ -0,0 +1,2245 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.29
+# Fri Jun 25 18:13:51 2010
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+# CONFIG_X86_64 is not set
+CONFIG_X86=y
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_FAST_CMPXCHG_LOCAL=y
+CONFIG_MMU=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_TIME_VSYSCALL is not set
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_X86_SMP=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_TRAMPOLINE=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="-android-x86-s5"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_TREE=y
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_ASHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+# CONFIG_SPARSE_IRQ is not set
+CONFIG_X86_FIND_SMP_CONFIG=y
+CONFIG_X86_MPPARSE=y
+CONFIG_X86_PC=y
+# CONFIG_X86_ELAN is not set
+# CONFIG_X86_VOYAGER is not set
+# CONFIG_X86_GENERICARCH is not set
+# CONFIG_X86_VSMP is not set
+# CONFIG_X86_RDC321X is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+# CONFIG_PARAVIRT_GUEST is not set
+# CONFIG_MEMTEST is not set
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+CONFIG_M586=y
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MPSC is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_GENERIC_CPU is not set
+CONFIG_X86_GENERIC=y
+CONFIG_X86_CPU=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_X86_XADD=y
+# CONFIG_X86_PPRO_FENCE is not set
+CONFIG_X86_F00F_BUG=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_ALIGNMENT_16=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=4
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR_32=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
+CONFIG_DMI=y
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_IOMMU_API is not set
+CONFIG_NR_CPUS=2
+CONFIG_SCHED_SMT=y
+# CONFIG_SCHED_MC is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+# CONFIG_X86_MCE_NONFATAL is not set
+# CONFIG_X86_MCE_P4THERMAL is not set
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+# CONFIG_X86_REBOOTFIXUPS is not set
+CONFIG_MICROCODE=y
+CONFIG_MICROCODE_INTEL=y
+# CONFIG_MICROCODE_AMD is not set
+CONFIG_MICROCODE_OLD_INTERFACE=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_3G_OPT is not set
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_2G_OPT is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_X86_PAE is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW_64K=y
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+# CONFIG_MTRR_SANITIZER is not set
+# CONFIG_X86_PAT is not set
+# CONFIG_EFI is not set
+CONFIG_SECCOMP=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+# CONFIG_KEXEC is not set
+CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Power management and ACPI options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HAS_WAKELOCK=y
+CONFIG_HAS_EARLYSUSPEND=y
+CONFIG_WAKELOCK=y
+CONFIG_WAKELOCK_STAT=y
+CONFIG_USER_WAKELOCK=y
+CONFIG_EARLYSUSPEND=y
+# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set
+# CONFIG_CONSOLE_EARLYSUSPEND is not set
+CONFIG_FB_EARLYSUSPEND=y
+CONFIG_ACPI=y
+CONFIG_ACPI_SLEEP=y
+# CONFIG_ACPI_PROCFS is not set
+# CONFIG_ACPI_PROCFS_POWER is not set
+CONFIG_ACPI_SYSFS_POWER=y
+# CONFIG_ACPI_PROC_EVENT is not set
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=y
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_THERMAL=y
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+CONFIG_ACPI_BLACKLIST_YEAR=0
+# CONFIG_ACPI_DEBUG is not set
+# CONFIG_ACPI_PCI_SLOT is not set
+CONFIG_X86_PM_TIMER=y
+CONFIG_ACPI_CONTAINER=y
+CONFIG_ACPI_SBS=y
+CONFIG_X86_APM_BOOT=y
+CONFIG_APM=m
+# CONFIG_APM_IGNORE_USER_SUSPEND is not set
+CONFIG_APM_DO_ENABLE=y
+# CONFIG_APM_CPU_IDLE is not set
+CONFIG_APM_DISPLAY_BLANK=y
+CONFIG_APM_ALLOW_INTS=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_MIN_TICKS=10
+CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER=1000
+
+#
+# CPUFreq processor drivers
+#
+CONFIG_X86_ACPI_CPUFREQ=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_POWERNOW_K8 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_SPEEDSTEP_ICH=y
+CONFIG_X86_SPEEDSTEP_SMI=y
+CONFIG_X86_P4_CLOCKMOD=y
+CONFIG_X86_CPUFREQ_NFORCE2=y
+# CONFIG_X86_LONGRUN is not set
+# CONFIG_X86_LONGHAUL is not set
+# CONFIG_X86_E_POWERSAVER is not set
+
+#
+# shared options
+#
+CONFIG_X86_SPEEDSTEP_LIB=y
+CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+# CONFIG_PCI_GOOLPC is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_HOTPLUG_PCI_PCIE is not set
+CONFIG_PCIEAER=y
+# CONFIG_PCIEASPM is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_OLPC is not set
+# CONFIG_PCCARD is not set
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_FAKE is not set
+# CONFIG_HOTPLUG_PCI_COMPAQ is not set
+# CONFIG_HOTPLUG_PCI_IBM is not set
+# CONFIG_HOTPLUG_PCI_ACPI is not set
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+# CONFIG_HOTPLUG_PCI_SHPC is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_COMPAT_NET_DEV_OPS=y
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+CONFIG_ANDROID_PARANOID_NETWORK=y
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=m
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_NL80211=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+
+#
+# Rate control algorithm selection
+#
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=m
+CONFIG_RFKILL_PM=y
+# CONFIG_RFKILL_INPUT is not set
+CONFIG_RFKILL_LEDS=y
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=m
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_EEPROM_93CX6=m
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_ACPI=y
+CONFIG_SATA_PMP=y
+CONFIG_SATA_AHCI=y
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_PATA_ACPI is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_PLATFORM is not set
+CONFIG_PATA_SCH=y
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# Enable only one of the two stacks, unless you know what you are doing
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_SB1000 is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=m
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_E100 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_R6040 is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_SC92031 is not set
+CONFIG_ATL2=m
+CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_R8169=m
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
+CONFIG_ATL1E=m
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+# CONFIG_LIBERTAS_USB is not set
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_DEBUG is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+# CONFIG_HERMES is not set
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8180 is not set
+CONFIG_RTL8187=m
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+CONFIG_ATH5K=m
+# CONFIG_ATH5K_DEBUG is not set
+CONFIG_ATH9K=m
+# CONFIG_ATH9K_DEBUG is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLCORE is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_IWLAGN is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_HSO=m
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPPOLAC=m
+CONFIG_PPPOPNS=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NET_FC is not set
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=m
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+# CONFIG_INPUT_KEYRESET is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_LIFEBOOK is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_PS2_EZEX=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PCSPKR=m
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATLAS_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_INPUT_KEYCHORD is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_CT82C710=y
+CONFIG_SERIO_PCIPS2=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=y
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVMEM=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_INTEL=y
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_GEODE is not set
+# CONFIG_HW_RANDOM_VIA is not set
+CONFIG_NVRAM=m
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_CS5535_GPIO is not set
+CONFIG_RAW_DRIVER=m
+CONFIG_MAX_RAW_DEVS=4096
+CONFIG_HPET=y
+CONFIG_HPET_MMAP=y
+CONFIG_HANGCHECK_TIMER=m
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_NFORCE2_S4985=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Graphics adapter I2C/DDC channel drivers
+#
+# CONFIG_I2C_VOODOO3 is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_SCx200_ACB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_PCA963X is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_SUPPLY_DEBUG=y
+CONFIG_PDA_POWER=y
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=y
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+CONFIG_SENSORS_LM85=y
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+# CONFIG_SSB_B43_PCI_BRIDGE is not set
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_REGULATOR is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+# CONFIG_VIDEO_V4L1_COMPAT is not set
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_VIDEO_IR=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_IR_I2C=m
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_WM8775=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_TVP5150=m
+CONFIG_VIDEO_CX25840=m
+CONFIG_VIDEO_CX2341X=m
+CONFIG_VIDEO_VIVI=m
+# CONFIG_VIDEO_BT848 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_SAA7134 is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_CX88 is not set
+# CONFIG_VIDEO_CX23885 is not set
+# CONFIG_VIDEO_AU0828 is not set
+# CONFIG_VIDEO_IVTV is not set
+# CONFIG_VIDEO_CX18 is not set
+# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_SOC_CAMERA is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_USBVISION=m
+CONFIG_USB_ET61X251=m
+CONFIG_USB_SN9C102=m
+CONFIG_USB_ZC0301=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+# CONFIG_DVB_CAPTURE_DRIVERS is not set
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_S5H1411=m
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+# CONFIG_AGP_AMD64 is not set
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_BOOT_VESA_SUPPORT=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+CONFIG_FB_UVESA=m
+CONFIG_FB_VESA=y
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_CYBLA is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_PROGEAR is not set
+# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_VGACON_SOFT_SCROLLBACK=y
+CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_PCSP=m
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5530 is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_RECONFIG=y
+# CONFIG_SND_HDA_INPUT_BEEP is not set
+CONFIG_SND_HDA_CODEC_REALTEK=y
+# CONFIG_SND_HDA_CODEC_ANALOG is not set
+# CONFIG_SND_HDA_CODEC_SIGMATEL is not set
+# CONFIG_SND_HDA_CODEC_VIA is not set
+# CONFIG_SND_HDA_CODEC_ATIHDMI is not set
+# CONFIG_SND_HDA_CODEC_NVHDMI is not set
+# CONFIG_SND_HDA_CODEC_INTELHDMI is not set
+# CONFIG_SND_HDA_CODEC_CONEXANT is not set
+# CONFIG_SND_HDA_CODEC_CMEDIA is not set
+# CONFIG_SND_HDA_CODEC_SI3054 is not set
+# CONFIG_SND_HDA_GENERIC is not set
+CONFIG_SND_HDA_POWER_SAVE=y
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=3
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SIS7019 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_USX2Y=m
+CONFIG_SND_USB_CAIAQ=m
+# CONFIG_SND_USB_CAIAQ_INPUT is not set
+CONFIG_SND_USB_US122L=m
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_GREENASIA_FF is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+#
+
+#
+# see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+CONFIG_USB_LED=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_EMBEDDED_SDIO is not set
+# CONFIG_MMC_PARANOID_SD_INIT is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_ALIX2 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_CLEVO_MAIL is not set
+# CONFIG_LEDS_PCA955X is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+# CONFIG_LEDS_TRIGGER_SLEEP is not set
+CONFIG_SWITCH=y
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_DEBUG=y
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+CONFIG_RTC_INTF_ALARM=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_ME4000 is not set
+# CONFIG_MEILHAUS is not set
+# CONFIG_VIDEO_GO7007 is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_USB_ATMEL is not set
+# CONFIG_AGNX is not set
+# CONFIG_OTUS is not set
+CONFIG_RT2860=m
+# CONFIG_RT2870 is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_ALTERA_PCIE_CHDMA is not set
+CONFIG_RTL8187SE=m
+# CONFIG_INPUT_MIMIO is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_EPL is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y
+# CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION is not set
+# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_ACER_WMI is not set
+# CONFIG_ASUS_LAPTOP is not set
+# CONFIG_FUJITSU_LAPTOP is not set
+# CONFIG_TC1100_WMI is not set
+# CONFIG_MSI_LAPTOP is not set
+# CONFIG_PANASONIC_LAPTOP is not set
+# CONFIG_COMPAL_LAPTOP is not set
+# CONFIG_SONY_LAPTOP is not set
+# CONFIG_THINKPAD_ACPI is not set
+# CONFIG_INTEL_MENLOW is not set
+# CONFIG_EEEPC_LAPTOP is not set
+# CONFIG_ACPI_WMI is not set
+# CONFIG_ACPI_ASUS is not set
+# CONFIG_ACPI_TOSHIBA is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+# CONFIG_FIRMWARE_MEMMAP is not set
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_ISCSI_IBFT_FIND is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_EXT4_FS=m
+# CONFIG_EXT4DEV_COMPAT is not set
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=m
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+# CONFIG_SUNRPC_REGISTER_V4 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+
+#
+# Tracers
+#
+# CONFIG_SYSPROF_TRACER is not set
+# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_DOUBLEFAULT=y
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+# CONFIG_OPTIMIZE_INLINING is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+# CONFIG_CRYPTO_AUTHENC is not set
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_TWOFISH_586=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_HAVE_KVM=y
+# CONFIG_VIRTUALIZATION is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
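
The hunk above completes the board's kernel defconfig. Before building, it can be worth confirming that options this board leans on (USB storage, SDHCI, the Android drivers, the CMOS RTC) survive any "make oldconfig" pass; a minimal shell sketch, where the .config path and the sampled option list are assumptions:

    # check_config.sh -- hypothetical helper; the KCONFIG path is an assumption
    KCONFIG=${KCONFIG:-/path/to/kernel/.config}
    for opt in CONFIG_USB_STORAGE CONFIG_MMC_SDHCI_PCI CONFIG_ANDROID_BINDER_IPC CONFIG_RTC_DRV_CMOS; do
        if grep -q "^${opt}=[ym]" "$KCONFIG"; then
            echo "$opt: present"
        else
            echo "$opt: MISSING"
        fi
    done
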
diff --git a/s5_info b/s5_info
new file mode 100644 (file)
index 0000000..26002fb
--- /dev/null
+++ b/s5_info
@@ -0,0 +1,24 @@
+s5_info()
+{
+       # common settings for all S5 models
+       PREDEV=snd-hda-codec-realtek
+       SNDDEV=snd-hda-intel
+       CAMDEV=uvcvideo
+       WIFDEV=libertas_sdio
+       BOARD_USES_TSLIB=true
+}
+
+detect_hardware()
+{
+       # load the psb (Intel Poulsbo) display driver and its dependencies
+       modprobe i2c-algo-bit
+       insmod /system/lib/modules/drm.ko
+       insmod /system/lib/modules/psb.ko
+
+       # enable the touchscreen controller via its I/O ports, reading back to verify
+       /system/xbin/iowrite 0x1de 0x5a
+       /system/xbin/iowrite 0x1df 0xa5
+       /system/xbin/ioread 0x1de
+
+       s5_info
+}
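
Nothing in this file calls detect_hardware; presumably the board's init script (init.s5.sh in this import) sources it. A hypothetical caller, where the install path and the module-loading sequence are assumptions:

    # Hypothetical caller, e.g. from init.s5.sh (paths and ordering are assumptions):
    . /system/etc/s5_info          # source the functions defined above
    detect_hardware                # loads psb, unlocks the touchscreen, sets the *DEV vars
    for m in $PREDEV $SNDDEV $CAMDEV $WIFDEV; do
        modprobe "$m"              # load the codec, sound, camera and wifi modules in turn
    done
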
diff --git a/system.prop b/system.prop
new file mode 100644 (file)
index 0000000..6b84b48
--- /dev/null
+++ b/system.prop
@@ -0,0 +1,3 @@
+# system.prop for s5
+#wlan.driver.status=ok
+# end system.prop for s5
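
Both property lines ship commented out, so the file is effectively a placeholder. If wlan.driver.status=ok were enabled, it could be confirmed at runtime with the standard Android property tool:

    getprop wlan.driver.status    # prints "ok" once the property is loaded
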
diff --git a/ts.conf b/ts.conf
new file mode 100644 (file)
index 0000000..a284a08
--- /dev/null
+++ b/ts.conf
@@ -0,0 +1,31 @@
+# Use the Linux input layer event interface (enabled below)
+module_raw input-raw
+
+# Uncomment if you're using a Sharp Zaurus SL-5500/SL-5000d
+# module_raw collie
+
+# Uncomment if you're using a Sharp Zaurus SL-C700/C750/C760/C860
+# module_raw corgi
+
+# Uncomment if you're using a device with a UCB1200/1300/1400 TS interface
+# module_raw ucb1x00
+
+# Uncomment if you're using an HP iPaq h3600 or similar
+# module_raw h3600
+
+# Uncomment if you're using a Hitachi Webpad
+# module_raw mk712
+
+# Uncomment if you're using an IBM Arctic II
+# module_raw arctic2
+
+# Uncomment if you're using a Samsung Q1U
+# module_raw galax
+
+# Uncomment if you're using a touchkit device
+# module_raw touchkit
+
+module pthres pmin=1
+module variance delta=30
+module dejitter delta=100
+module linear
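
The uncommented module lines form tslib's filter chain: samples from module_raw input-raw pass through pthres (drops presses below pmin), variance and dejitter (noise smoothing), and finally linear (applies the pointercal calibration). Assuming tslib's standard test utilities are built for the target, the chain can be exercised and the calibration regenerated like so:

    # Assumes ts_calibrate/ts_test exist on the target; paths are assumptions
    export TSLIB_CONFFILE=/system/etc/ts.conf
    export TSLIB_CALIBFILE=/data/system/tslib/pointercal
    ts_calibrate        # draws crosshairs and writes a fresh pointercal
    ts_test             # interactive check of the filtered samples
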
diff --git a/ts.env b/ts.env
new file mode 100644 (file)
index 0000000..da04578
--- /dev/null
+++ b/ts.env
@@ -0,0 +1,6 @@
+TSLIB_CONSOLEDEVICE=none
+TSLIB_FBDEVICE=/dev/graphics/fb0
+TSLIB_TSDEVICE=/dev/input/event4
+TSLIB_CALIBFILE=/data/system/tslib/pointercal
+TSLIB_CONFFILE=/system/etc/ts.conf
+TSLIB_PLUGINDIR=/system/lib/ts
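
ts.env is plain variable assignments and has no effect until its lines are exported into the environment of whatever process loads tslib; a minimal sketch of sourcing it from a shell init script (the sourcing site and install path are assumptions):

    set -a                      # auto-export every assignment that follows
    . /system/etc/ts.env        # assumed install path for this file
    set +a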