From 42e6f8da6d694e77678b7ffd8a32a5e9ab56efe3 Mon Sep 17 00:00:00 2001
From: Alan Olsen
Date: Thu, 15 Oct 2009 10:42:37 -0700
Subject: [PATCH] Moorestown graphics consolidation patch v2.10

Includes all patches through v2.10 of the PSB drivers as well as
Alpha2-2.9-mrst-GFX-driver-incremental-restore-MSIreg-in-PCIx.patch.

Signed-off-by: Thomas Hellstrom
Signed-off-by: Sophia (Chia-Hung) Kuo
Signed-off-by: Alan Olsen
---
 drivers/gpu/drm/Kconfig                        |   12 +
 drivers/gpu/drm/Makefile                       |    3 +-
 drivers/gpu/drm/drm_crtc.c                     |  130 ++
 drivers/gpu/drm/drm_drv.c                      |   13 +-
 drivers/gpu/drm/drm_global.c                   |  107 +
 drivers/gpu/drm/drm_irq.c                      |   30 +
 drivers/gpu/drm/psb/Makefile                   |   19 +
 drivers/gpu/drm/psb/lnc_topaz.c                |  676 +++++++
 drivers/gpu/drm/psb/lnc_topaz.h                |  902 +++++++++
 drivers/gpu/drm/psb/lnc_topazinit.c            | 2058 ++++++++++++++++++++
 drivers/gpu/drm/psb/psb_bl.c                   |  232 +++
 drivers/gpu/drm/psb/psb_buffer.c               |  519 +++++
 drivers/gpu/drm/psb/psb_dpst.c                 |  208 ++
 drivers/gpu/drm/psb/psb_dpst.h                 |   90 +
 drivers/gpu/drm/psb/psb_drm.h                  |  716 +++++++
 drivers/gpu/drm/psb/psb_drv.c                  | 2239 +++++++++++++++++++++
 drivers/gpu/drm/psb/psb_drv.h                  | 1224 ++++++++++++
 drivers/gpu/drm/psb/psb_fb.c                   | 1833 +++++++++++++++++
 drivers/gpu/drm/psb/psb_fb.h                   |   47 +
 drivers/gpu/drm/psb/psb_fence.c                |  359 ++++
 drivers/gpu/drm/psb/psb_gtt.c                  |  278 +++
 drivers/gpu/drm/psb/psb_hotplug.c              |  427 ++++
 drivers/gpu/drm/psb/psb_hotplug.h              |   96 +
 drivers/gpu/drm/psb/psb_intel_bios.c           |  309 +++
 drivers/gpu/drm/psb/psb_intel_bios.h           |  436 +++++
 drivers/gpu/drm/psb/psb_intel_display.c        | 2484 ++++++++++++++++++++++++
 drivers/gpu/drm/psb/psb_intel_display.h        |   31 +
 drivers/gpu/drm/psb/psb_intel_drv.h            |  246 +++
 drivers/gpu/drm/psb/psb_intel_dsi.c            | 1798 +++++++++++++++++
 drivers/gpu/drm/psb/psb_intel_i2c.c            |  179 ++
 drivers/gpu/drm/psb/psb_intel_lvds.c           | 1343 +++++++++++++
 drivers/gpu/drm/psb/psb_intel_modes.c          |   64 +
 drivers/gpu/drm/psb/psb_intel_reg.h            | 1015 ++++++++++
 drivers/gpu/drm/psb/psb_intel_sdvo.c           | 1350 +++++++++++++
 drivers/gpu/drm/psb/psb_intel_sdvo_regs.h      |  345 ++++
 drivers/gpu/drm/psb/psb_irq.c                  |  621 ++++++
 drivers/gpu/drm/psb/psb_mmu.c                  | 1073 ++++++++++
 drivers/gpu/drm/psb/psb_msvdx.c                |  855 ++++++++
 drivers/gpu/drm/psb/psb_msvdx.h                |  527 +++++
 drivers/gpu/drm/psb/psb_msvdxinit.c            |  747 +++++++
 drivers/gpu/drm/psb/psb_powermgmt.c            | 1146 +++++++++++
 drivers/gpu/drm/psb/psb_powermgmt.h            |   73 +
 drivers/gpu/drm/psb/psb_reg.h                  |  574 ++++++
 drivers/gpu/drm/psb/psb_reset.c                |  484 +++++
 drivers/gpu/drm/psb/psb_scene.c                |  523 +++++
 drivers/gpu/drm/psb/psb_scene.h                |  119 ++
 drivers/gpu/drm/psb/psb_schedule.c             | 1593 +++++++++++++++
 drivers/gpu/drm/psb/psb_schedule.h             |  181 ++
 drivers/gpu/drm/psb/psb_setup.c                |   18 +
 drivers/gpu/drm/psb/psb_sgx.c                  | 1784 +++++++++++++++++
 drivers/gpu/drm/psb/psb_sgx.h                  |   41 +
 drivers/gpu/drm/psb/psb_socket.c               |  340 ++++
 drivers/gpu/drm/psb/psb_ttm_glue.c             |  342 ++++
 drivers/gpu/drm/psb/psb_umevents.c             |  490 +++++
 drivers/gpu/drm/psb/psb_umevents.h             |  150 ++
 drivers/gpu/drm/psb/psb_xhw.c                  |  652 +++++++
 drivers/gpu/drm/psb/ttm/ttm_agp_backend.c      |  149 ++
 drivers/gpu/drm/psb/ttm/ttm_bo.c               | 1716 ++++++++++++++++
 drivers/gpu/drm/psb/ttm/ttm_bo_api.h           |  578 ++++++
 drivers/gpu/drm/psb/ttm/ttm_bo_driver.h        |  859 ++++++++
 drivers/gpu/drm/psb/ttm/ttm_bo_util.c          |  536 +++++
 drivers/gpu/drm/psb/ttm/ttm_bo_vm.c            |  596 ++++++
 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c     |  115 ++
 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h     |  110 ++
 drivers/gpu/drm/psb/ttm/ttm_fence.c            |  607 ++++++
 drivers/gpu/drm/psb/ttm/ttm_fence_api.h        |  277 +++
 drivers/gpu/drm/psb/ttm/ttm_fence_driver.h     |  309 +++
 drivers/gpu/drm/psb/ttm/ttm_fence_user.c       |  242 +++
 drivers/gpu/drm/psb/ttm/ttm_fence_user.h       |  147 ++
 drivers/gpu/drm/psb/ttm/ttm_lock.c             |  162 ++
 drivers/gpu/drm/psb/ttm/ttm_lock.h             |  181 ++
 drivers/gpu/drm/psb/ttm/ttm_memory.c           |  232 +++
 drivers/gpu/drm/psb/ttm/ttm_memory.h           |  154 ++
 drivers/gpu/drm/psb/ttm/ttm_object.c           |  444 +++++
 drivers/gpu/drm/psb/ttm/ttm_object.h           |  269 +++
 drivers/gpu/drm/psb/ttm/ttm_pat_compat.c       |  178 ++
 drivers/gpu/drm/psb/ttm/ttm_pat_compat.h       |   41 +
 drivers/gpu/drm/psb/ttm/ttm_placement_common.h |   98 +
 drivers/gpu/drm/psb/ttm/ttm_placement_user.c   |  468 +++++
 drivers/gpu/drm/psb/ttm/ttm_placement_user.h   |  259 +++
 drivers/gpu/drm/psb/ttm/ttm_regman.h           |   74 +
 drivers/gpu/drm/psb/ttm/ttm_tt.c               |  655 +++++++
 drivers/gpu/drm/psb/ttm/ttm_userobj_api.h      |   79 +
 include/drm/drm.h                              |    1 +
 include/drm/drmP.h                             |   30 +
 include/drm/drm_crtc.h                         |   12 +
 include/drm/drm_mode.h                         |   18 +
 include/linux/backlight.h                      |    3 +
 89 files changed, 43758 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/drm/drm_global.c
 create mode 100644 drivers/gpu/drm/psb/Makefile
 create mode 100644 drivers/gpu/drm/psb/lnc_topaz.c
 create mode 100644 drivers/gpu/drm/psb/lnc_topaz.h
 create mode 100644 drivers/gpu/drm/psb/lnc_topazinit.c
 create mode 100644 drivers/gpu/drm/psb/psb_bl.c
 create mode 100644 drivers/gpu/drm/psb/psb_buffer.c
 create mode 100644 drivers/gpu/drm/psb/psb_dpst.c
 create mode 100644 drivers/gpu/drm/psb/psb_dpst.h
 create mode 100644 drivers/gpu/drm/psb/psb_drm.h
 create mode 100644 drivers/gpu/drm/psb/psb_drv.c
 create mode 100644 drivers/gpu/drm/psb/psb_drv.h
 create mode 100644 drivers/gpu/drm/psb/psb_fb.c
 create mode 100644 drivers/gpu/drm/psb/psb_fb.h
 create mode 100644 drivers/gpu/drm/psb/psb_fence.c
 create mode 100644 drivers/gpu/drm/psb/psb_gtt.c
 create mode 100644 drivers/gpu/drm/psb/psb_hotplug.c
 create mode 100644 drivers/gpu/drm/psb/psb_hotplug.h
 create mode 100644 drivers/gpu/drm/psb/psb_intel_bios.c
 create mode 100644 drivers/gpu/drm/psb/psb_intel_bios.h
 create mode 100644 drivers/gpu/drm/psb/psb_intel_display.c
 create mode 100644 drivers/gpu/drm/psb/psb_intel_display.h
 create mode 100644 drivers/gpu/drm/psb/psb_intel_drv.h
 create mode 100644 drivers/gpu/drm/psb/psb_intel_dsi.c
 create mode 100644 drivers/gpu/drm/psb/psb_intel_i2c.c
 create mode 100644 drivers/gpu/drm/psb/psb_intel_lvds.c
 create mode 100644 drivers/gpu/drm/psb/psb_intel_modes.c
 create mode 100644 drivers/gpu/drm/psb/psb_intel_reg.h
 create mode 100644 drivers/gpu/drm/psb/psb_intel_sdvo.c
 create mode 100644 drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
 create mode 100644 drivers/gpu/drm/psb/psb_irq.c
 create mode 100644 drivers/gpu/drm/psb/psb_mmu.c
 create mode 100644 drivers/gpu/drm/psb/psb_msvdx.c
 create mode 100644 drivers/gpu/drm/psb/psb_msvdx.h
 create mode 100644 drivers/gpu/drm/psb/psb_msvdxinit.c
 create mode 100644 drivers/gpu/drm/psb/psb_powermgmt.c
 create mode 100644 drivers/gpu/drm/psb/psb_powermgmt.h
 create mode 100644 drivers/gpu/drm/psb/psb_reg.h
 create mode 100644 drivers/gpu/drm/psb/psb_reset.c
 create mode 100644 drivers/gpu/drm/psb/psb_scene.c
 create mode 100644 drivers/gpu/drm/psb/psb_scene.h
 create mode 100644 drivers/gpu/drm/psb/psb_schedule.c
 create mode 100644 drivers/gpu/drm/psb/psb_schedule.h
 create mode 100644 drivers/gpu/drm/psb/psb_setup.c
 create mode 100644 drivers/gpu/drm/psb/psb_sgx.c
 create mode 100644 drivers/gpu/drm/psb/psb_sgx.h
 create mode 100644 drivers/gpu/drm/psb/psb_socket.c
 create mode 100644 drivers/gpu/drm/psb/psb_ttm_glue.c
 create mode 100644 drivers/gpu/drm/psb/psb_umevents.c
 create mode 100644 drivers/gpu/drm/psb/psb_umevents.h
 create mode 100644 drivers/gpu/drm/psb/psb_xhw.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_api.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_util.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_api.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_user.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_fence_user.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_lock.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_lock.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_memory.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_memory.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_object.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_object.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_common.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_user.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_placement_user.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_regman.h
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_tt.c
 create mode 100644 drivers/gpu/drm/psb/ttm/ttm_userobj_api.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 39b393d..9bd8ca1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -143,3 +143,15 @@ config DRM_SAVAGE
 	help
 	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
 	  chipset. If M is selected the module will be called savage.
+
+config DRM_PSB
+	tristate "Intel Poulsbo/Moorestown"
+	depends on DRM && PCI
+	select FB_CFB_COPYAREA
+	select FB_CFB_FILLRECT
+	select FB_CFB_IMAGEBLIT
+	select MRST_RAR_HANDLER
+	help
+	  Choose this option if you have a Poulsbo or Moorestown platform.
+	  If M is selected the module will be called psb.
+
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index d76f167..4989b1e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,12 +15,13 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
 		drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
-		drm_info.o drm_debugfs.o
+		drm_info.o drm_debugfs.o drm_global.o

 drm-$(CONFIG_COMPAT) += drm_ioc32.o

 obj-$(CONFIG_DRM)	+= drm.o
 obj-$(CONFIG_DRM_TTM)	+= ttm/
+obj-$(CONFIG_DRM_PSB)	+= psb/
 obj-$(CONFIG_DRM_TDFX)	+= tdfx/
 obj-$(CONFIG_DRM_R128)	+= r128/
 obj-$(CONFIG_DRM_RADEON)+= radeon/

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2f631c7..11cd2e8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -146,6 +146,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
 	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+	{ DRM_MODE_CONNECTOR_MIPI, "MIPI", 0 },
 };

 static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -154,6 +155,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_MIPI, "MIPI" },
 };

 char *drm_get_encoder_name(struct drm_encoder *encoder)

diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b7f3a41..81195a7 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -344,6 +345,8 @@ static int __init drm_core_init(void)
 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+	drm_global_init();
+
 	return 0;
 err_p3:
 	drm_sysfs_destroy();
@@ -357,6 +360,7 @@ err_p1:

 static void __exit drm_core_exit(void)
 {
+	drm_global_release();
 	remove_proc_entry("dri", NULL);
 	debugfs_remove(drm_debugfs_root);
 	drm_sysfs_destroy();
@@ -408,9 +412,16 @@ static int drm_version(struct drm_device *dev, void *data,
  * Looks up the ioctl function in the ::ioctls table, checking for root
  * privileges if so required, and dispatches to the respective function.
  */
+
 int drm_ioctl(struct inode *inode, struct file *filp,
 	      unsigned int cmd, unsigned long arg)
 {
+	return drm_unlocked_ioctl(filp, cmd, arg);
+}
+EXPORT_SYMBOL(drm_ioctl);
+
+long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev = file_priv->minor->dev;
 	struct drm_ioctl_desc *ioctl;
@@ -493,7 +504,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 	return retcode;
 }
-EXPORT_SYMBOL(drm_ioctl);
+EXPORT_SYMBOL(drm_unlocked_ioctl);

 struct drm_local_map *drm_getsarea(struct drm_device *dev)
 {
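The hunk above splits the old BKL-protected drm_ioctl() into a thin wrapper around a new drm_unlocked_ioctl(), whose signature matches file_operations.unlocked_ioctl. A minimal sketch of how a driver could take advantage of the new export; the psb_fops name and the exact set of handlers are illustrative assumptions, not taken from this patch:

```c
#include <linux/fs.h>
#include "drmP.h"

/* Illustrative fops table: routing ioctls through drm_unlocked_ioctl()
 * avoids taking the BKL on every command, which matters for a
 * command-submission-heavy driver like PSB. */
static const struct file_operations psb_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_unlocked_ioctl,	/* new export above */
	.mmap		= drm_mmap,
	.poll		= drm_poll,
	.fasync		= drm_fasync,
};
```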
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
new file mode 100644
index 0000000..e054c4f
--- /dev/null
+++ b/drivers/gpu/drm/drm_global.c
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <drmP.h>
+struct drm_global_item {
+	struct mutex mutex;
+	void *object;
+	int refcount;
+};
+
+static struct drm_global_item glob[DRM_GLOBAL_NUM];
+
+void drm_global_init(void)
+{
+	int i;
+
+	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+		struct drm_global_item *item = &glob[i];
+		mutex_init(&item->mutex);
+		item->object = NULL;
+		item->refcount = 0;
+	}
+}
+
+void drm_global_release(void)
+{
+	int i;
+	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+		struct drm_global_item *item = &glob[i];
+		BUG_ON(item->object != NULL);
+		BUG_ON(item->refcount != 0);
+	}
+}
+
+int drm_global_item_ref(struct drm_global_reference *ref)
+{
+	int ret;
+	struct drm_global_item *item = &glob[ref->global_type];
+
+	mutex_lock(&item->mutex);
+	if (item->refcount == 0) {
+		item->object = kmalloc(ref->size, GFP_KERNEL);
+		if (unlikely(item->object == NULL)) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
+
+		ref->object = item->object;
+		ret = ref->init(ref);
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+	++item->refcount;
+	ref->object = item->object;
+	mutex_unlock(&item->mutex);
+	return 0;
+out_err:
+	kfree(item->object);
+	item->object = NULL;
+	mutex_unlock(&item->mutex);
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_global_item_ref);
+
+void drm_global_item_unref(struct drm_global_reference *ref)
+{
+	struct drm_global_item *item = &glob[ref->global_type];
+
+	mutex_lock(&item->mutex);
+	BUG_ON(item->refcount == 0);
+	BUG_ON(ref->object != item->object);
+	if (--item->refcount == 0) {
+		ref->release(ref);
+		kfree(item->object);
+		item->object = NULL;
+	}
+	mutex_unlock(&item->mutex);
+}
+
+EXPORT_SYMBOL(drm_global_item_unref);
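drm_global.c gives drivers a refcounted registry for objects that must be shared across all device instances (the TTM memory-accounting global, for example). A minimal usage sketch, assuming a DRM_GLOBAL_TTM_MEM item type and the ttm_mem_global init/release helpers from the TTM code added later in this patch; the my_* names are hypothetical:

```c
static int my_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void my_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

/* Take one reference per device: drm_global_item_ref() allocates and
 * runs init() only for the first user; later callers share the same
 * object. Balance with drm_global_item_unref() at teardown. */
static int my_global_ref(struct drm_global_reference *ref)
{
	ref->global_type = DRM_GLOBAL_TTM_MEM;	/* assumed enum value */
	ref->size = sizeof(struct ttm_mem_global);
	ref->init = &my_ttm_mem_global_init;
	ref->release = &my_ttm_mem_global_release;
	return drm_global_item_ref(ref);
}
```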
diff --git a/drivers/gpu/drm/psb/Makefile b/drivers/gpu/drm/psb/Makefile
new file mode 100644
index 0000000..67319ba
--- /dev/null
+++ b/drivers/gpu/drm/psb/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+
+ccflags-y := -Idrivers/gpu/drm/psb -Iinclude/drm -Iinclude/linux
+
+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \
+	 psb_buffer.o psb_gtt.o psb_schedule.o psb_scene.o \
+	 psb_reset.o psb_xhw.o psb_msvdx.o psb_bl.o psb_intel_bios.o \
+	 psb_umevents.o psb_hotplug.o psb_socket.o psb_dpst.o \
+	 psb_powermgmt.o lnc_topaz.o lnc_topazinit.o \
+	 psb_msvdxinit.o psb_ttm_glue.o psb_fb.o psb_setup.o \
+	 ttm/ttm_object.o ttm/ttm_lock.o ttm/ttm_fence_user.o \
+	 ttm/ttm_fence.o ttm/ttm_tt.o ttm/ttm_execbuf_util.o \
+	 ttm/ttm_bo.o ttm/ttm_bo_util.o ttm/ttm_placement_user.o \
+	 ttm/ttm_bo_vm.o ttm/ttm_pat_compat.o ttm/ttm_memory.o
+
+obj-$(CONFIG_DRM_PSB) += psb.o
+
diff --git a/drivers/gpu/drm/psb/lnc_topaz.c b/drivers/gpu/drm/psb/lnc_topaz.c
new file mode 100644
index 0000000..adabac5
--- /dev/null
+++ b/drivers/gpu/drm/psb/lnc_topaz.c
@@ -0,0 +1,676 @@
+/**
+ * file lnc_topaz.c
+ * TOPAZ I/O operations and IRQ handling
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* include headers */
+/* #define DRM_DEBUG_CODE 2 */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "psb_drv.h"
+#include "psb_drm.h"
+#include "lnc_topaz.h"
+#include "psb_powermgmt.h"
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+
+/* static function define */
+static int lnc_topaz_deliver_command(struct drm_device *dev,
+				     struct ttm_buffer_object *cmd_buffer,
+				     unsigned long cmd_offset,
+				     unsigned long cmd_size,
+				     void **topaz_cmd, uint32_t sequence,
+				     int copy_cmd);
+static int lnc_topaz_send(struct drm_device *dev, void *cmd,
+			  unsigned long cmd_size, uint32_t sync_seq);
+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
+static int lnc_topaz_dequeue_send(struct drm_device *dev);
+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
+				  unsigned long cmd_size, uint32_t sequence);
+
+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	uint32_t clr_flag = lnc_topaz_queryirq(dev);
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+	uint32_t cur_seq;
+
+	lnc_topaz_clearirq(dev, clr_flag);
+
+	/* ignore non-SYNC interrupts */
+	if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
+		return;
+
+	cur_seq = *(uint32_t *)topaz_priv->topaz_sync_addr;
+
+	PSB_DEBUG_IRQ("TOPAZ: Got SYNC IRQ, sync seq: 0x%08x (MTX) vs 0x%08x\n",
+		      cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
+
+	psb_fence_handler(dev, LNC_ENGINE_ENCODE);
+
+	/* save frame skip flag for query */
+	topaz_priv->frame_skip = CCB_CTRL_FRAMESKIP(dev_priv);
+
+	topaz_priv->topaz_busy = 1;
+	lnc_topaz_dequeue_send(dev);
+
+	if (drm_topaz_pmpolicy == PSB_PMPOLICY_POWERDOWN)
+		schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
+}
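For reference, the completion handshake the handler above relies on: bit 15 of the 16-bit CCB command sequence tags SYNC commands, and the MTX firmware writes the 32-bit fence value to the topaz_sync_addr write-back word, which is why non-SYNC interrupts are ignored. A sketch of the tag test in isolation (the helper name is hypothetical, not part of the driver):

```c
/* Bit 15 of the command sequence marks MTX_CMDID_SYNC commands; see
 * topaz_sync_cmd_seq = (1 << 15) | ... in lnc_topaz_sync() below. */
static inline int topaz_seq_is_sync(uint32_t ccb_seq)
{
	return (ccb_seq & 0x8000) != 0;
}
```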
+static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
+			struct ttm_buffer_object *cmd_buffer,
+			unsigned long cmd_offset, unsigned long cmd_size,
+			struct ttm_fence_object *fence)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long irq_flags;
+	int ret = 0;
+	void *cmd;
+	uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
+
+	PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		/* #.# load fw to driver */
+		PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
+		ret = topaz_init_fw(dev);
+		if (ret != 0) {
+			/* FIXME: find a proper return value */
+			DRM_ERROR("TOPAZ: load /lib/firmware/topaz_fw.bin"
+				  " failed, ensure udevd is configured"
+				  " correctly!\n");
+
+			return -EFAULT;
+		}
+		topaz_priv->topaz_fw_loaded = 1;
+	}
+
+	/* # schedule watchdog */
+	/* psb_schedule_watchdog(dev_priv); */
+
+	/* # spin lock irq save [msvdx_lock] */
+	spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
+
+	/* # if topaz need to reset, reset it */
+	if (topaz_priv->topaz_needs_reset) {
+		/* #.# reset it */
+		spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+		PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
+
+		if (lnc_topaz_reset(dev_priv)) {
+			ret = -EBUSY;
+			DRM_ERROR("TOPAZ: reset failed.\n");
+			return ret;
+		}
+
+		PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
+
+		/* #.# upload firmware */
+		if (topaz_setup_fw(dev, topaz_priv->topaz_cur_codec)) {
+			DRM_ERROR("TOPAZ: upload FW to HW failed\n");
+			return -EBUSY;
+		}
+
+		spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
+	}
+
+	if (!topaz_priv->topaz_busy) {
+		/* # direct map topaz command if topaz is free */
+		PSB_DEBUG_GENERAL("TOPAZ: direct send command, sequence %08x\n",
+				  sequence);
+
+		topaz_priv->topaz_busy = 1;
+		spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+
+		ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
+						cmd_size, NULL, sequence, 0);
+
+		if (ret) {
+			DRM_ERROR("TOPAZ: failed to extract cmd...\n");
+			return ret;
+		}
+	} else {
+		PSB_DEBUG_GENERAL("TOPAZ: queue command, sequence %08x\n",
+				  sequence);
+		cmd = NULL;
+
+		spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+
+		ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
+						cmd_size, &cmd, sequence, 1);
+		if (cmd == NULL || ret) {
+			DRM_ERROR("TOPAZ: map command for save failed\n");
+			return ret;
+		}
+
+		ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
+		if (ret)
+			DRM_ERROR("TOPAZ: save command failed\n");
+	}
+
+	return ret;
+}
+
+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
+				  unsigned long cmd_size, uint32_t sequence)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct lnc_topaz_cmd_queue *topaz_cmd;
+	unsigned long irq_flags;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_GENERAL("TOPAZ: queue command, sequence: %08x..\n",
+			  sequence);
+
+	topaz_cmd = kzalloc(sizeof(struct lnc_topaz_cmd_queue),
+			    GFP_KERNEL);
+	if (topaz_cmd == NULL) {
+		DRM_ERROR("TOPAZ: out of memory....\n");
+		return -ENOMEM;
+	}
+
+	topaz_cmd->cmd = cmd;
+	topaz_cmd->cmd_size = cmd_size;
+	topaz_cmd->sequence = sequence;
+
+	spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
+	list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
+	if (!topaz_priv->topaz_busy) {
+		/* topaz_priv->topaz_busy = 1; */
+		PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
+		lnc_topaz_dequeue_send(dev);
+		PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
+	}
+
+	spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+
+	return 0;
+}
+
+
+int lnc_cmdbuf_video(struct drm_file *priv,
+		     struct list_head *validate_list,
+		     uint32_t fence_type,
+		     struct drm_psb_cmdbuf_arg *arg,
+		     struct ttm_buffer_object *cmd_buffer,
+		     struct psb_ttm_fence_rep *fence_arg)
+{
+	struct drm_device *dev = priv->minor->dev;
+	struct ttm_fence_object *fence = NULL;
+	int ret;
+
+	ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
+				       arg->cmdbuf_size, fence);
+	if (ret)
+		return ret;
+
+	/* workaround for interrupt issue */
+	psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type,
+			  arg->fence_flags, validate_list, fence_arg, &fence);
+
+	if (fence)
+		ttm_fence_object_unref(&fence);
+
+	mutex_lock(&cmd_buffer->mutex);
+	if (cmd_buffer->sync_obj != NULL)
+		ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
+	mutex_unlock(&cmd_buffer->mutex);
+
+	return 0;
+}
+
+static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t sync_cmd[3];
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+#if 0
+	struct ttm_fence_device *fdev = &dev_priv->fdev;
+	struct ttm_fence_class_manager *fc =
+		&fdev->fence_class[LNC_ENGINE_ENCODE];
+	unsigned long irq_flags;
+#endif
+#if LNC_TOPAZ_NO_IRQ
+	uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
+	int count = 10000;
+	uint32_t cur_seq;
+#endif
+
+	/* insert a SYNC command here */
+	topaz_priv->topaz_sync_cmd_seq = (1 << 15) |
+		topaz_priv->topaz_cmd_seq++;
+	sync_cmd[0] = 1 | (MTX_CMDID_SYNC << 1) | (3 << 8) |
+		(topaz_priv->topaz_sync_cmd_seq << 16);
+	sync_cmd[1] = topaz_priv->topaz_sync_offset;
+	sync_cmd[2] = sync_seq;
+
+	PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_SYNC: size(3), cmd seq (0x%04x),"
+			  " sync_seq (0x%08x)\n",
+			  topaz_priv->topaz_sync_cmd_seq, sync_seq);
+
+	lnc_mtx_send(dev_priv, sync_cmd);
+
+#if LNC_TOPAZ_NO_IRQ	/* workaround for interrupt issue */
+	/* # poll topaz register for certain times */
+	while (count && *sync_p != sync_seq) {
+		DRM_UDELAY(100);
+		--count;
+	}
+	if ((count == 0) && (*sync_p != sync_seq)) {
+		DRM_ERROR("TOPAZ: wait sync timeout (0x%08x), actual 0x%08x\n",
+			  sync_seq, *sync_p);
+		return -EBUSY;
+	}
+	PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
+
+	topaz_priv->topaz_busy = 0;
+
+	/* XXX: check psb_fence_handler is suitable for topaz */
+	cur_seq = *sync_p;
+#if 0
+	write_lock_irqsave(&fc->lock, irq_flags);
+	ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
+			  cur_seq,
+			  _PSB_FENCE_TYPE_EXE, 0);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+#endif
+#endif
+	return 0;
+}
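The sync_cmd[0] expression above packs the command word according to the topaz_cmd_header bitfields declared in lnc_topaz.h (bit 0 interrupt-enable, bits 1-7 id, bits 8-15 size in words, bits 16-31 sequence). A hypothetical helper, not part of the driver, showing the same encoding explicitly:

```c
/* Pack a TOPAZ command word; mirrors struct topaz_cmd_header's
 * bitfield layout. Illustrative only. */
static inline uint32_t topaz_make_cmd_word(uint32_t id, uint32_t size_words,
					   uint32_t seq, int irq_enable)
{
	return (irq_enable ? 1 : 0) | (id << 1) |
	       (size_words << 8) | (seq << 16);
}
```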
+int
+lnc_topaz_deliver_command(struct drm_device *dev,
+			  struct ttm_buffer_object *cmd_buffer,
+			  unsigned long cmd_offset, unsigned long cmd_size,
+			  void **topaz_cmd, uint32_t sequence,
+			  int copy_cmd)
+{
+	unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
+	struct ttm_bo_kmap_obj cmd_kmap;
+	bool is_iomem;
+	int ret;
+	unsigned char *cmd_start, *tmp;
+
+	ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
+			  &cmd_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: ttm_bo_kmap failed: %d\n", ret);
+		return ret;
+	}
+	cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
+			&is_iomem) + cmd_page_offset;
+
+	if (copy_cmd) {
+		PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
+		tmp = kzalloc(cmd_size, GFP_KERNEL);
+		if (tmp == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		memcpy(tmp, cmd_start, cmd_size);
+		*topaz_cmd = tmp;
+	} else {
+		PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
+		ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
+		if (ret) {
+			DRM_ERROR("TOPAZ: commit commands failed.\n");
+			ret = -EINVAL;
+		}
+	}
+
+out:
+	PSB_DEBUG_GENERAL("TOPAZ: cmd_size(%ld), sequence(%d), copy_cmd(%d)\n",
+			  cmd_size, sequence, copy_cmd);
+
+	ttm_bo_kunmap(&cmd_kmap);
+
+	return ret;
+}
+
+int
+lnc_topaz_send(struct drm_device *dev, void *cmd,
+	       unsigned long cmd_size, uint32_t sync_seq)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret = 0;
+	unsigned char *command = (unsigned char *) cmd;
+	struct topaz_cmd_header *cur_cmd_header;
+	uint32_t cur_cmd_size, cur_cmd_id;
+	uint32_t codec;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_GENERAL("TOPAZ: send the commands in the buffer one by one\n");
+
+	while (cmd_size > 0) {
+		cur_cmd_header = (struct topaz_cmd_header *) command;
+		cur_cmd_size = cur_cmd_header->size * 4;
+		cur_cmd_id = cur_cmd_header->id;
+
+		switch (cur_cmd_id) {
+		case MTX_CMDID_SW_NEW_CODEC:
+			codec = *((uint32_t *) cmd + 1);
+
+			PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
+					  codec_to_string(codec), codec);
+			if (topaz_setup_fw(dev, codec)) {
+				DRM_ERROR("TOPAZ: upload FW to HW failed\n");
+				return -EBUSY;
+			}
+
+			topaz_priv->topaz_cur_codec = codec;
+			break;
+
+		case MTX_CMDID_SW_ENTER_LOWPOWER:
+			PSB_DEBUG_GENERAL("TOPAZ: enter lowpower....\n");
+			PSB_DEBUG_GENERAL("XXX: implement it\n");
+			break;
+
+		case MTX_CMDID_SW_LEAVE_LOWPOWER:
+			PSB_DEBUG_GENERAL("TOPAZ: leave lowpower...\n");
+			PSB_DEBUG_GENERAL("XXX: implement it\n");
+			break;
+
+		/* ordinary command */
+		case MTX_CMDID_START_PIC:
+			/* XXX: specially handle START_PIC hw command */
+			CCB_CTRL_SET_QP(dev_priv,
+					*(command + cur_cmd_size - 4));
+			/* strip the QP parameter (it's a software arg) */
+			cur_cmd_header->size--;
+			/* fall through: START_PIC is then sent like any
+			 * other command */
+		default:
+			cur_cmd_header->seq = 0x7fff &
+				topaz_priv->topaz_cmd_seq++;
+
+			PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
+					  " seq (0x%04x)\n",
+					  cmd_to_string(cur_cmd_id),
+					  cur_cmd_size, cur_cmd_header->seq);
+			ret = lnc_mtx_send(dev_priv, command);
+			if (ret) {
+				DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
+				goto out;
+			}
+			break;
+		}
+
+		command += cur_cmd_size;
+		cmd_size -= cur_cmd_size;
+	}
+	lnc_topaz_sync(dev, sync_seq);
+out:
+	return ret;
+}
\n"); + PSB_DEBUG_GENERAL("XXX: implement it\n"); + break; + + /* ordinary commmand */ + case MTX_CMDID_START_PIC: + /* XXX: specially handle START_PIC hw command */ + CCB_CTRL_SET_QP(dev_priv, + *(command + cur_cmd_size - 4)); + /* strip the QP parameter (it's software arg) */ + cur_cmd_header->size--; + default: + cur_cmd_header->seq = 0x7fff & + topaz_priv->topaz_cmd_seq++; + + PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d)," + " seq (0x%04x)\n", + cmd_to_string(cur_cmd_id), + cur_cmd_size, cur_cmd_header->seq); + ret = lnc_mtx_send(dev_priv, command); + if (ret) { + DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret); + goto out; + } + break; + } + + command += cur_cmd_size; + cmd_size -= cur_cmd_size; + } + lnc_topaz_sync(dev, sync_seq); +out: + return ret; +} + +static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd) +{ + struct topaz_cmd_header *cur_cmd_header = + (struct topaz_cmd_header *) cmd; + uint32_t cmd_size = cur_cmd_header->size; + uint32_t read_index, write_index; + const uint32_t *cmd_pointer = (uint32_t *) cmd; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + int ret = 0; + + /* # enable all clock */ + + write_index = topaz_priv->topaz_cmd_windex; + if (write_index + cmd_size + 1 > topaz_priv->topaz_ccb_size) { + int free_space = topaz_priv->topaz_ccb_size - write_index; + + PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n"); + if (free_space > 0) { + struct topaz_cmd_header pad_cmd; + + pad_cmd.id = MTX_CMDID_NULL; + pad_cmd.size = free_space; + pad_cmd.seq = 0x7fff & topaz_priv->topaz_cmd_seq; + + PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:" + " size(%d),seq (0x%04x)\n", + pad_cmd.size, pad_cmd.seq); + + TOPAZ_BEGIN_CCB(dev_priv); + TOPAZ_OUT_CCB(dev_priv, pad_cmd.val); + TOPAZ_END_CCB(dev_priv, 1); + + POLL_WB_SEQ(dev_priv, pad_cmd.seq); + ++topaz_priv->topaz_cmd_seq; + } + POLL_WB_RINDEX(dev_priv, 0); + if (ret == 0) + topaz_priv->topaz_cmd_windex = 0; + else { + DRM_ERROR("TOPAZ: poll rindex timeout\n"); + return ret; /* HW may hang, need reset */ + } + PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n"); + } + + read_index = CCB_CTRL_RINDEX(dev_priv);/* temperily use CCB CTRL */ + write_index = topaz_priv->topaz_cmd_windex; + + PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n", + write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv)); + TOPAZ_BEGIN_CCB(dev_priv); + while (cmd_size > 0) { + TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++); + --cmd_size; + } + TOPAZ_END_CCB(dev_priv, 1); + +#if 0 + DRM_UDELAY(1000); + lnc_topaz_clearirq(dev, + lnc_topaz_queryirq(dev)); + LNC_TRACEL("TOPAZ: after clear, query again\n"); + lnc_topaz_queryirq(dev_priv); +#endif + + return ret; +} + +int lnc_topaz_dequeue_send(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct lnc_topaz_cmd_queue *topaz_cmd = NULL; + int ret; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n"); + + if (list_empty(&topaz_priv->topaz_queue)) { + topaz_priv->topaz_busy = 0; + return 0; + } + + topaz_cmd = list_first_entry(&topaz_priv->topaz_queue, + struct lnc_topaz_cmd_queue, head); + + PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence); + ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size, + topaz_cmd->sequence); + if (ret) { + DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n"); + ret = -EINVAL; + } + + list_del(&topaz_cmd->head); + kfree(topaz_cmd->cmd); + kfree(topaz_cmd + ); + + return ret; +} + +void 
+
+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
+{
+	PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
+	MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
+}
+
+int lnc_check_topaz_idle(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+	uint32_t reg_val;
+
+	if (topaz_priv->topaz_busy)
+		return -EBUSY;
+
+	MVEA_READ32(MVEA_CR_MVEA_BUSY, &reg_val);
+	if (reg_val != 0)
+		return -EBUSY;
+
+	MVEA_READ32(MVEA_CR_MVEA_DMACMDFIFO_WAIT, &reg_val);
+	if (reg_val != 0)
+		return -EBUSY;
+
+	MVEA_READ32(MVEA_CR_MVEA_DMACMDFIFO_STATUS, &reg_val);
+	if ((reg_val & (1 << 8)) == 0)
+		return -EBUSY;
+
+	return 0; /* we think it is idle */
+}
+
+int lnc_wait_topaz_idle(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct ttm_fence_device *fdev = &dev_priv->fdev;
+	struct ttm_fence_class_manager *fc =
+		&fdev->fence_class[LNC_ENGINE_ENCODE];
+	struct ttm_fence_object *fence, *next;
+	unsigned long _end = jiffies + 5 * DRM_HZ;
+	int signaled = 0;
+	int ret = 0;
+
+	/* Ensure that all pending IRQs are serviced. */
+	/*
+	 * Save the last MSVDX fence in dev_priv instead!!!
+	 * Need to be fc->write_locked while accessing a fence from the ring.
+	 */
+	list_for_each_entry_safe(fence, next, &fc->ring, ring) {
+		do {
+			signaled = ttm_fence_object_signaled(fence,
+						_PSB_FENCE_TYPE_EXE);
+			if (signaled)
+				break;
+			if (time_after_eq(jiffies, _end)) {
+				PSB_DEBUG_PM("TOPAZIDLE: fence 0x%x didn't get"
+					     " signaled for 5 secs\n",
+					     (unsigned int) fence);
+				break;
+			}
+			DRM_UDELAY(1000);
+		} while (1);
+	}
+
+	do {
+		ret = lnc_check_topaz_idle(dev);
+		if (ret == 0)
+			break;
+
+		if (time_after_eq(jiffies, _end)) {
+			PSB_DEBUG_PM("TOPAZIDLE: wait HW idle time out\n");
+			break;
+		}
+		DRM_UDELAY(1000);
+	} while (1);
+
+	return ret;
+}
+
+int lnc_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+	int ret;
+
+	ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
+			   &topaz_priv->frame_skip,
+			   sizeof(topaz_priv->frame_skip));
+
+	if (ret)
+		return -EFAULT;
+
+	return 0;
+}
+
+static void lnc_topaz_flush_cmd_queue(struct topaz_private *topaz_priv)
+{
+	struct lnc_topaz_cmd_queue *entry, *next;
+
+	/* remember to reset topaz */
+	topaz_priv->topaz_needs_reset = 1;
+
+	if (list_empty(&topaz_priv->topaz_queue)) {
+		topaz_priv->topaz_busy = 0;
+		return;
+	}
+
+	/* flush all commands in queue */
+	list_for_each_entry_safe(entry, next,
+				 &topaz_priv->topaz_queue,
+				 head) {
+		list_del(&entry->head);
+		kfree(entry->cmd);
+		kfree(entry);
+	}
+
+	return;
+}
+
+void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(fdev, struct drm_psb_private, fdev);
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	lnc_topaz_flush_cmd_queue(topaz_priv);
+}
+
+inline int psb_try_power_down_topaz(struct drm_device *dev)
+{
+	return powermgmt_suspend_islands(dev->pdev, PSB_VIDEO_ENC_ISLAND,
+					 false);
+}

diff --git a/drivers/gpu/drm/psb/lnc_topaz.h b/drivers/gpu/drm/psb/lnc_topaz.h
new file mode 100644
index 0000000..c48cab0
--- /dev/null
+++ b/drivers/gpu/drm/psb/lnc_topaz.h
@@ -0,0 +1,902 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation,
Hillsboro, OR, USA + * Copyright (c) Imagination Technologies Limited, UK + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef _LNC_TOPAZ_H_ +#define _LNC_TOPAZ_H_ + +#include "psb_drv.h" + +#define LNC_TOPAZ_NO_IRQ 0 +#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4) + +extern int drm_topaz_pmpolicy; + +/* + * MACROS to insert values into fields within a word. The basename of the + * field must have MASK_BASENAME and SHIFT_BASENAME constants. + */ +#define MM_WRITE32(base, offset, value) \ +do { \ + *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \ + + base + offset)) = value; \ +} while (0) + +#define MM_READ32(base, offset, pointer) \ +do { \ + *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\ + + base + offset)); \ +} while (0) + +#define F_MASK(basename) (MASK_##basename) +#define F_SHIFT(basename) (SHIFT_##basename) + +#define F_ENCODE(val, basename) \ + (((val) << (F_SHIFT(basename))) & (F_MASK(basename))) + +/* MVEA macro */ +#define MVEA_START 0x03000 + +#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value) +#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer); + +#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */ +#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */ +#define F_ENCODE_MVEA(val, basename) \ + (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename))) + +/* VLC macro */ +#define TOPAZ_VLC_START 0x05000 + +/* TOPAZ macro */ +#define TOPAZ_START 0x02000 + +#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value) +#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer) + +#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename) +#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename) +#define F_ENCODE_TOPAZ(val, basename) \ + (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename))) + +/* MTX macro */ +#define MTX_START 0x0 + +#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value) +#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer) + +/* DMAC macro */ +#define DMAC_START 0x0f000 + +#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value) +#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer) + +#define F_MASK_DMAC(basename) (MASK_DMAC_##basename) +#define 
F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename) +#define F_ENCODE_DMAC(val, basename) \ + (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename))) + + +/* Register CR_IMG_TOPAZ_INTENAB */ +#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008 +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008 + +#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C + +#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004 +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004 + +#define MTX_CCBCTRL_ROFF 0 +#define MTX_CCBCTRL_COMPLETE 4 +#define MTX_CCBCTRL_CCBSIZE 8 +#define MTX_CCBCTRL_QP 12 +#define MTX_CCBCTRL_FRAMESKIP 20 +#define MTX_CCBCTRL_INITQP 24 + +#define TOPAZ_CR_MMU_STATUS 0x001C +#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001 +#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0 +#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C + +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C + +#define TOPAZ_CR_MMU_MEM_REQ 0x0020 +#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF +#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0 +#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C + +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C + +#define MTX_CR_MTX_KICK 0x0080 +#define MASK_MTX_MTX_KICK 0x0000FFFF +#define SHIFT_MTX_MTX_KICK 0 +#define REGNUM_MTX_MTX_KICK 0x0080 + +#define MTX_DATA_MEM_BASE 0x82880000 + +#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108 +#define MASK_MTX_MTX_MCMR 0x00000001 +#define SHIFT_MTX_MTX_MCMR 0 +#define REGNUM_MTX_MTX_MCMR 0x0108 + +#define MASK_MTX_MTX_MCMID 0x0FF00000 +#define SHIFT_MTX_MTX_MCMID 20 +#define REGNUM_MTX_MTX_MCMID 0x0108 + +#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC +#define SHIFT_MTX_MTX_MCM_ADDR 2 +#define REGNUM_MTX_MTX_MCM_ADDR 0x0108 + +#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C +#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001 +#define SHIFT_MTX_MTX_MTX_MCM_STAT 0 +#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C + +#define MASK_MTX_MTX_MCMAI 0x00000002 +#define SHIFT_MTX_MTX_MCMAI 1 +#define REGNUM_MTX_MTX_MCMAI 0x0108 + +#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104 + +#define MVEA_CR_MVEA_BUSY 0x0018 +#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C +#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020 + +#define MVEA_CR_IMG_MVEA_SRST 0x0000 +#define 
MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001 +#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0 +#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000 + +#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002 +#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1 +#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000 + +#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004 +#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2 +#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000 + +#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008 +#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3 +#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000 + +#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010 +#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4 +#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000 + +#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020 +#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5 +#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000 + +#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0 +#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0 + +#define TOPAZ_MTX_PC (0x00000005) +#define PC_START_ADDRESS (0x80900000) + +#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014 +#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001 +#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0 +#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014 + +#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002 +#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1 +#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014 + +#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002 +#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1 +#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010 + +#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8 +#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC +#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000 +#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000 + +#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C + +#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004 +#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2 +#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C + +#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018 +#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3 +#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C + +#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108 + +#define TOPAZ_CR_MMU_CONTROL0 0x0024 +#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800 +#define SHIFT_TOPAZ_CR_MMU_BYPASS 11 +#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024 + +#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X))) +#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000 +#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12 +#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030 + +#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008 +#define SHIFT_TOPAZ_CR_MMU_INVALDC 3 +#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024 + +#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004 +#define SHIFT_TOPAZ_CR_MMU_FLUSH 2 +#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024 + +#define TOPAZ_CR_MMU_BANK_INDEX 0x0038 +#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2))) +#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2)) +#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038 + +#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010 +#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001 +#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0 +#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010 + +#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c +#define 
TXRPT_WAITONKICK_VALUE 0x8ade0000 + +#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002 + +#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000 +#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004 + +#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200 +#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001 + +#define MTX_CR_MTX_SYSC_CDMAA 0x0344 +#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC +#define SHIFT_MTX_CDMAA_ADDRESS 2 +#define REGNUM_MTX_CDMAA_ADDRESS 0x0344 + +#define MTX_CR_MTX_SYSC_CDMAC 0x0340 +#define MASK_MTX_LENGTH 0x0000FFFF +#define SHIFT_MTX_LENGTH 0 +#define REGNUM_MTX_LENGTH 0x0340 + +#define MASK_MTX_BURSTSIZE 0x07000000 +#define SHIFT_MTX_BURSTSIZE 24 +#define REGNUM_MTX_BURSTSIZE 0x0340 + +#define MASK_MTX_RNW 0x00020000 +#define SHIFT_MTX_RNW 17 +#define REGNUM_MTX_RNW 0x0340 + +#define MASK_MTX_ENABLE 0x00010000 +#define SHIFT_MTX_ENABLE 16 +#define REGNUM_MTX_ENABLE 0x0340 + +#define MASK_MTX_LENGTH 0x0000FFFF +#define SHIFT_MTX_LENGTH 0 +#define REGNUM_MTX_LENGTH 0x0340 + +#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000 +#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000 + +#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002 +#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1 +#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000 + +#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024 +#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001 +#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0 +#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024 + +#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002 +#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1 +#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024 + +#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004 +#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2 +#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024 + +#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008 +#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3 +#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024 + +#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040 +#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001 +#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0 +#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040 + +#define MTX_CR_MTX_SYSC_CDMAT 0x0350 +#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF +#define SHIFT_MTX_TRANSFERDATA 0 +#define REGNUM_MTX_TRANSFERDATA 0x0350 + +#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X))) +#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000 +#define SHIFT_IMG_SOC_TRANSFER_FIN 17 +#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C + +#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X))) +#define MASK_IMG_SOC_CNT 0x0000FFFF +#define SHIFT_IMG_SOC_CNT 0 +#define REGNUM_IMG_SOC_CNT 0x0004 + +#define MASK_IMG_SOC_EN 0x00010000 +#define SHIFT_IMG_SOC_EN 16 +#define REGNUM_IMG_SOC_EN 0x0004 + +#define MASK_IMG_SOC_LIST_EN 0x00040000 +#define SHIFT_IMG_SOC_LIST_EN 18 +#define REGNUM_IMG_SOC_LIST_EN 0x0004 + +#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X))) +#define MASK_IMG_SOC_PER_HOLD 0x0000007F +#define SHIFT_IMG_SOC_PER_HOLD 0 +#define REGNUM_IMG_SOC_PER_HOLD 0x0018 + +#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * 
(X))) +#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF +#define SHIFT_IMG_SOC_START_ADDRESS 0 +#define REGNUM_IMG_SOC_START_ADDRESS 0x0000 + +#define MASK_IMG_SOC_BSWAP 0x40000000 +#define SHIFT_IMG_SOC_BSWAP 30 +#define REGNUM_IMG_SOC_BSWAP 0x0004 + +#define MASK_IMG_SOC_PW 0x18000000 +#define SHIFT_IMG_SOC_PW 27 +#define REGNUM_IMG_SOC_PW 0x0004 + +#define MASK_IMG_SOC_DIR 0x04000000 +#define SHIFT_IMG_SOC_DIR 26 +#define REGNUM_IMG_SOC_DIR 0x0004 + +#define MASK_IMG_SOC_PI 0x03000000 +#define SHIFT_IMG_SOC_PI 24 +#define REGNUM_IMG_SOC_PI 0x0004 +#define IMG_SOC_PI_1 0x00000002 +#define IMG_SOC_PI_2 0x00000001 +#define IMG_SOC_PI_4 0x00000000 + +#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000 +#define SHIFT_IMG_SOC_TRANSFER_IEN 29 +#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004 + +#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \ + ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \ + (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \ + (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \ + (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \ + (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT)) + +#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X))) +#define MASK_IMG_SOC_EXT_SA 0x0000000F +#define SHIFT_IMG_SOC_EXT_SA 0 +#define REGNUM_IMG_SOC_EXT_SA 0x0008 + +#define MASK_IMG_SOC_ACC_DEL 0xE0000000 +#define SHIFT_IMG_SOC_ACC_DEL 29 +#define REGNUM_IMG_SOC_ACC_DEL 0x0008 + +#define MASK_IMG_SOC_INCR 0x08000000 +#define SHIFT_IMG_SOC_INCR 27 +#define REGNUM_IMG_SOC_INCR 0x0008 + +#define MASK_IMG_SOC_BURST 0x07000000 +#define SHIFT_IMG_SOC_BURST 24 +#define REGNUM_IMG_SOC_BURST 0x0008 + +#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \ +((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \ +(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \ +(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST)) + +#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X))) +#define MASK_IMG_SOC_ADDR 0x007FFFFF +#define SHIFT_IMG_SOC_ADDR 0 +#define REGNUM_IMG_SOC_ADDR 0x0014 + +/* **************** DMAC define **************** */ +enum DMAC_eBSwap { + DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */ + DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */ +}; + +enum DMAC_ePW { + DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */ + DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */ + DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. 
*/ +}; + +enum DMAC_eAccDel { + DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */ + DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */ + DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */ + DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */ + DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */ + DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */ + DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */ + DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */ +}; + +enum DMAC_eBurst { + DMAC_BURST_0 = 0x0, /* !< burst size of 0 */ + DMAC_BURST_1 = 0x1, /* !< burst size of 1 */ + DMAC_BURST_2 = 0x2, /* !< burst size of 2 */ + DMAC_BURST_3 = 0x3, /* !< burst size of 3 */ + DMAC_BURST_4 = 0x4, /* !< burst size of 4 */ + DMAC_BURST_5 = 0x5, /* !< burst size of 5 */ + DMAC_BURST_6 = 0x6, /* !< burst size of 6 */ + DMAC_BURST_7 = 0x7, /* !< burst size of 7 */ +}; + +/* commands for topaz,shared with user space driver */ +enum drm_lnc_topaz_cmd { + MTX_CMDID_NULL = 0, + MTX_CMDID_DO_HEADER = 1, + MTX_CMDID_ENCODE_SLICE = 2, + MTX_CMDID_WRITEREG = 3, + MTX_CMDID_START_PIC = 4, + MTX_CMDID_END_PIC = 5, + MTX_CMDID_SYNC = 6, + MTX_CMDID_ENCODE_ONE_ROW = 7, + MTX_CMDID_FLUSH = 8, + MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c, + MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e, + MTX_CMDID_SW_NEW_CODEC = 0x7f +}; + +/* codecs topaz supports,shared with user space driver */ +enum drm_lnc_topaz_codec { + IMG_CODEC_JPEG = 0, + IMG_CODEC_H264_NO_RC, + IMG_CODEC_H264_VBR, + IMG_CODEC_H264_CBR, + IMG_CODEC_H263_NO_RC, + IMG_CODEC_H263_VBR, + IMG_CODEC_H263_CBR, + IMG_CODEC_MPEG4_NO_RC, + IMG_CODEC_MPEG4_VBR, + IMG_CODEC_MPEG4_CBR, + IMG_CODEC_NUM +}; + +/* XXX: it's a copy of msvdx cmd queue. should have some change? */ +struct lnc_topaz_cmd_queue { + struct list_head head; + void *cmd; + unsigned long cmd_size; + uint32_t sequence; +}; + + +struct topaz_cmd_header { + union { + struct { + unsigned long enable_interrupt:1; + unsigned long id:7; + unsigned long size:8; + unsigned long seq:16; + }; + uint32_t val; + }; +}; + +/* define structure */ +/* firmware file's info head */ +struct topaz_fwinfo { + unsigned int ver:16; + unsigned int codec:16; + + unsigned int text_size; + unsigned int data_size; + unsigned int data_location; +}; + +/* firmware data array define */ +struct topaz_codec_fw { + uint32_t ver; + uint32_t codec; + + uint32_t text_size; + uint32_t data_size; + uint32_t data_location; + + struct ttm_buffer_object *text; + struct ttm_buffer_object *data; +}; + +struct topaz_private { + /* current video task */ + unsigned int pmstate; + struct sysfs_dirent *sysfs_pmstate; + int frame_skip; + + void *topaz_mtx_reg_state; + struct ttm_buffer_object *topaz_mtx_data_mem; + uint32_t topaz_cur_codec; + uint32_t cur_mtx_data_size; + int topaz_needs_reset; + + /* + *topaz command queue + */ + spinlock_t topaz_lock; + struct mutex topaz_mutex; + struct list_head topaz_queue; + int topaz_busy; /* 0 means topaz is free */ + int topaz_fw_loaded; + + /* topaz ccb data */ + /* XXX: should the addr stored by 32 bits? more compatible way?? 
*/ + uint32_t topaz_ccb_buffer_addr; + uint32_t topaz_ccb_ctrl_addr; + uint32_t topaz_ccb_size; + uint32_t topaz_cmd_windex; + uint16_t topaz_cmd_seq; + + uint32_t stored_initial_qp; + uint32_t topaz_dash_access_ctrl; + + struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */ + struct ttm_bo_kmap_obj topaz_bo_kmap; + void *topaz_ccb_wb; + uint32_t topaz_wb_offset; + uint32_t *topaz_sync_addr; + uint32_t topaz_sync_offset; + uint32_t topaz_sync_cmd_seq; + uint32_t topaz_mtx_saved; + + /* firmware */ + struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM]; +}; + +/* external function declare */ +/* lnc_topazinit.c */ +int lnc_topaz_init(struct drm_device *dev); +int lnc_topaz_uninit(struct drm_device *dev); +int lnc_topaz_reset(struct drm_psb_private *dev_priv); +int topaz_init_fw(struct drm_device *dev); +int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec); +int topaz_wait_for_register(struct drm_psb_private *dev_priv, + uint32_t addr, uint32_t value, + uint32_t enable); +void topaz_write_mtx_mem(struct drm_psb_private *dev_priv, + uint32_t byte_addr, uint32_t val); +uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv, + uint32_t byte_addr); +void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv, + uint32_t addr); +void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv, + uint32_t val); +void topaz_mmu_flushcache(struct drm_psb_private *dev_priv); +int lnc_topaz_save_mtx_state(struct drm_device *dev); +int lnc_topaz_restore_mtx_state(struct drm_device *dev); + +/* lnc_topaz.c */ +void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat); + +int lnc_cmdbuf_video(struct drm_file *priv, + struct list_head *validate_list, + uint32_t fence_type, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *cmd_buffer, + struct psb_ttm_fence_rep *fence_arg); + +void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_cout); +void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev); + +uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver); +int lnc_wait_topaz_idle(struct drm_device *dev); +int lnc_check_topaz_idle(struct drm_device *dev); + +/* macros to get/set CCB control data */ +#define WB_CCB_CTRL_RINDEX(dev_priv) \ +(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb)) + +#define WB_CCB_CTRL_SEQ(dev_priv) \ +(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb\ + + 1)) + +#define POLL_WB_RINDEX(dev_priv, value) \ +do { \ + int i; \ + for (i = 0; i < 10000; i++) { \ + if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \ + break; \ + else \ + DRM_UDELAY(100); \ + } \ + if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \ + DRM_ERROR("TOPAZ: poll rindex timeout\n"); \ + ret = -EBUSY; \ + } \ +} while (0) + +#define POLL_WB_SEQ(dev_priv, value) \ +do { \ + int i; \ + for (i = 0; i < 10000; i++) { \ + if (CCB_CTRL_SEQ(dev_priv) == value) \ + break; \ + else \ + DRM_UDELAY(1000); \ + } \ + if (CCB_CTRL_SEQ(dev_priv) != value) { \ + DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%08x(mtx) vs 0x%08x\n",\ + WB_CCB_CTRL_SEQ(dev_priv), value); \ + ret = -EBUSY; \ + } \ +} while (0) + +#define CCB_CTRL_RINDEX(dev_priv) \ + topaz_read_mtx_mem(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \ + + MTX_CCBCTRL_ROFF) + +#define CCB_CTRL_RINDEX(dev_priv) \ + topaz_read_mtx_mem(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \ + + MTX_CCBCTRL_ROFF) + +#define CCB_CTRL_QP(dev_priv) \ + 
topaz_read_mtx_mem(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \ + + MTX_CCBCTRL_QP) + +#define CCB_CTRL_SEQ(dev_priv) \ + topaz_read_mtx_mem(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \ + + MTX_CCBCTRL_COMPLETE) + +#define CCB_CTRL_FRAMESKIP(dev_priv) \ + topaz_read_mtx_mem(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \ + + MTX_CCBCTRL_FRAMESKIP) + +#define CCB_CTRL_SET_QP(dev_priv, qp) \ + topaz_write_mtx_mem(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \ + + MTX_CCBCTRL_QP, qp) + +#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \ + topaz_write_mtx_mem(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \ + + MTX_CCBCTRL_INITQP, qp) + + +#define TOPAZ_BEGIN_CCB(dev_priv) \ + topaz_write_mtx_mem_multiple_setup(dev_priv, \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_buffer_addr + \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex * 4) + +#define TOPAZ_OUT_CCB(dev_priv, cmd) \ +do { \ + topaz_write_mtx_mem_multiple(dev_priv, cmd); \ + ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex++; \ +} while (0) + +#define TOPAZ_END_CCB(dev_priv, kick_count) \ + topaz_mtx_kick(dev_priv, 1); + +static inline char *cmd_to_string(int cmd_id) +{ + switch (cmd_id) { + case MTX_CMDID_START_PIC: + return "MTX_CMDID_START_PIC"; + case MTX_CMDID_END_PIC: + return "MTX_CMDID_END_PIC"; + case MTX_CMDID_DO_HEADER: + return "MTX_CMDID_DO_HEADER"; + case MTX_CMDID_ENCODE_SLICE: + return "MTX_CMDID_ENCODE_SLICE"; + case MTX_CMDID_SYNC: + return "MTX_CMDID_SYNC"; + + default: + return "Undefined command"; + + } +} + +static inline char *codec_to_string(int codec) +{ + switch (codec) { + case IMG_CODEC_H264_NO_RC: + return "H264_NO_RC"; + case IMG_CODEC_H264_VBR: + return "H264_VBR"; + case IMG_CODEC_H264_CBR: + return "H264_CBR"; + case IMG_CODEC_H263_NO_RC: + return "H263_NO_RC"; + case IMG_CODEC_H263_VBR: + return "H263_VBR"; + case IMG_CODEC_H263_CBR: + return "H263_CBR"; + case IMG_CODEC_MPEG4_NO_RC: + return "MPEG4_NO_RC"; + case IMG_CODEC_MPEG4_VBR: + return "MPEG4_VBR"; + case IMG_CODEC_MPEG4_CBR: + return "MPEG4_CBR"; + default: + return "Undefined codec"; + } +} + + +static inline void lnc_topaz_enableirq(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + /* uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG; */ + + PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n"); + + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, + F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) | + /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */ + F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) | + F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) | + F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT)); + + /* write in psb_irq.c */ + /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */ +} + +static inline void lnc_topaz_disableirq(struct drm_device *dev) +{ + + struct drm_psb_private *dev_priv = dev->dev_private; + /* uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG); */ + + PSB_DEBUG_INIT("TOPAZ: disable IRQ\n"); + + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0); + + /* write in psb_irq.c */ + /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */ +} + + +static inline void lnc_topaz_clearirq(struct drm_device *dev, + uint32_t clear_topaz) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + + PSB_DEBUG_INIT("TOPAZ: clear IRQ\n"); + if (clear_topaz != 
0) + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz); + + /* PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */ +} + +static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + uint32_t val, /* iir, */ clear = 0; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val); + /* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */ + + (void) topaz_priv; + + if ((val == 0) /* && (iir == 0) */) {/* no interrupt */ + PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n"); + return 0; + } + + PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val); + + if (val & (1<<31)) + PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x," + "sync seq: 0x%08x vs 0x%08x (MTX)\n", + CCB_CTRL_SEQ(dev_priv), + dev_priv->sequence[LNC_ENGINE_ENCODE], + *(uint32_t *)topaz_priv->topaz_sync_addr); + else + PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x," + "sync seq: 0x%08x vs 0x%08x (MTX)\n", + CCB_CTRL_SEQ(dev_priv), + dev_priv->sequence[LNC_ENGINE_ENCODE], + *(uint32_t *)topaz_priv->topaz_sync_addr); + + if (val & 0x8) { + uint32_t mmu_status, mmu_req; + + TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status); + TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req); + + PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, " + "address=0x%08x,mem req=0x%08x\n", + mmu_status, mmu_req); + clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT); + } + + if (val & 0x4) { + PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n"); + clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT); + } + + if (val & 0x2) { + PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n"); + clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX); + } + + if (val & 0x1) { + PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n"); + clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA); + } + + return clear; +} + + +#define TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state) \ +do { \ + topaz_priv->pmstate = new_state; \ + sysfs_notify_dirent(topaz_priv->sysfs_pmstate); \ + PSB_DEBUG_PM("TOPAZ: %s\n", \ + (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \ + : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \ + : "clockgated")); \ +} while (0) + +#endif /* _LNC_TOPAZ_H_ */ diff --git a/drivers/gpu/drm/psb/lnc_topazinit.c b/drivers/gpu/drm/psb/lnc_topazinit.c new file mode 100644 index 0000000..2e8365c --- /dev/null +++ b/drivers/gpu/drm/psb/lnc_topazinit.c @@ -0,0 +1,2058 @@ +/** + * file lnc_topazinit.c + * TOPAZ initialization and mtx-firmware upload + * + */ + +/************************************************************************** + * + * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA + * Copyright (c) Imagination Technologies Limited, UK + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* NOTE: (READ BEFORE REFINING THIS CODE)
+ * 1. The FIRMWARE's SIZE is measured in bytes; the size passed to the
+ *    DMAC must be measured in words.
+ */
+
+/* include headers */
+
+/* #define DRM_DEBUG_CODE 2 */
+
+#include <linux/firmware.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "psb_drv.h"
+#include "lnc_topaz.h"
+#include "psb_powermgmt.h"
+
+/* WARNING: this define is very important */
+#define RAM_SIZE (1024 * 24)
+
+/* register default values
+ * THIS HEADER IS ONLY INCLUDED ONCE */
+static unsigned long topaz_default_regs[183][3] = {
+	{MVEA_START, 0x00000000, 0x00000000},
+	{MVEA_START, 0x00000004, 0x00000400},
+	{MVEA_START, 0x00000008, 0x00000000},
+	{MVEA_START, 0x0000000C, 0x00000000},
+	{MVEA_START, 0x00000010, 0x00000000},
+	{MVEA_START, 0x00000014, 0x00000000},
+	{MVEA_START, 0x00000018, 0x00000000},
+	{MVEA_START, 0x0000001C, 0x00000000},
+	{MVEA_START, 0x00000020, 0x00000120},
+	{MVEA_START, 0x00000024, 0x00000000},
+	{MVEA_START, 0x00000028, 0x00000000},
+	{MVEA_START, 0x00000100, 0x00000000},
+	{MVEA_START, 0x00000104, 0x00000000},
+	{MVEA_START, 0x00000108, 0x00000000},
+	{MVEA_START, 0x0000010C, 0x00000000},
+	{MVEA_START, 0x0000011C, 0x00000001},
+	{MVEA_START, 0x0000012C, 0x00000000},
+	{MVEA_START, 0x00000180, 0x00000000},
+	{MVEA_START, 0x00000184, 0x00000000},
+	{MVEA_START, 0x00000188, 0x00000000},
+	{MVEA_START, 0x0000018C, 0x00000000},
+	{MVEA_START, 0x00000190, 0x00000000},
+	{MVEA_START, 0x00000194, 0x00000000},
+	{MVEA_START, 0x00000198, 0x00000000},
+	{MVEA_START, 0x0000019C, 0x00000000},
+	{MVEA_START, 0x000001A0, 0x00000000},
+	{MVEA_START, 0x000001A4, 0x00000000},
+	{MVEA_START, 0x000001A8, 0x00000000},
+	{MVEA_START, 0x000001AC, 0x00000000},
+	{MVEA_START, 0x000001B0, 0x00000000},
+	{MVEA_START, 0x000001B4, 0x00000000},
+	{MVEA_START, 0x000001B8, 0x00000000},
+	{MVEA_START, 0x000001BC, 0x00000000},
+	{MVEA_START, 0x000001F8, 0x00000000},
+	{MVEA_START, 0x000001FC, 0x00000000},
+	{MVEA_START, 0x00000200, 0x00000000},
+	{MVEA_START, 0x00000204, 0x00000000},
+	{MVEA_START, 0x00000208, 0x00000000},
+	{MVEA_START, 0x0000020C, 0x00000000},
+	{MVEA_START, 0x00000210, 0x00000000},
+	{MVEA_START, 0x00000220, 0x00000001},
+	{MVEA_START, 0x00000224, 0x0000001F},
+	{MVEA_START, 0x00000228, 0x00000100},
+	{MVEA_START, 0x0000022C, 0x00001F00},
+	{MVEA_START, 0x00000230, 0x00000101},
+	{MVEA_START, 0x00000234, 0x00001F1F},
+	{MVEA_START, 0x00000238, 0x00001F01},
+	{MVEA_START, 0x0000023C, 0x0000011F},
+	{MVEA_START, 0x00000240, 0x00000200},
+	{MVEA_START, 0x00000244, 0x00001E00},
+	{MVEA_START, 0x00000248, 0x00000002},
+	{MVEA_START, 0x0000024C, 0x0000001E},
+	{MVEA_START, 0x00000250, 0x00000003},
+	{MVEA_START, 0x00000254, 0x0000001D},
+	{MVEA_START, 0x00000258, 0x00001F02},
+	{MVEA_START, 0x0000025C, 0x00000102},
+	{MVEA_START, 0x00000260, 0x0000011E},
+	{MVEA_START, 0x00000264, 0x00000000},
+	{MVEA_START, 0x00000268, 0x00000000},
+	{MVEA_START, 0x0000026C, 0x00000000},
+	{MVEA_START, 0x00000270, 
0x00000000}, + {MVEA_START, 0x00000274, 0x00000000}, + {MVEA_START, 0x00000278, 0x00000000}, + {MVEA_START, 0x00000280, 0x00008000}, + {MVEA_START, 0x00000284, 0x00000000}, + {MVEA_START, 0x00000288, 0x00000000}, + {MVEA_START, 0x0000028C, 0x00000000}, + {MVEA_START, 0x00000314, 0x00000000}, + {MVEA_START, 0x00000318, 0x00000000}, + {MVEA_START, 0x0000031C, 0x00000000}, + {MVEA_START, 0x00000320, 0x00000000}, + {MVEA_START, 0x00000324, 0x00000000}, + {MVEA_START, 0x00000348, 0x00000000}, + {MVEA_START, 0x00000380, 0x00000000}, + {MVEA_START, 0x00000384, 0x00000000}, + {MVEA_START, 0x00000388, 0x00000000}, + {MVEA_START, 0x0000038C, 0x00000000}, + {MVEA_START, 0x00000390, 0x00000000}, + {MVEA_START, 0x00000394, 0x00000000}, + {MVEA_START, 0x00000398, 0x00000000}, + {MVEA_START, 0x0000039C, 0x00000000}, + {MVEA_START, 0x000003A0, 0x00000000}, + {MVEA_START, 0x000003A4, 0x00000000}, + {MVEA_START, 0x000003A8, 0x00000000}, + {MVEA_START, 0x000003B0, 0x00000000}, + {MVEA_START, 0x000003B4, 0x00000000}, + {MVEA_START, 0x000003B8, 0x00000000}, + {MVEA_START, 0x000003BC, 0x00000000}, + {MVEA_START, 0x000003D4, 0x00000000}, + {MVEA_START, 0x000003D8, 0x00000000}, + {MVEA_START, 0x000003DC, 0x00000000}, + {MVEA_START, 0x000003E0, 0x00000000}, + {MVEA_START, 0x000003E4, 0x00000000}, + {MVEA_START, 0x000003EC, 0x00000000}, + {MVEA_START, 0x000002D0, 0x00000000}, + {MVEA_START, 0x000002D4, 0x00000000}, + {MVEA_START, 0x000002D8, 0x00000000}, + {MVEA_START, 0x000002DC, 0x00000000}, + {MVEA_START, 0x000002E0, 0x00000000}, + {MVEA_START, 0x000002E4, 0x00000000}, + {MVEA_START, 0x000002E8, 0x00000000}, + {MVEA_START, 0x000002EC, 0x00000000}, + {MVEA_START, 0x000002F0, 0x00000000}, + {MVEA_START, 0x000002F4, 0x00000000}, + {MVEA_START, 0x000002F8, 0x00000000}, + {MVEA_START, 0x000002FC, 0x00000000}, + {MVEA_START, 0x00000300, 0x00000000}, + {MVEA_START, 0x00000304, 0x00000000}, + {MVEA_START, 0x00000308, 0x00000000}, + {MVEA_START, 0x0000030C, 0x00000000}, + {MVEA_START, 0x00000290, 0x00000000}, + {MVEA_START, 0x00000294, 0x00000000}, + {MVEA_START, 0x00000298, 0x00000000}, + {MVEA_START, 0x0000029C, 0x00000000}, + {MVEA_START, 0x000002A0, 0x00000000}, + {MVEA_START, 0x000002A4, 0x00000000}, + {MVEA_START, 0x000002A8, 0x00000000}, + {MVEA_START, 0x000002AC, 0x00000000}, + {MVEA_START, 0x000002B0, 0x00000000}, + {MVEA_START, 0x000002B4, 0x00000000}, + {MVEA_START, 0x000002B8, 0x00000000}, + {MVEA_START, 0x000002BC, 0x00000000}, + {MVEA_START, 0x000002C0, 0x00000000}, + {MVEA_START, 0x000002C4, 0x00000000}, + {MVEA_START, 0x000002C8, 0x00000000}, + {MVEA_START, 0x000002CC, 0x00000000}, + {MVEA_START, 0x00000080, 0x00000000}, + {MVEA_START, 0x00000084, 0x80705700}, + {MVEA_START, 0x00000088, 0x00000000}, + {MVEA_START, 0x0000008C, 0x00000000}, + {MVEA_START, 0x00000090, 0x00000000}, + {MVEA_START, 0x00000094, 0x00000000}, + {MVEA_START, 0x00000098, 0x00000000}, + {MVEA_START, 0x0000009C, 0x00000000}, + {MVEA_START, 0x000000A0, 0x00000000}, + {MVEA_START, 0x000000A4, 0x00000000}, + {MVEA_START, 0x000000A8, 0x00000000}, + {MVEA_START, 0x000000AC, 0x00000000}, + {MVEA_START, 0x000000B0, 0x00000000}, + {MVEA_START, 0x000000B4, 0x00000000}, + {MVEA_START, 0x000000B8, 0x00000000}, + {MVEA_START, 0x000000BC, 0x00000000}, + {MVEA_START, 0x000000C0, 0x00000000}, + {MVEA_START, 0x000000C4, 0x00000000}, + {MVEA_START, 0x000000C8, 0x00000000}, + {MVEA_START, 0x000000CC, 0x00000000}, + {MVEA_START, 0x000000D0, 0x00000000}, + {MVEA_START, 0x000000D4, 0x00000000}, + {MVEA_START, 0x000000D8, 0x00000000}, + {MVEA_START, 
0x000000DC, 0x00000000}, + {MVEA_START, 0x000000E0, 0x00000000}, + {MVEA_START, 0x000000E4, 0x00000000}, + {MVEA_START, 0x000000E8, 0x00000000}, + {MVEA_START, 0x000000EC, 0x00000000}, + {MVEA_START, 0x000000F0, 0x00000000}, + {MVEA_START, 0x000000F4, 0x00000000}, + {MVEA_START, 0x000000F8, 0x00000000}, + {MVEA_START, 0x000000FC, 0x00000000}, + {TOPAZ_VLC_START, 0x00000000, 0x00000000}, + {TOPAZ_VLC_START, 0x00000004, 0x00000000}, + {TOPAZ_VLC_START, 0x00000008, 0x00000000}, + {TOPAZ_VLC_START, 0x0000000C, 0x00000000}, + {TOPAZ_VLC_START, 0x00000010, 0x00000000}, + {TOPAZ_VLC_START, 0x00000014, 0x00000000}, + {TOPAZ_VLC_START, 0x0000001C, 0x00000000}, + {TOPAZ_VLC_START, 0x00000020, 0x00000000}, + {TOPAZ_VLC_START, 0x00000024, 0x00000000}, + {TOPAZ_VLC_START, 0x0000002C, 0x00000000}, + {TOPAZ_VLC_START, 0x00000034, 0x00000000}, + {TOPAZ_VLC_START, 0x00000038, 0x00000000}, + {TOPAZ_VLC_START, 0x0000003C, 0x00000000}, + {TOPAZ_VLC_START, 0x00000040, 0x00000000}, + {TOPAZ_VLC_START, 0x00000044, 0x00000000}, + {TOPAZ_VLC_START, 0x00000048, 0x00000000}, + {TOPAZ_VLC_START, 0x0000004C, 0x00000000}, + {TOPAZ_VLC_START, 0x00000050, 0x00000000}, + {TOPAZ_VLC_START, 0x00000054, 0x00000000}, + {TOPAZ_VLC_START, 0x00000058, 0x00000000}, + {TOPAZ_VLC_START, 0x0000005C, 0x00000000}, + {TOPAZ_VLC_START, 0x00000060, 0x00000000}, + {TOPAZ_VLC_START, 0x00000064, 0x00000000}, + {TOPAZ_VLC_START, 0x00000068, 0x00000000}, + {TOPAZ_VLC_START, 0x0000006C, 0x00000000} +}; + +#define FIRMWARE_NAME "topaz_fw.bin" + +/* static function define */ +static int topaz_upload_fw(struct drm_device *dev, + enum drm_lnc_topaz_codec codec); +static inline void topaz_set_default_regs(struct drm_psb_private + *dev_priv); + +#define UPLOAD_FW_BY_DMA 1 + +#if UPLOAD_FW_BY_DMA +static void topaz_dma_transfer(struct drm_psb_private *dev_priv, + uint32_t channel, uint32_t src_phy_addr, + uint32_t offset, uint32_t dst_addr, + uint32_t byte_num, uint32_t is_increment, + uint32_t is_write); +#else +static void topaz_mtx_upload_by_register(struct drm_device *dev, + uint32_t mtx_mem, uint32_t addr, + uint32_t size, + struct ttm_buffer_object *buf); +#endif + +static void topaz_write_core_reg(struct drm_psb_private *dev_priv, + uint32_t reg, const uint32_t val); +static void topaz_read_core_reg(struct drm_psb_private *dev_priv, + uint32_t reg, uint32_t *ret_val); +static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv); +static void release_mtx_control_from_dash(struct drm_psb_private + *dev_priv); +static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv); +static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, + uint32_t size); +static void mtx_dma_write(struct drm_device *dev); + + +#define DEBUG_FUNCTION 0 + +#if DEBUG_FUNCTION +static int topaz_test_null(struct drm_device *dev, uint32_t seq); +static int topaz_test_sync(struct drm_device *dev, uint32_t seq, + uint32_t sync_seq); +static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value); +static void topaz_save_default_regs(struct drm_psb_private *dev_priv, + uint32_t *data); +static void topaz_restore_default_regs(struct drm_psb_private *dev_priv, + uint32_t *data); +static int topaz_test_sync_manual_alloc_page(struct drm_device *dev, + uint32_t seq, + uint32_t sync_seq, + uint32_t offset); +static int topaz_test_sync_tt_test(struct drm_device *dev, + uint32_t seq, + uint32_t sync_seq); +#endif + +uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv, + uint32_t byte_addr) +{ + uint32_t read_val; + uint32_t reg, bank_size, 
ram_bank_size, ram_id;
+
+	TOPAZ_READ32(0x3c, &reg);
+	/* XXX: hard-coded bank layout overrides the value just read */
+	reg = 0x0a0a0606;
+	bank_size = (reg & 0xF0000) >> 16;
+
+	ram_bank_size = (uint32_t) (1 << (bank_size + 2));
+	ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
+
+	MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
+		F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
+		F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
+		F_ENCODE(1, MTX_MTX_MCMR));
+
+	/* ?? poll this reg? */
+	topaz_wait_for_register(dev_priv,
+		MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
+		1, 1);
+
+	MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
+
+	return read_val;
+}
+
+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
+			uint32_t byte_addr, uint32_t val)
+{
+	uint32_t ram_id = 0;
+	uint32_t reg, bank_size, ram_bank_size;
+
+	TOPAZ_READ32(0x3c, &reg);
+
+	/* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
+	reg = 0x0a0a0606;
+
+	bank_size = (reg & 0xF0000) >> 16;
+
+	ram_bank_size = (uint32_t) (1 << (bank_size + 2));
+	ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
+
+	MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
+		F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
+		F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
+
+	MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
+
+	/* ?? poll this reg? */
+	topaz_wait_for_register(dev_priv,
+		MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
+		1, 1);
+
+	return;
+}
+
+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
+					uint32_t byte_addr)
+{
+	uint32_t ram_id = 0;
+	uint32_t reg, bank_size, ram_bank_size;
+
+	TOPAZ_READ32(0x3c, &reg);
+
+	reg = 0x0a0a0606;
+
+	bank_size = (reg & 0xF0000) >> 16;
+
+	ram_bank_size = (uint32_t) (1 << (bank_size + 2));
+	ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
+
+	MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
+		F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
+		F_ENCODE(1, MTX_MTX_MCMAI) |
+		F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
+}
+
+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
+				uint32_t val)
+{
+	MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
+}
+
+
+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
+			uint32_t addr, uint32_t value, uint32_t mask)
+{
+	uint32_t tmp;
+	uint32_t count = 10000;
+
+	/* # poll the topaz register a fixed number of times */
+	while (count) {
+		/* #.# read */
+		MM_READ32(addr, 0, &tmp);
+
+		if (value == (tmp & mask))
+			return 0;
+
+		/* #.# delay and loop */
+		DRM_UDELAY(100);
+		--count;
+	}
+
+	/* # polling timed out; return an error to indicate failure */
+	/* XXX: the test suite treats 10000 iterations as the timeout */
+
+	DRM_ERROR("TOPAZ: timed out polling addr(0x%x) for expected value(0x%08x), "
+		"actual 0x%08x (0x%08x & 0x%08x)\n",
+		addr, value, tmp & mask, tmp, mask);
+
+	return -EBUSY;
+
+}
+
+static ssize_t psb_topaz_pmstate_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct drm_psb_private *dev_priv;
+	struct topaz_private *topaz_priv;
+	unsigned int pmstate;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (drm_dev == NULL)
+		return 0;
+
+	dev_priv = drm_dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+	pmstate = topaz_priv->pmstate;
+
+	spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
+	ret = sprintf(buf, "%s\n",
+		(pmstate == PSB_PMSTATE_POWERUP) ? "powerup"
+		: ((pmstate == PSB_PMSTATE_POWERDOWN) ? 
"powerdown" + : "clockgated")); + spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags); + + return ret; +} + +static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL); + + +/* this function finish the first part of initialization, the rest + * should be done in topaz_setup_fw + */ +int lnc_topaz_init(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->bdev; + uint32_t core_id, core_rev; + int ret = 0, n; + bool is_iomem; + struct topaz_private *topaz_priv; + void *topaz_bo_virt; + + PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n"); + topaz_priv = kmalloc(sizeof(struct topaz_private), GFP_KERNEL); + if (topaz_priv == NULL) + return -1; + + dev_priv->topaz_private = topaz_priv; + memset(topaz_priv, 0, sizeof(struct topaz_private)); + + /* get device --> drm_device --> drm_psb_private --> topaz_priv + * for psb_topaz_pmstate_show: topaz_pmpolicy + * if not pci_set_drvdata, can't get drm_device from device + */ + pci_set_drvdata(dev->pdev, dev); + if (device_create_file(&dev->pdev->dev, + &dev_attr_topaz_pmstate)) + DRM_ERROR("TOPAZ: could not create sysfs file\n"); + topaz_priv->sysfs_pmstate = sysfs_get_dirent( + dev->pdev->dev.kobj.sd, "topaz_pmstate"); + + topaz_priv = dev_priv->topaz_private; + + /* # initialize comand topaz queueing [msvdx_queue] */ + INIT_LIST_HEAD(&topaz_priv->topaz_queue); + /* # init mutex? CHECK: mutex usage [msvdx_mutex] */ + mutex_init(&topaz_priv->topaz_mutex); + /* # spin lock init? CHECK spin lock usage [msvdx_lock] */ + spin_lock_init(&topaz_priv->topaz_lock); + + /* # topaz status init. [msvdx_busy] */ + topaz_priv->topaz_busy = 0; + topaz_priv->topaz_cmd_seq = 0; + topaz_priv->topaz_fw_loaded = 0; + /* FIXME: workaround since JPEG firmware is not ready */ + topaz_priv->topaz_cur_codec = 1; + topaz_priv->cur_mtx_data_size = 0; + + topaz_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE, + GFP_KERNEL); + if (topaz_priv->topaz_mtx_reg_state == NULL) { + DRM_ERROR("TOPAZ: failed to allocate space " + "for mtx register\n"); + return -1; + } + + /* # gain write back structure,we may only need 32+4=40DW */ + ret = ttm_buffer_object_create(bdev, 4096, + ttm_bo_type_kernel, + DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, + 0, 0, 0, NULL, &(topaz_priv->topaz_bo)); + if (ret != 0) { + DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n"); + return ret; + } + + ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0, + topaz_priv->topaz_bo->num_pages, + &topaz_priv->topaz_bo_kmap); + if (ret) { + DRM_ERROR("TOPAZ: map topaz BO bo failed......\n"); + ttm_bo_unref(&topaz_priv->topaz_bo); + return ret; + } + + topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap, + &is_iomem); + topaz_priv->topaz_ccb_wb = (void *) topaz_bo_virt; + topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset; + topaz_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt + + 2048); + topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset + + 2048; + PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack and SYNC\n"); + PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n", + topaz_priv->topaz_wb_offset); + PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n", + topaz_priv->topaz_sync_offset); + + *(topaz_priv->topaz_sync_addr) = ~0; /* reset sync seq */ + + /* # reset topaz */ + MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST, + F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) | + 
F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+		F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
+
+	MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
+
+	/* # set up MMU */
+	topaz_mmu_hwsetup(dev_priv);
+
+	PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading until "
+		"user space commands are received\n");
+
+#if 0 /* can't load FW here */
+	/* #.# load fw to driver */
+	PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
+	ret = topaz_init_fw(dev);
+	if (ret != 0)
+		return -1;
+
+	topaz_setup_fw(dev, IMG_CODEC_MPEG4_NO_RC);/* just for test */
+#endif
+	/* # minimal clock */
+
+	/* # return 0 */
+	TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
+	TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
+
+	PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
+		core_id, core_rev);
+
+	/* create firmware storage */
+	for (n = 1; n < IMG_CODEC_NUM; ++n) {
+		/* #.# malloc DRM object for fw storage */
+		ret = ttm_buffer_object_create(bdev, 12 * 4096,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+			0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
+		if (ret) {
+			DRM_ERROR("Failed to allocate firmware.\n");
+			goto out;
+		}
+
+		/* #.# malloc DRM object for fw storage */
+		ret = ttm_buffer_object_create(bdev, 12 * 4096,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+			0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
+		if (ret) {
+			DRM_ERROR("Failed to allocate firmware.\n");
+			goto out;
+		}
+	}
+
+	ret = ttm_buffer_object_create(bdev,
+			12 * 4096,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU |
+			TTM_PL_FLAG_NO_EVICT,
+			0, 0, 0, NULL,
+			&topaz_priv->topaz_mtx_data_mem);
+	if (ret) {
+		DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
+			"mtx data save\n");
+		goto out;
+	}
+	topaz_priv->cur_mtx_data_size = 0;
+
+	PSB_DEBUG_INIT("TOPAZ: old clock gating disable = 0x%08x\n",
+		PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
+	PSB_DEBUG_INIT("TOPAZ: disable TOPAZ clock gating\n");
+
+	PSB_WVDC32(0x00011fff, PSB_TOPAZ_CLOCKGATING);
+
+	PSB_DEBUG_INIT("TOPAZ: new clock gating disable = 0x%08x\n",
+		PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
+
+	return 0;
+
+out:
+	for (n = 1; n < IMG_CODEC_NUM; ++n) {
+		if (topaz_priv->topaz_fw[n].text != NULL)
+			
ttm_bo_unref(&topaz_priv->topaz_fw[n].text); + if (topaz_priv->topaz_fw[n].data != NULL) + ttm_bo_unref(&topaz_priv->topaz_fw[n].data); + } + + ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap); + ttm_bo_unref(&topaz_priv->topaz_bo); + + if (topaz_priv) { + pci_set_drvdata(dev->pdev, NULL); + device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate); + sysfs_put(topaz_priv->sysfs_pmstate); + topaz_priv->sysfs_pmstate = NULL; + + kfree(topaz_priv); + dev_priv->topaz_private = NULL; + } + + return 0; +} + +int lnc_topaz_reset(struct drm_psb_private *dev_priv) +{ + struct topaz_private *topaz_priv; + + topaz_priv = dev_priv->topaz_private; + topaz_priv->topaz_busy = 0; + topaz_priv->topaz_cmd_seq = 0; + topaz_priv->cur_mtx_data_size = 0; + topaz_priv->topaz_cmd_windex = 0; + topaz_priv->topaz_needs_reset = 0; + + /* # reset topaz */ + MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST, + F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) | + F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET)); + + MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST, + F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) | + F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) | + F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) | + F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) | + F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) | + F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET)); + + /* # set up MMU */ + topaz_mmu_hwsetup(dev_priv); + + return 0; +} + +/* read firmware bin file and load all data into driver */ +int topaz_init_fw(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + const struct firmware *raw = NULL; + unsigned char *ptr; + int ret = 0; + int n; + struct topaz_fwinfo *cur_fw; + int cur_size; + struct topaz_codec_fw *cur_codec; + struct ttm_buffer_object **cur_drm_obj; + struct ttm_bo_kmap_obj tmp_kmap; + bool is_iomem; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + topaz_priv->stored_initial_qp = 0; + + /* # get firmware */ + ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev); + if (ret != 0) { + DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret); + return ret; + } + + PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n"); + + if (raw && (raw->size < sizeof(struct topaz_fwinfo))) { + DRM_ERROR("TOPAZ: firmware file is not correct size.\n"); + goto out; + } + + ptr = (unsigned char *) raw->data; + + if (!ptr) { + DRM_ERROR("TOPAZ: failed to load firmware.\n"); + goto out; + } + + /* # load fw from file */ + PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n"); + cur_fw = NULL; + /* didn't use the first element */ + for (n = 1; n < IMG_CODEC_NUM; ++n) { + cur_fw = (struct topaz_fwinfo *) ptr; + + cur_codec = &topaz_priv->topaz_fw[cur_fw->codec]; + cur_codec->ver = cur_fw->ver; + cur_codec->codec = cur_fw->codec; + cur_codec->text_size = cur_fw->text_size; + cur_codec->data_size = cur_fw->data_size; + cur_codec->data_location = cur_fw->data_location; + + PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n", + codec_to_string(cur_fw->codec)); + + /* #.# handle text section */ + ptr += sizeof(struct topaz_fwinfo); + cur_drm_obj = &cur_codec->text; + cur_size = cur_fw->text_size; + + /* #.# fill DRM object with firmware data */ + ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages, + &tmp_kmap); + if (ret) { + PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret); + ttm_bo_unref(cur_drm_obj); + *cur_drm_obj = NULL; + 
goto out;
+		}
+
+		memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
+			cur_size);
+
+		ttm_bo_kunmap(&tmp_kmap);
+
+		/* #.# handle data section */
+		ptr += cur_fw->text_size;
+		cur_drm_obj = &cur_codec->data;
+		cur_size = cur_fw->data_size;
+
+		/* #.# fill DRM object with firmware data */
+		ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
+			&tmp_kmap);
+		if (ret) {
+			PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
+			ttm_bo_unref(cur_drm_obj);
+			*cur_drm_obj = NULL;
+			goto out;
+		}
+
+		memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
+			cur_size);
+
+		ttm_bo_kunmap(&tmp_kmap);
+
+		/* #.# validate firmware */
+
+		/* #.# update ptr */
+		ptr += cur_fw->data_size;
+	}
+
+	release_firmware(raw);
+
+	PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
+
+	return 0;
+
+out:
+	if (raw) {
+		PSB_DEBUG_GENERAL("release firmware....\n");
+		release_firmware(raw);
+	}
+
+	return -1;
+}
+
+/* set up fw when starting a new context */
+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t mem_size = RAM_SIZE; /* follow DDK */
+	uint32_t verify_pc;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+#if 0
+	if (codec == topaz_priv->topaz_current_codec) {
+		LNC_TRACEL("TOPAZ: reuse previous codec\n");
+		return 0;
+	}
+#endif
+
+	/* XXX: need to reset topaz? */
+	PSB_DEBUG_GENERAL("XXX: should topaz be reset when the context changes?\n");
+
+	/* XXX: interrupts should not be enabled here, but this function is
+	 * called while they are; there is no choice, since setup_fw has to
+	 * be called manually */
+	/* # upload firmware, clear interrupts and start the firmware
+	 * -- from hostutils.c in the test suite */
+
+	/* # reset MVEA */
+	MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+		F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+		F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+		F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+		F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
+		F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+		F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
+
+	MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+		F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
+
+
+	topaz_mmu_hwsetup(dev_priv);
+
+#if !LNC_TOPAZ_NO_IRQ
+	psb_irq_uninstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
+#endif
+
+	PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
+
+	topaz_set_default_regs(dev_priv);
+
+	/* # reset mtx */
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
+		F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
+		F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
+		F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
+
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
+
+	/* # upload fw by drm */
+	PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
+
+	topaz_upload_fw(dev, codec);
+#if 0
+	/* allocate the space for context save & restore if needed */
+	if (topaz_priv->topaz_mtx_data_mem == NULL) {
+		ret = ttm_buffer_object_create(bdev,
+			topaz_priv->cur_mtx_data_size * 4,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU |
+			TTM_PL_FLAG_NO_EVICT,
+			0, 0, 0, NULL,
+			&topaz_priv->topaz_mtx_data_mem);
+		if (ret) {
+			DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
+				"mtx data save\n");
+			return -1;
+		}
+	}
+	PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
+#endif
+
+	/* 
XXX: In power save mode, need to save the complete data memory + * and restore it. MTX_FWIF.c record the data size */ + PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n"); + + PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n"); + topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS); + + PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n"); + + topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc); + + /* enable auto clock is essential for this driver */ + TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE, + F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) | + F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE)); + MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING, + F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) | + F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) | + F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) | + F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE)); + + PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n", + verify_pc, PC_START_ADDRESS); + + /* # turn on MTX */ + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, + F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX)); + + MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET, + MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK); + + /* # poll on the interrupt which the firmware will generate */ + topaz_wait_for_register(dev_priv, + TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT, + F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX), + F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX)); + + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, + F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX)); + + PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n"); + + /* # get ccb buffer addr -- file hostutils.c */ + topaz_priv->topaz_ccb_buffer_addr = + topaz_read_mtx_mem(dev_priv, + MTX_DATA_MEM_BASE + mem_size - 4); + topaz_priv->topaz_ccb_ctrl_addr = + topaz_read_mtx_mem(dev_priv, + MTX_DATA_MEM_BASE + mem_size - 8); + topaz_priv->topaz_ccb_size = + topaz_read_mtx_mem(dev_priv, + topaz_priv->topaz_ccb_ctrl_addr + + MTX_CCBCTRL_CCBSIZE); + + topaz_priv->topaz_cmd_windex = 0; + + PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n", + topaz_priv->topaz_ccb_buffer_addr, + topaz_priv->topaz_ccb_ctrl_addr, + topaz_priv->topaz_ccb_size); + + /* # write back the initial QP Value */ + topaz_write_mtx_mem(dev_priv, + topaz_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP, + topaz_priv->stored_initial_qp); + + PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n", + topaz_priv->topaz_wb_offset); + topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12, + topaz_priv->topaz_wb_offset); + + /* this kick is essential for mtx.... */ + *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x01020304; + topaz_mtx_kick(dev_priv, 1); + DRM_UDELAY(1000); + PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory," + " and here it is 0x%08x\n", + *((uint32_t *) topaz_priv->topaz_ccb_wb)); + + *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */ + PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n"); + + /* XXX: is there any need to record next cmd num?? 
+	 * we use the fence sequence number to record it
+	 */
+	topaz_priv->topaz_busy = 0;
+	topaz_priv->topaz_cmd_seq = 0;
+
+#if !LNC_TOPAZ_NO_IRQ
+	psb_irq_preinstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
+	psb_irq_postinstall_islands(dev, PSB_VIDEO_ENC_ISLAND);
+	lnc_topaz_enableirq(dev);
+#endif
+
+#if 0
+	topaz_mmu_flushcache(dev_priv);
+	topaz_test_null(dev, 0xe1e1);
+	topaz_test_null(dev, 0xe2e2);
+	topaz_test_sync(dev, 0xe2e2, 0x87654321);
+
+	topaz_mmu_test(dev, 0x12345678);
+	topaz_test_null(dev, 0xe3e3);
+	topaz_mmu_test(dev, 0x8764321);
+
+	topaz_test_null(dev, 0xe4e4);
+	topaz_test_null(dev, 0xf3f3);
+#endif
+
+	return 0;
+}
+
+#if UPLOAD_FW_BY_DMA
+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct topaz_codec_fw *cur_codec_fw;
+	uint32_t text_size, data_size;
+	uint32_t data_location;
+	uint32_t cur_mtx_data_size;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	/* # refer to the HLD document */
+
+	/* # MTX reset */
+	PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
+	MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
+		MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
+
+	DRM_UDELAY(6000);
+
+	/* # upload the firmware by DMA */
+	cur_codec_fw = &topaz_priv->topaz_fw[codec];
+
+	PSB_DEBUG_GENERAL("Topaz: upload codec %s(%d) text sz=%d data sz=%d"
+		" data location(%d)\n", codec_to_string(codec), codec,
+		cur_codec_fw->text_size, cur_codec_fw->data_size,
+		cur_codec_fw->data_location);
+
+	/* # upload text */
+	text_size = cur_codec_fw->text_size / 4;
+
+	/* set up the MTX to start receiving data:
+	   use a register for the transfer which will point to the source
+	   (MTX_CR_MTX_SYSC_CDMAT) */
+	/* #.# fill the dst addr */
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		F_ENCODE(2, MTX_BURSTSIZE) |
+		F_ENCODE(0, MTX_RNW) |
+		F_ENCODE(1, MTX_ENABLE) |
+		F_ENCODE(text_size, MTX_LENGTH));
+
+	/* #.# set DMAC access to host memory via BIF */
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
+
+	/* #.# transfer the codec */
+	topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
+		MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
+
+	/* #.# wait dma finish */
+	topaz_wait_for_register(dev_priv,
+		DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+
+	/* #.# clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	/* # return access to topaz core */
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
+
+	/* # upload data */
+	data_size = cur_codec_fw->data_size / 4;
+	data_location = cur_codec_fw->data_location;
+
+	/* #.# fill the dst addr */
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
+		0x80900000 + (data_location - 0x82880000));
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		F_ENCODE(2, MTX_BURSTSIZE) |
+		F_ENCODE(0, MTX_RNW) |
+		F_ENCODE(1, MTX_ENABLE) |
+		F_ENCODE(data_size, MTX_LENGTH));
+
+	/* #.# set DMAC access to host memory via BIF */
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
+
+	/* #.# transfer the codec */
+	topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
+		MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
+
+	/* #.# wait dma finish */
+	topaz_wait_for_register(dev_priv,
+		DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+
+	/* #.# clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	/* # return access to topaz core */
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
+
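+	/* at this point both sections have gone through the same sequence:
+	 * program CDMAA/CDMAC with the MTX-side destination and word count,
+	 * route the DMAC to host memory via the BIF, run the transfer, and
+	 * poll IRQ_STAT for TRANSFER_FIN before returning bus access to the
+	 * topaz core */
+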
+	/* record this codec's mtx data size for
+	 * context save & restore */
+	/* FIXME: with the save buffer now pre-allocated, only the
+	 * recorded buffer size needs to be kept up to date
+	 */
+	cur_mtx_data_size = data_size;
+	if (topaz_priv->cur_mtx_data_size != cur_mtx_data_size)
+		topaz_priv->cur_mtx_data_size = cur_mtx_data_size;
+
+	return 0;
+}
+
+#else
+
+void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
+			uint32_t addr, uint32_t size,
+			struct ttm_buffer_object *buf)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t *buf_p;
+	uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
+	uint32_t cur_ram_id, ram_addr, ram_id;
+	int map_ret, lp;
+	struct ttm_bo_kmap_obj bo_kmap;
+	bool is_iomem;
+	uint32_t cur_addr;
+
+	get_mtx_control_from_dash(dev_priv);
+
+	map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
+	if (map_ret) {
+		DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
+		return;
+	}
+	buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
+
+
+	TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
+	debug_reg = 0x0a0a0606;
+	bank_size = (debug_reg & 0xf0000) >> 16;
+	bank_ram_size = 1 << (bank_size + 2);
+
+	bank_count = (debug_reg & 0xf00) >> 8;
+
+	topaz_wait_for_register(dev_priv,
+		MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
+		MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
+		MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
+
+	cur_ram_id = -1;
+	cur_addr = addr;
+	for (lp = 0; lp < size / 4; ++lp) {
+		ram_id = mtx_mem + (cur_addr / bank_ram_size);
+
+		if (cur_ram_id != ram_id) {
+			ram_addr = cur_addr >> 2;
+
+			MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
+				F_ENCODE(ram_id, MTX_MTX_MCMID) |
+				F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
+				F_ENCODE(1, MTX_MTX_MCMAI));
+
+			cur_ram_id = ram_id;
+		}
+		cur_addr += 4;
+
+		MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
+			*(buf_p + lp));
+
+		topaz_wait_for_register(dev_priv,
+			MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
+			MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
+			MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
+	}
+
+	ttm_bo_kunmap(&bo_kmap);
+
+	PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
+	return;
+}
+
+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct topaz_codec_fw *cur_codec_fw;
+	uint32_t text_size, data_size;
+	uint32_t data_location;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	/* # refer to the HLD document */
+	/* # MTX reset */
+	PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
+	MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
+		MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
+
+	DRM_UDELAY(6000);
+
+	/* # upload the firmware through the register interface */
+	cur_codec_fw = &topaz_priv->topaz_fw[codec];
+
+	PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
+		" data location(0x%08x)\n", codec_to_string(codec),
+		cur_codec_fw->text_size, cur_codec_fw->data_size,
+		cur_codec_fw->data_location);
+
+	/* # upload text */
+	text_size = cur_codec_fw->text_size;
+
+	topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
+		PC_START_ADDRESS - MTX_MEMORY_BASE,
+		text_size, cur_codec_fw->text);
+
+	/* # upload data */
+	data_size = cur_codec_fw->data_size;
+	data_location = cur_codec_fw->data_location;
+
+	topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
+		data_location - 0x82880000, data_size,
+		cur_codec_fw->data);
+
+	return 0;
+}
+
+#endif /* UPLOAD_FW_BY_DMA */
+
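+/* common DMAC helper for the firmware upload paths above: programs one
+ * DMAC channel for a single linear transfer between host memory at
+ * (src_phy_addr + offset) and the device-side address soc_addr.  Note
+ * that byte_num is encoded via DMAC_VALUE_COUNT and, per the NOTE at the
+ * top of this file, the count handed to the DMAC is measured in words,
+ * not bytes (the DMA upload path passes size / 4). */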
+void
+topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
+		uint32_t src_phy_addr, uint32_t offset,
+		uint32_t soc_addr, uint32_t byte_num,
+		uint32_t is_increment, uint32_t is_write)
+{
+	uint32_t dmac_count;
+	uint32_t irq_stat;
+	uint32_t count;
+
+	PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
+	/* # check that no transfer is currently in progress and no
+	   interrupts are outstanding ?? (do we need to care about
+	   interrupts here?) */
+	DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
+	if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
+		DRM_ERROR("TOPAZ: a transfer is already in progress\n");
+
+	/* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
+
+	/* no hold off period */
+	DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
+	/* clear previous interrupts */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
+	/* check irq status */
+	DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
+	/* assert(0 == irq_stat); */
+	if (0 != irq_stat)
+		DRM_ERROR("TOPAZ: interrupts are still outstanding\n");
+
+	DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
+		(src_phy_addr + offset));
+	count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
+		is_write, DMAC_PWIDTH_32_BIT, byte_num);
+	/* generate an interrupt at the end of transfer */
+	count |= MASK_IMG_SOC_TRANSFER_IEN;
+	count |= F_ENCODE(is_write, IMG_SOC_DIR);
+	DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
+
+	DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
+		DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
+			is_increment, DMAC_BURST_2));
+
+	DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
+
+	/* Finally, rewrite the count register with
+	 * the enable bit set to kick off the transfer
+	 */
+	DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
+
+	PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
+
+	return;
+}
+
+void topaz_set_default_regs(struct drm_psb_private *dev_priv)
+{
+	int n;
+	int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
+
+	for (n = 0; n < count; n++)
+		MM_WRITE32(topaz_default_regs[n][0],
+			topaz_default_regs[n][1],
+			topaz_default_regs[n][2]);
+
+}
+
+void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
+			const uint32_t val)
+{
+	uint32_t tmp;
+	get_mtx_control_from_dash(dev_priv);
+
+	/* put data into MTX_RW_DATA */
+	MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
+
+	/* request a write */
+	tmp = reg &
+		~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
+	MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
+
+	/* wait for operation finished */
+	topaz_wait_for_register(dev_priv,
+		MTX_START +
+		MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+		MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+		MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
+
+	release_mtx_control_from_dash(dev_priv);
+}
+
+void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
+			uint32_t *ret_val)
+{
+	uint32_t tmp;
+
+	get_mtx_control_from_dash(dev_priv);
+
+	/* request a read */
+	tmp = (reg &
+		~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
+	MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+		MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
+
+	/* wait for operation finished */
+	topaz_wait_for_register(dev_priv,
+		MTX_START +
+		MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+		MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+		MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
+
+	/* read */
+	MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
+		ret_val);
+
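+	/* core register accesses are bracketed by get_mtx_control_from_dash()
+	 * and release_mtx_control_from_dash(), which save and restore the
+	 * RAM access control register around the access */
+	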
release_mtx_control_from_dash(dev_priv); +} + +void get_mtx_control_from_dash(struct drm_psb_private *dev_priv) +{ + int debug_reg_slave_val; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + /* GetMTXControlFromDash */ + TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, + F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) | + F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT)); + do { + TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, + &debug_reg_slave_val); + } while ((debug_reg_slave_val & 0x18) != 0); + + /* save access control */ + TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET, + &topaz_priv->topaz_dash_access_ctrl); +} + +void release_mtx_control_from_dash(struct drm_psb_private *dev_priv) +{ + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + /* restore access control */ + TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET, + topaz_priv->topaz_dash_access_ctrl); + + /* release bus */ + TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, + F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE)); +} + +void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv) +{ + uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu); + + /* bypass all request while MMU is being configured */ + TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, + F_ENCODE(1, TOPAZ_CR_MMU_BYPASS)); + + /* set MMU hardware at the page table directory */ + PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x " + "into MMU_DIR_LIST0/1\n", pd_addr); + TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr); + TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0); + + /* setup index register, all pointing to directory bank 0 */ + TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0); + + /* now enable MMU access for all requestors */ + TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0); +} + +void topaz_mmu_flushcache(struct drm_psb_private *dev_priv) +{ + uint32_t mmu_control; + + if (!powermgmt_is_hw_on(dev_priv->dev->pdev, PSB_VIDEO_ENC_ISLAND)) + return; + +#if 0 + PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache" + " so flush using the master core\n"); +#endif + /* XXX: disable interrupt */ + + TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control); + mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC); + mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH); + +#if 0 + PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n" + "still operating afterwards even if not cleared,\n" + "but may want to replace with MMU_FLUSH?\n"); +#endif + TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control); + + /* clear it */ + mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC)); + mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH)); + TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control); +} + +#if DEBUG_FUNCTION + +static int topaz_test_sync(struct drm_device *dev, uint32_t seq, + uint32_t sync_seq) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + uint32_t sync_cmd[3]; + struct topaz_cmd_header *cmd_hdr; + uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr; + int count = 1000; + uint32_t clr_flag; + + cmd_hdr = (struct topaz_cmd_header *)&sync_cmd[0]; + + /* reset sync area */ + *sync_p = 0; + + /* insert a SYNC command here */ + cmd_hdr->id = MTX_CMDID_SYNC; + cmd_hdr->size = 3; + cmd_hdr->seq = seq; + + sync_cmd[1] = topaz_priv->topaz_sync_offset; + sync_cmd[2] = sync_seq; + + TOPAZ_BEGIN_CCB(dev_priv); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]); + TOPAZ_END_CCB(dev_priv, 1); + + PSB_DEBUG_GENERAL("Topaz: Sent SYNC with cmd seq=0x%08x," + 
"sync_seq=0x%08x\n", seq, sync_seq); + + while (count && *sync_p != sync_seq) { + DRM_UDELAY(100); + --count; + } + if ((count == 0) && (*sync_p != sync_seq)) { + DRM_ERROR("TOPAZ: wait sycn timeout, expect sync seq 0x%08x," + "actual 0x%08x\n", sync_seq, *sync_p); + } + PSB_DEBUG_GENERAL("TOPAZ: SYNC succeed, sync seq=0x%08x\n", *sync_p); + PSB_DEBUG_GENERAL("Topaz: after SYNC test, query IRQ and clear it\n"); + + clr_flag = lnc_topaz_queryirq(dev); + lnc_topaz_clearirq(dev, clr_flag); + + return 0; +} +static int topaz_test_sync_tt_test(struct drm_device *dev, + uint32_t seq, + uint32_t sync_seq) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->bdev; + int ret; + bool is_iomem; + struct ttm_buffer_object *test_obj; + struct ttm_bo_kmap_obj test_kmap; + unsigned int *test_adr; + uint32_t sync_cmd[3]; + int count = 1000; + unsigned long pfn; + + ret = ttm_buffer_object_create(bdev, 4096, + ttm_bo_type_kernel, + TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT, + 0, 0, 0, NULL, &test_obj); + if (ret) { + DRM_ERROR("failed create test object buffer\n"); + return -1; + } + + ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu), + test_obj->offset, &pfn); + if (ret) { + DRM_ERROR("failed to get pfn from virtual\n"); + return -1; + } + + PSB_DEBUG_GENERAL("Topaz:offset %lx, pfn %lx\n", test_obj->offset, pfn); + + ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages, + &test_kmap); + if (ret) { + DRM_ERROR("failed map buffer\n"); + return -1; + } + test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem); + *test_adr = 0xff55; + ttm_bo_kunmap(&test_kmap); + + /* insert a SYNC command here */ + sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) | + (seq << 16); + sync_cmd[1] = test_obj->offset; + sync_cmd[2] = sync_seq; + + TOPAZ_BEGIN_CCB(dev_priv); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]); + TOPAZ_END_CCB(dev_priv, 1); + + ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages, + &test_kmap); + if (ret) { + DRM_ERROR("failed map buffer\n"); + return -1; + } + test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem); + + while (count && *test_adr != sync_seq) { + DRM_UDELAY(100); + --count; + } + if ((count == 0) && (*test_adr != sync_seq)) { + DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x)," + "actual 0x%08x\n", + sync_seq, *test_adr); + } + PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *test_adr); + ttm_bo_kunmap(&test_kmap); + ttm_bo_unref(&test_obj); + + return 0; +} + +static int topaz_test_sync_manual_alloc_page(struct drm_device *dev, + uint32_t seq, + uint32_t sync_seq, + uint32_t offset) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + int ret; + uint32_t sync_cmd[3]; + int count = 1000; + unsigned long pfn; + + struct page *p; + uint32_t *v; +/* uint32_t offset = 0xd0000000; */ + + p = alloc_page(GFP_DMA32); + if (!p) { + DRM_ERROR("Topaz:Failed allocating page\n"); + return -1; + } + + v = kmap(p); + memset(v, 0x67, PAGE_SIZE); + pfn = (offset >> PAGE_SHIFT); + kunmap(p); + + ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), + &p, pfn << PAGE_SHIFT, 1, 0, 0, 0); + if (ret) { + DRM_ERROR("Topaz:Failed inserting mmu page\n"); + return -1; + } + + /* insert a SYNC command here */ + sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) | + (0x5b << 16); + sync_cmd[1] = pfn << PAGE_SHIFT; + sync_cmd[2] = seq; + + TOPAZ_BEGIN_CCB(dev_priv); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]); + TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]); + TOPAZ_OUT_CCB(dev_priv, 
sync_cmd[2]);
+	TOPAZ_END_CCB(dev_priv, 1);
+
+	v = kmap(p);
+	while (count && *v != sync_seq) {
+		DRM_UDELAY(100);
+		--count;
+	}
+	if ((count == 0) && (*v != sync_seq)) {
+		DRM_ERROR("TOPAZ: wait sync timeout (0x%08x), "
+			"actual 0x%08x\n",
+			sync_seq, *v);
+	}
+	PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *v);
+	kunmap(p);
+
+	return 0;
+}
+
+static int topaz_test_null(struct drm_device *dev, uint32_t seq)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct topaz_cmd_header null_cmd;
+	uint32_t clr_flag;
+
+	/* XXX: firmware setup is finished here; use a NULL command to
+	 * verify that the firmware behaves correctly
+	 */
+	null_cmd.id = MTX_CMDID_NULL;
+	null_cmd.size = 1;
+	null_cmd.seq = seq;
+
+	TOPAZ_BEGIN_CCB(dev_priv);
+	TOPAZ_OUT_CCB(dev_priv, *((uint32_t *)&null_cmd));
+	TOPAZ_END_CCB(dev_priv, 1);
+
+	DRM_UDELAY(1000); /* wait to finish */
+
+	PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
+		" got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
+		seq, CCB_CTRL_SEQ(dev_priv), WB_CCB_CTRL_SEQ(dev_priv),
+		WB_CCB_CTRL_RINDEX(dev_priv));
+
+	PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
+
+	clr_flag = lnc_topaz_queryirq(dev);
+	lnc_topaz_clearirq(dev, clr_flag);
+
+	return 0;
+}
+
+
+/*
+ * this function tests whether the mmu is set up correctly:
+ * it gets a drm_buffer_object and uses CMD_SYNC to write
+ * a certain value into this buffer.
+ */
+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+	unsigned long real_pfn;
+	int ret;
+
+	/* topaz_mmu_flush(dev); */
+	topaz_test_sync(dev, 0x55, sync_value);
+
+	ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
+		topaz_priv->topaz_sync_offset, &real_pfn);
+	if (ret != 0) {
+		PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed, exit\n");
+		return;
+	}
+	PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
+		"BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
+		topaz_priv->topaz_sync_offset, real_pfn, sync_value);
+}
+
+void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
+{
+	int n;
+	int count;
+
+	count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
+	for (n = 0; n < count; n++, ++data)
+		MM_READ32(topaz_default_regs[n][0],
+			topaz_default_regs[n][1],
+			data);
+
+}
+
+void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
+				uint32_t *data)
+{
+	int n;
+	int count;
+
+	count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
+	for (n = 0; n < count; n++, ++data)
+		MM_WRITE32(topaz_default_regs[n][0],
+			topaz_default_regs[n][1],
+			*data);
+
+}
+
+#endif
+
+int lnc_topaz_restore_mtx_state(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	uint32_t reg_val;
+	uint32_t *mtx_reg_state;
+	int i;
+	struct topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	if (!topaz_priv->topaz_mtx_saved)
+		return -1;
+
+	if (topaz_priv->topaz_mtx_data_mem == NULL) {
+		PSB_DEBUG_GENERAL("TOPAZ: trying to restore context without "
+			"space allocated, returning without restoring\n");
+		return -1;
+	}
+
+	/* turn on mtx clocks */
+	MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
+	MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
+		reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
+
+	/* reset mtx */
+	/* FIXME: should use core_write??? */
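+	/* restore sequence: with the MTX clocks back on (above), reset the
+	 * MTX, re-program the MMU, DMA the saved data memory back in via
+	 * mtx_dma_write(), then rewrite the register banks captured by
+	 * lnc_topaz_save_mtx_state() */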
+int lnc_topaz_restore_mtx_state(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + uint32_t reg_val; + uint32_t *mtx_reg_state; + int i; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + if (!topaz_priv->topaz_mtx_saved) + return -1; + + if (topaz_priv->topaz_mtx_data_mem == NULL) { + PSB_DEBUG_GENERAL("TOPAZ: trying to restore context without " + "space allocated, returning without restore\n"); + return -1; + } + + /* turn on mtx clocks */ + MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val); + MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, + reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE)); + + /* reset mtx */ + /* FIXME: should this use core_write? */ + MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET, + MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK); + DRM_UDELAY(6000); + + topaz_mmu_hwsetup(dev_priv); + /* upload code, restore mtx data */ + mtx_dma_write(dev); + + mtx_reg_state = topaz_priv->topaz_mtx_reg_state; + /* restore registers */ + /* FIXME: consider folding the read/write loops into one function */ + /* Restores 8 Registers of D0 Bank */ + /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */ + for (i = 0; i < 8; i++) { + topaz_write_core_reg(dev_priv, 0x1 | (i<<4), + *mtx_reg_state); + mtx_reg_state++; + } + /* Restores 8 Registers of D1 Bank */ + /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */ + for (i = 0; i < 8; i++) { + topaz_write_core_reg(dev_priv, 0x2 | (i<<4), + *mtx_reg_state); + mtx_reg_state++; + } + /* Restores 4 Registers of A0 Bank */ + /* A0StP, A0FrP, A0.2 and A0.3 */ + for (i = 0; i < 4; i++) { + topaz_write_core_reg(dev_priv, 0x3 | (i<<4), + *mtx_reg_state); + mtx_reg_state++; + } + /* Restores 4 Registers of A1 Bank */ + /* A1GbP, A1LbP, A1.2 and A1.3 */ + for (i = 0; i < 4; i++) { + topaz_write_core_reg(dev_priv, 0x4 | (i<<4), + *mtx_reg_state); + mtx_reg_state++; + } + /* Restores PC and PCX */ + for (i = 0; i < 2; i++) { + topaz_write_core_reg(dev_priv, 0x5 | (i<<4), + *mtx_reg_state); + mtx_reg_state++; + } + /* Restores 8 Control Registers */ + /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI, + * TXGPIOO */ + for (i = 0; i < 8; i++) { + topaz_write_core_reg(dev_priv, 0x7 | (i<<4), + *mtx_reg_state); + mtx_reg_state++; + } + + /* turn on MTX */ + MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET, + MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK); + + topaz_priv->topaz_mtx_saved = 0; + + return 0; +} +
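+/* + * lnc_topaz_save_mtx_state() below walks the same 34-word register + * list (8 D0 + 8 D1 + 4 A0 + 4 A1 + PC/PCX + 8 control registers) in + * the same order, so topaz_mtx_reg_state must provide at least 34 + * 32-bit words and any change to one of the two walks has to be + * mirrored in the other. + */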
+int lnc_topaz_save_mtx_state(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + uint32_t *mtx_reg_state; + int i; + struct topaz_codec_fw *cur_codec_fw; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + /* FIXME: make sure the topaz_mtx_data_mem is allocated */ + if (topaz_priv->topaz_mtx_data_mem == NULL) { + PSB_DEBUG_GENERAL("TOPAZ: trying to save context without space " + "allocated, returning without save\n"); + return -1; + } + if (topaz_priv->topaz_fw_loaded == 0) { + PSB_DEBUG_GENERAL("TOPAZ: trying to save context without firmware " + "uploaded\n"); + return -1; + } + + topaz_wait_for_register(dev_priv, + MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET, + TXRPT_WAITONKICK_VALUE, + 0xffffffff); + + /* stop mtx */ + MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET, + MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK); + + mtx_reg_state = topaz_priv->topaz_mtx_reg_state; + + /* FIXME: consider folding the read/write loops into one function */ + /* Saves 8 Registers of D0 Bank */ + /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */ + for (i = 0; i < 8; i++) { + topaz_read_core_reg(dev_priv, 0x1 | (i<<4), + mtx_reg_state); + mtx_reg_state++; + } + /* Saves 8 Registers of D1 Bank */ + /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */ + for (i = 0; i < 8; i++) { + topaz_read_core_reg(dev_priv, 0x2 | (i<<4), + mtx_reg_state); + mtx_reg_state++; + } + /* Saves 4 Registers of A0 Bank */ + /* A0StP, A0FrP, A0.2 and A0.3 */ + for (i = 0; i < 4; i++) { + topaz_read_core_reg(dev_priv, 0x3 | (i<<4), + mtx_reg_state); + mtx_reg_state++; + } + /* Saves 4 Registers of A1 Bank */ + /* A1GbP, A1LbP, A1.2 and A1.3 */ + for (i = 0; i < 4; i++) { + topaz_read_core_reg(dev_priv, 0x4 | (i<<4), + mtx_reg_state); + mtx_reg_state++; + } + /* Saves PC and PCX */ + for (i = 0; i < 2; i++) { + topaz_read_core_reg(dev_priv, 0x5 | (i<<4), + mtx_reg_state); + mtx_reg_state++; + } + /* Saves 8 Control Registers */ + /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI, + * TXGPIOO */ + for (i = 0; i < 8; i++) { + topaz_read_core_reg(dev_priv, 0x7 | (i<<4), + mtx_reg_state); + mtx_reg_state++; + } + + /* save mtx data memory */ + cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec]; + + mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000, + topaz_priv->cur_mtx_data_size); + + /* turn off mtx clocks */ + MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, + MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE); + + topaz_priv->topaz_mtx_saved = 1; + + return 0; +} + +void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct ttm_buffer_object *target; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + /* setup mtx DMAC registers to do transfer */ + MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr); + MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC, + F_ENCODE(2, MTX_BURSTSIZE) | + F_ENCODE(1, MTX_RNW) | + F_ENCODE(1, MTX_ENABLE) | + F_ENCODE(size, MTX_LENGTH)); + + /* give the DMAC access to the host memory via BIF */ + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1); + + target = topaz_priv->topaz_mtx_data_mem; + /* transfer the data */ + /* FIXME: is size measured in bytes? */ + topaz_dma_transfer(dev_priv, 0, target->offset, 0, + MTX_CR_MTX_SYSC_CDMAT, + size, 0, 1); + + /* wait for the transfer to finish */ + topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START, + F_ENCODE(1, IMG_SOC_TRANSFER_FIN), + F_ENCODE(1, IMG_SOC_TRANSFER_FIN)); + /* clear interrupt */ + DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0); + /* give access back to topaz core */ + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0); +} + +void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr, + uint32_t soc_addr, uint32_t bytes_num, + int increment, int rnw) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + uint32_t count_reg; + uint32_t irq_state; + + /* check no transfer is in progress */ + DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg); + if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) { + DRM_ERROR("TOPAZ: a transfer is already in progress when " + "trying to save mtx data\n"); + /* FIXME: how to handle this error */ + return; + } + + /* no hold off period */ + DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0); + /* clear irq state */ + DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0); + DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state); + if (0 != irq_state) { + DRM_ERROR("TOPAZ: irq state could not be cleared\n"); + return; + } + + DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr); + count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, + DMAC_PWIDTH_32_BIT, rnw, + DMAC_PWIDTH_32_BIT, bytes_num); + /* generate an interrupt at end of transfer */ + count_reg |= MASK_IMG_SOC_TRANSFER_IEN; + count_reg |= F_ENCODE(rnw, IMG_SOC_DIR); + DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg); + + DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel), + DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment, + DMAC_BURST_2)); + DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr); + + /* Finally, rewrite the count register with the enable + * bit set to kick off the transfer */ + DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), + count_reg | MASK_IMG_SOC_EN); +} +
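+/* + * The address arithmetic used for the data segment in + * lnc_topaz_save_mtx_state() and in mtx_dma_write() below + * (0x80900000 + data_location - 0x82880000) reads like a rebase of + * the firmware-image address into the MTX DMA window: 0x82880000 + * would be the link-time base of the firmware image and 0x80900000 + * the MTX-side base. This is an inference from the constants only; + * the patch does not document them. + */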
+void mtx_dma_write(struct drm_device *dev) +{ + struct topaz_codec_fw *cur_codec_fw; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec]; + + /* upload code */ + /* setup mtx DMAC registers to receive transfer */ + MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000); + MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC, + F_ENCODE(2, MTX_BURSTSIZE) | + F_ENCODE(0, MTX_RNW) | + F_ENCODE(1, MTX_ENABLE) | + F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH)); + + /* give DMAC access to host memory */ + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1); + + /* transfer code */ + topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0, + MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4, + 0, 0); + /* wait finished */ + topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START, + F_ENCODE(1, IMG_SOC_TRANSFER_FIN), + F_ENCODE(1, IMG_SOC_TRANSFER_FIN)); + /* clear interrupt */ + DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0); + + /* set up mtx to start receiving data */ + MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 + + (cur_codec_fw->data_location) - 0x82880000); + + MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC, + F_ENCODE(2, MTX_BURSTSIZE) | + F_ENCODE(0, MTX_RNW) | + F_ENCODE(1, MTX_ENABLE) | + F_ENCODE(topaz_priv->cur_mtx_data_size, MTX_LENGTH)); + + /* give DMAC access to host memory */ + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1); + + /* transfer data */ + topaz_dma_transfer(dev_priv, 0, topaz_priv->topaz_mtx_data_mem->offset, + 0, MTX_CR_MTX_SYSC_CDMAT, + topaz_priv->cur_mtx_data_size, + 0, 0); + /* wait finished */ + topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START, + F_ENCODE(1, IMG_SOC_TRANSFER_FIN), + F_ENCODE(1, IMG_SOC_TRANSFER_FIN)); + /* clear interrupt */ + DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0); + + /* give access back to Topaz Core */ + TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0); +} +
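+/* + * Both DMA helpers above follow the same sequence: program the MTX + * CDMAA/CDMAC registers with the MTX-side address and length, route + * the DMAC to host memory by writing 1 to TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, + * kick the transfer with topaz_dma_transfer(), poll + * IMG_SOC_DMAC_IRQ_STAT for IMG_SOC_TRANSFER_FIN, clear the interrupt + * and hand the bus back by writing 0 to the mode register. + */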
diff --git a/drivers/gpu/drm/psb/psb_bl.c b/drivers/gpu/drm/psb/psb_bl.c new file mode 100644 index 0000000..2c723f4 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_bl.c @@ -0,0 +1,232 @@ +/* + * psb backlight using HAL + * + * Copyright (c) 2009 Eric Knopp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/backlight.h> +#include "psb_drv.h" +#include "psb_intel_reg.h" +#include "psb_intel_drv.h" +#include "psb_intel_bios.h" +#include "psb_powermgmt.h" + +#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF +#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */ +#define BLC_PWM_FREQ_CALC_CONSTANT 32 +#define MHz 1000000 +#define BRIGHTNESS_MIN_LEVEL 1 +#define BRIGHTNESS_MAX_LEVEL 100 +#define BRIGHTNESS_MASK 0xFF +#define BLC_POLARITY_NORMAL 0 +#define BLC_POLARITY_INVERSE 1 +#define BLC_ADJUSTMENT_MAX 100 + +#define PSB_BLC_PWM_PRECISION_FACTOR 10 +#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE +#define PSB_BLC_MIN_PWM_REG_FREQ 0x2 + +#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) +#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16) + +static int psb_brightness; +static int blc_pol; +static struct backlight_device *psb_backlight_device; +static u8 blc_brightnesscmd; +static u8 blc_type; + +int psb_set_brightness(struct backlight_device *bd) +{ + u32 blc_pwm_ctl; + u32 max_pwm_blc; + + struct drm_device *dev = + (struct drm_device *)psb_backlight_device->priv; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + + int level = bd->props.brightness; + + DRM_DEBUG("backlight level set to %d\n", level); + + /* Perform value bounds checking */ + if (level < BRIGHTNESS_MIN_LEVEL) + level = BRIGHTNESS_MIN_LEVEL; + + if(IS_POULSBO(dev)) { + psb_intel_lvds_set_brightness(dev, level); + psb_brightness = level; + return 0; + } + + if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) { + /* Calculate and set the brightness value */ + max_pwm_blc = REG_READ(BLC_PWM_CTL) >> + MRST_BACKLIGHT_MODULATION_FREQ_SHIFT; + blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL; + + /* Adjust the backlight level with the percent in + * dev_priv->blc_adj1; + */ + blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1; + blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX; + + if (blc_pol == BLC_POLARITY_INVERSE) + blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl; + + /* force PWM bit on */ + REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2))); + REG_WRITE(BLC_PWM_CTL, + (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) | + blc_pwm_ctl); + + /* printk("***backlight brightness = %i\n", level); */ + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } + + /* cache the brightness for later use */ + psb_brightness = level; + return 0; +} + +int psb_get_brightness(struct backlight_device *bd) +{ + /* return locally cached var instead of HW read (due to DPST etc.) */ + return psb_brightness; +} + +struct backlight_ops psb_ops = { + .get_brightness = psb_get_brightness, + .update_status = psb_set_brightness, +}; + +int psb_backlight_init(struct drm_device *dev) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + unsigned long CoreClock; + // u32 bl_max_freq; + // unsigned long value; + u16 bl_max_freq; + uint32_t value; + uint32_t clock; + uint32_t blc_pwm_precision_factor; + + struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0); + + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + + psb_backlight_device = backlight_device_register("psb-bl", + NULL, NULL, &psb_ops); + if (IS_ERR(psb_backlight_device)) + return PTR_ERR(psb_backlight_device); + + psb_backlight_device->priv = dev; + + if(IS_MRST(dev)) { + /* HACK HACK HACK */ + dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX; + + bl_max_freq = 256; /* this needs to come from VBT when available */ + blc_pol = BLC_POLARITY_NORMAL; /* this needs to be set elsewhere */ + blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR; + + if (dev_priv->sku_83) + CoreClock = 166; + else if (dev_priv->sku_100) + CoreClock = 200; + else if (dev_priv->sku_100L) + CoreClock = 100; + else + return 1; + } else { + /* get bl_max_freq and pol from dev_priv*/ + if(!dev_priv->lvds_bl){ + DRM_ERROR("Has no valid LVDS backlight info\n"); + return 1; + } + bl_max_freq = dev_priv->lvds_bl->freq; + blc_pol = dev_priv->lvds_bl->pol; + blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR; + blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd; + blc_type = dev_priv->lvds_bl->type; + + //pci_write_config_dword(pci_root, 0xD4, 0x00C32004); + //pci_write_config_dword(pci_root, 0xD0, 0xE0033000); + + pci_write_config_dword(pci_root, 0xD0, 0xD0050300); + pci_read_config_dword(pci_root, 0xD4, &clock); + + switch(clock & 0x07) { + case 0: + CoreClock = 100; + break; + case 1: + CoreClock = 133; + break; + case 2: + CoreClock = 150; + break; + case 3: + CoreClock = 178; + break; + case 4: + CoreClock = 200; + break; + case 5: + case 6: + case 7: + CoreClock = 266; + break; + default: + return 1; + } + }/*end if(IS_MRST(dev))*/ + + value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT; + value *= blc_pwm_precision_factor; + value /= bl_max_freq; + value /= blc_pwm_precision_factor; +
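+ /* + * Worked example: a 200MHz sku_100 part with the default + * bl_max_freq of 256 gives (200 * 1000000) / 32 / 256 ~= 24414 + * as the PWM base-frequency value; the multiply and divide by + * blc_pwm_precision_factor cancel out for these numbers. + */ +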
 if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) { + if(IS_MRST(dev)) { + if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ) + return 2; + else { + REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2))); + REG_WRITE(BLC_PWM_CTL, value | + (value << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT)); + } + } else { + if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ || + value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ) + return 2; + else { + value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR; + REG_WRITE(BLC_PWM_CTL, + (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | + (value)); + } + } /*end if(IS_MRST(dev))*/ + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } + + psb_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL; + psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL; + backlight_update_status(psb_backlight_device); +#endif + return 0; +} + +void psb_backlight_exit(void) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + psb_backlight_device->props.brightness = 0; + backlight_update_status(psb_backlight_device); + backlight_device_unregister(psb_backlight_device); +#endif + return; +} diff --git a/drivers/gpu/drm/psb/psb_buffer.c b/drivers/gpu/drm/psb/psb_buffer.c new file mode 100644 index 0000000..cb25bde
--- /dev/null +++ b/drivers/gpu/drm/psb/psb_buffer.c @@ -0,0 +1,519 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ +#include "ttm/ttm_placement_common.h" +#include "ttm/ttm_execbuf_util.h" +#include "ttm/ttm_fence_api.h" +#include +#include "psb_drv.h" +#include "psb_schedule.h" + +#define DRM_MEM_TTM 26 + +struct drm_psb_ttm_backend { + struct ttm_backend base; + struct page **pages; + unsigned int desired_tile_stride; + unsigned int hw_tile_stride; + int mem_type; + unsigned long offset; + unsigned long num_pages; +}; + +/* + * Poulsbo GPU virtual space looks like this + * (We currently use only one MMU context). + * + * gatt_start = Start of GATT aperture in bus space. + * stolen_end = End of GATT populated by stolen memory in bus space. + * gatt_end = End of GATT + * twod_end = MIN(gatt_start + 256_MEM, gatt_end) + * + * 0x00000000 -> 0x10000000 Temporary mapping space for tiling- + * and copy operations. + * This space is not managed and is protected by the + * temp_mem mutex. + * + * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers. + * + * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use. + * + * gatt_start -> stolen_end TTM_PL_VRAM Pre-populated GATT pages. + * + * stolen_end -> twod_end TTM_PL_TT GATT memory usable by 2D engine. + * + * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not + * usable by 2D engine. + * + * gatt_end -> 0xffffffff Currently unused. 
+ */ + +static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + + struct drm_psb_private *dev_priv = + container_of(bdev, struct drm_psb_private, bdev); + struct psb_gtt *pg = dev_priv->pg; + + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case DRM_PSB_MEM_KERNEL: + man->io_offset = 0x00000000; + man->io_size = 0x00000000; + man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; + man->gpu_offset = PSB_MEM_KERNEL_START; + man->available_caching = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + case DRM_PSB_MEM_MMU: + man->io_offset = 0x00000000; + man->io_size = 0x00000000; + man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; + man->gpu_offset = PSB_MEM_MMU_START; + man->available_caching = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + case DRM_PSB_MEM_PDS: + man->io_offset = 0x00000000; + man->io_size = 0x00000000; + man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; + man->gpu_offset = PSB_MEM_PDS_START; + man->available_caching = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + case DRM_PSB_MEM_RASTGEOM: + man->io_offset = 0x00000000; + man->io_size = 0x00000000; + man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; + man->gpu_offset = PSB_MEM_RASTGEOM_START; + man->available_caching = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + case TTM_PL_VRAM: + man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; +#ifdef PSB_WORKING_HOST_MMU_ACCESS + man->io_offset = pg->gatt_start; + man->io_size = pg->gatt_pages << PAGE_SHIFT; +#else + man->io_offset = pg->stolen_base; + man->io_size = pg->vram_stolen_size; +#endif + man->gpu_offset = pg->gatt_start; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + case TTM_PL_CI: + man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; + man->io_offset = dev_priv->ci_region_start; + man->io_size = pg->ci_stolen_size; + man->gpu_offset = pg->gatt_start - pg->ci_stolen_size; + man->available_caching = TTM_PL_FLAG_UNCACHED; + man->default_caching = TTM_PL_FLAG_UNCACHED; + break; + case TTM_PL_RAR: /* Unmappable RAR memory */ + man->io_offset = dev_priv->rar_region_start; + man->io_size = pg->rar_stolen_size; + man->io_addr = NULL; + man->flags = TTM_MEMTYPE_FLAG_FIXED; + man->available_caching = TTM_PL_FLAG_UNCACHED; + man->default_caching = TTM_PL_FLAG_UNCACHED; + man->gpu_offset = pg->gatt_start + pg->vram_stolen_size; + break; + case TTM_PL_TT: /* Mappable GATT memory */ + man->io_offset = pg->gatt_start; + man->io_size = pg->gatt_pages << PAGE_SHIFT; + man->io_addr = NULL; +#ifdef PSB_WORKING_HOST_MMU_ACCESS + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; +#else + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; +#endif + man->available_caching = TTM_PL_FLAG_CACHED | + 
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + man->gpu_offset = pg->gatt_start; + break; + case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */ + man->io_offset = pg->gatt_start; + man->io_size = pg->gatt_pages << PAGE_SHIFT; + man->io_addr = NULL; +#ifdef PSB_WORKING_HOST_MMU_ACCESS + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; +#else + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; +#endif + man->gpu_offset = pg->gatt_start; + man->available_caching = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned) type); + return -EINVAL; + } + return 0; +} + +static uint32_t psb_evict_mask(struct ttm_buffer_object *bo) +{ + uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM; + + + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + if (bo->mem.proposed_flags & TTM_PL_FLAG_TT) + return cur_placement | TTM_PL_FLAG_TT; + else + return cur_placement | TTM_PL_FLAG_SYSTEM; + default: + return cur_placement | TTM_PL_FLAG_SYSTEM; + } +} + +static int psb_invalidate_caches(struct ttm_bo_device *bdev, + uint32_t placement) +{ + return 0; +} + +static int psb_move_blit(struct ttm_buffer_object *bo, + bool evict, bool no_wait, + struct ttm_mem_reg *new_mem) +{ + struct drm_psb_private *dev_priv = + container_of(bo->bdev, struct drm_psb_private, bdev); + struct drm_device *dev = dev_priv->dev; + struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_fence_object *fence; + int dir = 0; + int ret; + + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { + dir = 1; + } + + psb_emit_2d_copy_blit(dev, + old_mem->mm_node->start << PAGE_SHIFT, + new_mem->mm_node->start << PAGE_SHIFT, + new_mem->num_pages, dir); + + ret = ttm_fence_object_create(&dev_priv->fdev, 0, + _PSB_FENCE_TYPE_EXE, + TTM_FENCE_FLAG_EMIT, + &fence); + if (unlikely(ret != 0)) { + psb_idle_2d(dev); + if (fence) + ttm_fence_object_unref(&fence); + } + + ret = ttm_bo_move_accel_cleanup(bo, (void *) fence, + (void *) (unsigned long) + _PSB_FENCE_TYPE_EXE, + evict, no_wait, new_mem); + if (fence) + ttm_fence_object_unref(&fence); + return ret; +} + +/* + * Flip destination ttm into GATT, + * then blit and subsequently move out again. 
+ */ + +static int psb_move_flip(struct ttm_buffer_object *bo, + bool evict, bool interruptible, bool no_wait, + struct ttm_mem_reg *new_mem) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_reg tmp_mem; + int ret; + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; + tmp_mem.proposed_flags = TTM_PL_FLAG_TT; + + ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait); + if (ret) + return ret; + ret = ttm_tt_bind(bo->ttm, &tmp_mem); + if (ret) + goto out_cleanup; + ret = psb_move_blit(bo, true, no_wait, &tmp_mem); + if (ret) + goto out_cleanup; + + ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem); +out_cleanup: + if (tmp_mem.mm_node) { + spin_lock(&bdev->lru_lock); + drm_mm_put_block(tmp_mem.mm_node); + tmp_mem.mm_node = NULL; + spin_unlock(&bdev->lru_lock); + } + return ret; +} + +static int psb_move(struct ttm_buffer_object *bo, + bool evict, bool interruptible, + bool no_wait, struct ttm_mem_reg *new_mem) +{ + struct ttm_mem_reg *old_mem = &bo->mem; + + if ((old_mem->mem_type == TTM_PL_RAR) || + (new_mem->mem_type == TTM_PL_RAR)) { + ttm_bo_free_old_node(bo); + *old_mem = *new_mem; + } else if (old_mem->mem_type == TTM_PL_SYSTEM) { + return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else if (new_mem->mem_type == TTM_PL_SYSTEM) { + int ret = psb_move_flip(bo, evict, interruptible, + no_wait, new_mem); + if (unlikely(ret != 0)) { + if (ret == -ERESTART) + return ret; + else + return ttm_bo_move_memcpy(bo, evict, no_wait, + new_mem); + } + } else { + if (psb_move_blit(bo, evict, no_wait, new_mem)) + return ttm_bo_move_memcpy(bo, evict, no_wait, + new_mem); + } + return 0; +} + +static int drm_psb_tbe_populate(struct ttm_backend *backend, + unsigned long num_pages, + struct page **pages, + struct page *dummy_read_page) +{ + struct drm_psb_ttm_backend *psb_be = + container_of(backend, struct drm_psb_ttm_backend, base); + + psb_be->pages = pages; + return 0; +} + +static int drm_psb_tbe_unbind(struct ttm_backend *backend) +{ + struct ttm_bo_device *bdev = backend->bdev; + struct drm_psb_private *dev_priv = + container_of(bdev, struct drm_psb_private, bdev); + struct drm_psb_ttm_backend *psb_be = + container_of(backend, struct drm_psb_ttm_backend, base); + struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu); + struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type]; + + PSB_DEBUG_RENDER("MMU unbind.\n"); + + if (psb_be->mem_type == TTM_PL_TT) { + uint32_t gatt_p_offset = + (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT; + + (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset, + psb_be->num_pages, + psb_be->desired_tile_stride, + psb_be->hw_tile_stride); + } + + psb_mmu_remove_pages(pd, psb_be->offset, + psb_be->num_pages, + psb_be->desired_tile_stride, + psb_be->hw_tile_stride); + + return 0; +} + +static int drm_psb_tbe_bind(struct ttm_backend *backend, + struct ttm_mem_reg *bo_mem) +{ + struct ttm_bo_device *bdev = backend->bdev; + struct drm_psb_private *dev_priv = + container_of(bdev, struct drm_psb_private, bdev); + struct drm_psb_ttm_backend *psb_be = + container_of(backend, struct drm_psb_ttm_backend, base); + struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu); + struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type]; + int type; + int ret = 0; + + psb_be->mem_type = bo_mem->mem_type; + psb_be->num_pages = bo_mem->num_pages; + psb_be->desired_tile_stride = 0; + psb_be->hw_tile_stride = 0; + psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) + + man->gpu_offset; + + type = + (bo_mem-> + flags & TTM_PL_FLAG_CACHED) 
? PSB_MMU_CACHED_MEMORY : 0; + + PSB_DEBUG_RENDER("MMU bind.\n"); + if (psb_be->mem_type == TTM_PL_TT) { + uint32_t gatt_p_offset = + (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT; + + ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages, + gatt_p_offset, + psb_be->num_pages, + psb_be->desired_tile_stride, + psb_be->hw_tile_stride, type); + if (ret) + goto out_err; + } + + ret = psb_mmu_insert_pages(pd, psb_be->pages, + psb_be->offset, psb_be->num_pages, + psb_be->desired_tile_stride, + psb_be->hw_tile_stride, type); + if (ret) + goto out_err; + + return 0; +out_err: + drm_psb_tbe_unbind(backend); + return ret; + +} + +static void drm_psb_tbe_clear(struct ttm_backend *backend) +{ + struct drm_psb_ttm_backend *psb_be = + container_of(backend, struct drm_psb_ttm_backend, base); + + psb_be->pages = NULL; + return; +} + +static void drm_psb_tbe_destroy(struct ttm_backend *backend) +{ + struct drm_psb_ttm_backend *psb_be = + container_of(backend, struct drm_psb_ttm_backend, base); + + if (backend) + kfree(psb_be); +} + +static struct ttm_backend_func psb_ttm_backend = { + .populate = drm_psb_tbe_populate, + .clear = drm_psb_tbe_clear, + .bind = drm_psb_tbe_bind, + .unbind = drm_psb_tbe_unbind, + .destroy = drm_psb_tbe_destroy, +}; + +static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev) +{ + struct drm_psb_ttm_backend *psb_be; + + psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL); + if (!psb_be) + return NULL; + psb_be->pages = NULL; + psb_be->base.func = &psb_ttm_backend; + psb_be->base.bdev = bdev; + return &psb_be->base; +} + +/* + * Use this memory type priority if no eviction is needed. + */ +static uint32_t psb_mem_prios[] = { + TTM_PL_CI, + TTM_PL_RAR, + TTM_PL_VRAM, + TTM_PL_TT, + DRM_PSB_MEM_KERNEL, + DRM_PSB_MEM_MMU, + DRM_PSB_MEM_RASTGEOM, + DRM_PSB_MEM_PDS, + DRM_PSB_MEM_APER, + TTM_PL_SYSTEM +}; + +/* + * Use this memory type priority if eviction is needed. 
+ */ +static uint32_t psb_busy_prios[] = { + TTM_PL_TT, + TTM_PL_VRAM, + TTM_PL_CI, + TTM_PL_RAR, + DRM_PSB_MEM_KERNEL, + DRM_PSB_MEM_MMU, + DRM_PSB_MEM_RASTGEOM, + DRM_PSB_MEM_PDS, + DRM_PSB_MEM_APER, + TTM_PL_SYSTEM +}; + + +struct ttm_bo_driver psb_ttm_bo_driver = { + .mem_type_prio = psb_mem_prios, + .mem_busy_prio = psb_busy_prios, + .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios), + .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios), + .create_ttm_backend_entry = &drm_psb_tbe_init, + .invalidate_caches = &psb_invalidate_caches, + .init_mem_type = &psb_init_mem_type, + .evict_flags = &psb_evict_mask, + .move = &psb_move, + .verify_access = &psb_verify_access, + .sync_obj_signaled = &ttm_fence_sync_obj_signaled, + .sync_obj_wait = &ttm_fence_sync_obj_wait, + .sync_obj_flush = &ttm_fence_sync_obj_flush, + .sync_obj_unref = &ttm_fence_sync_obj_unref, + .sync_obj_ref = &ttm_fence_sync_obj_ref +}; diff --git a/drivers/gpu/drm/psb/psb_dpst.c b/drivers/gpu/drm/psb/psb_dpst.c new file mode 100644 index 0000000..435e53b --- /dev/null +++ b/drivers/gpu/drm/psb/psb_dpst.c @@ -0,0 +1,208 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * James C. Gualario + * + */ +#include "psb_umevents.h" +#include "psb_dpst.h" +/** + * Inform the kernel of the work to be performed and of the function + * that performs it. + */ +DECLARE_WORK(dpst_dev_change_work, &psb_dpst_dev_change_wq); + 
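+/* + * Ring protocol, as read from the code below (the patch does not + * spell it out): psb_dpst_notify_change_um() is the producer, storing + * an event at dev_name_write and marking the slot + * DRM_DPST_READY_TO_READ before advancing; psb_dpst_dev_change_wq() + * is the consumer, draining slots at dev_name_read and marking them + * DRM_DPST_READ_COMPLETE. The dev_name_write_wrap / + * dev_name_read_write_wrap_ack pair signals a producer wrap-around so + * the consumer first drains to the end of the ring and then restarts + * from slot 0. + */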
+/** + * psb_dpst_notify_change_um - notify user mode of hotplug changes + * + * @event: umevent object carrying the change being reported + * @state: dpst state struct to get workqueue from + * + */ +int psb_dpst_notify_change_um(struct umevent_obj *event, + struct dpst_state *state) +{ + state->dpst_change_wq_data.dev_name_arry_rw_status + [state->dpst_change_wq_data.dev_name_write] = + DRM_DPST_READY_TO_READ; + state->dpst_change_wq_data.dev_umevent_arry + [state->dpst_change_wq_data.dev_name_write] = + event; + if (state->dpst_change_wq_data.dev_name_read_write_wrap_ack == 1) + state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0; + state->dpst_change_wq_data.dev_name_write++; + if (state->dpst_change_wq_data.dev_name_write == + state->dpst_change_wq_data.dev_name_read) { + state->dpst_change_wq_data.dev_name_write--; + return IRQ_NONE; + } + if (state->dpst_change_wq_data.dev_name_write > + DRM_DPST_RING_DEPTH_MAX) { + state->dpst_change_wq_data.dev_name_write = 0; + state->dpst_change_wq_data.dev_name_write_wrap = 1; + } + state->dpst_change_wq_data.hotplug_dev_list = state->list; + queue_work(state->dpst_wq, &(state->dpst_change_wq_data.work)); + return IRQ_HANDLED; +} +EXPORT_SYMBOL(psb_dpst_notify_change_um); +/** + * psb_dpst_create_and_notify_um - create and notify user mode of new dev + * + * @name: name to give for new event / device + * @state: dpst state instances to associate event with + * + */ +struct umevent_obj *psb_dpst_create_and_notify_um(const char *name, + struct dpst_state *state) +{ + return psb_create_umevent_obj(name, state->list); + +} +EXPORT_SYMBOL(psb_dpst_create_and_notify_um); +/** + * psb_dpst_device_pool_create_and_init - make new hotplug device pool + * + * @parent_kobj: parent kobject to associate dpst kset with + * @state: dpst state instance to associate list with + * + */ +struct umevent_list *psb_dpst_device_pool_create_and_init( + struct kobject *parent_kobj, + struct dpst_state *state) +{ + + struct umevent_list *new_hotplug_dev_list = NULL; + new_hotplug_dev_list = psb_umevent_create_list(); + if (new_hotplug_dev_list) + psb_umevent_init(parent_kobj, new_hotplug_dev_list, + "psb_dpst"); + + state->dpst_wq = create_singlethread_workqueue("dpst-wq"); + + if (!state->dpst_wq) + return NULL; + + INIT_WORK(&state->dpst_change_wq_data.work, psb_dpst_dev_change_wq); + + state->dpst_change_wq_data.dev_name_read = 0; + state->dpst_change_wq_data.dev_name_write = 0; + state->dpst_change_wq_data.dev_name_write_wrap = 0; + state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0; + + memset(&(state->dpst_change_wq_data.dev_name_arry_rw_status[0]), + 0, sizeof(int)*DRM_DPST_RING_DEPTH); + + return new_hotplug_dev_list; +} +EXPORT_SYMBOL(psb_dpst_device_pool_create_and_init); +/** + * psb_dpst_init - init dpst subsystem + * @parent_kobj: parent kobject to associate dpst state with + * + */ +struct dpst_state *psb_dpst_init(struct kobject *parent_kobj) +{ + struct dpst_state *state; + state = kzalloc(sizeof(struct dpst_state), GFP_KERNEL); + if (!state) + return NULL; + state->list = psb_dpst_device_pool_create_and_init( + parent_kobj, + state); + return state; +} +EXPORT_SYMBOL(psb_dpst_init); +/** + * psb_dpst_device_pool_destroy - destroy all dpst related resources + * + * @state: dpst state instance to destroy + * + */ +void psb_dpst_device_pool_destroy(struct dpst_state *state) +{ + flush_workqueue(state->dpst_wq);
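 + /* + * Drain any queued dev-change work before tearing the queue + * down; destroy_workqueue() should flush on its own, so the + * explicit flush_workqueue() is defensive. + */ +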
destroy_workqueue(state->dpst_wq); + psb_umevent_cleanup(state->list); + kfree(state); +} +EXPORT_SYMBOL(psb_dpst_device_pool_destroy); +/** + * psb_dpst_dev_change_wq - change workqueue implementation + * + * @work: work struct to use for kernel scheduling + * + */ +void psb_dpst_dev_change_wq(struct work_struct *work) +{ + struct dpst_disp_workqueue_data *wq_data; + wq_data = to_dpst_disp_workqueue_data(work); + if (wq_data->dev_name_write_wrap == 1) { + wq_data->dev_name_read_write_wrap_ack = 1; + wq_data->dev_name_write_wrap = 0; + while (wq_data->dev_name_read != DRM_DPST_RING_DEPTH_MAX) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_DPST_READY_TO_READ) { + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_DPST_READ_COMPLETE; + psb_umevent_notify_change_gfxsock + (wq_data->dev_umevent_arry + [wq_data->dev_name_read]); + } + wq_data->dev_name_read++; + } + wq_data->dev_name_read = 0; + while (wq_data->dev_name_read < wq_data->dev_name_write-1) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_DPST_READY_TO_READ) { + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_DPST_READ_COMPLETE; + psb_umevent_notify_change_gfxsock + (wq_data->dev_umevent_arry + [wq_data->dev_name_read]); + } + wq_data->dev_name_read++; + } + } else { + while (wq_data->dev_name_read < wq_data->dev_name_write) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_DPST_READY_TO_READ) { + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_DPST_READ_COMPLETE; + psb_umevent_notify_change_gfxsock + (wq_data->dev_umevent_arry + [wq_data->dev_name_read]); + } + wq_data->dev_name_read++; + } + } + if (wq_data->dev_name_read > DRM_DPST_RING_DEPTH_MAX) + wq_data->dev_name_read = 0; +} +EXPORT_SYMBOL(psb_dpst_dev_change_wq); diff --git a/drivers/gpu/drm/psb/psb_dpst.h b/drivers/gpu/drm/psb/psb_dpst.h new file mode 100644 index 0000000..43d3128 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_dpst.h @@ -0,0 +1,90 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * James C. 
Gualario + * + */ +#ifndef _PSB_DPST_H_ +#define _PSB_DPST_H_ +/** + * required includes + * + */ +#include "psb_umevents.h" +/** + * dpst specific defines + * + */ +#define DRM_DPST_RING_DEPTH 256 +#define DRM_DPST_RING_DEPTH_MAX (DRM_DPST_RING_DEPTH-1) +#define DRM_DPST_READY_TO_READ 1 +#define DRM_DPST_READ_COMPLETE 2 +/** + * dpst workqueue data struct. + */ +struct dpst_disp_workqueue_data { + struct work_struct work; + const char *dev_name; + int dev_name_write; + int dev_name_read; + int dev_name_write_wrap; + int dev_name_read_write_wrap_ack; + struct umevent_obj *dev_umevent_arry[DRM_DPST_RING_DEPTH]; + int dev_name_arry_rw_status[DRM_DPST_RING_DEPTH]; + struct umevent_list *hotplug_dev_list; +}; +/** + * dpst state structure + * + */ +struct dpst_state { + struct workqueue_struct *dpst_wq; + struct dpst_disp_workqueue_data dpst_change_wq_data; + struct umevent_list *list; +}; +/** + * main interface function prototypes for dpst support. + * + */ +extern struct dpst_state *psb_dpst_init(struct kobject *parent_kobj); +extern int psb_dpst_notify_change_um(struct umevent_obj *event, + struct dpst_state *state); +extern struct umevent_obj *psb_dpst_create_and_notify_um(const char *name, + struct dpst_state *state); +extern struct umevent_list *psb_dpst_device_pool_create_and_init( + struct kobject *parent_kobj, + struct dpst_state *state); +extern void psb_dpst_device_pool_destroy(struct dpst_state *state); +/** + * to go back and forth between work struct and workqueue data + * + */ +#define to_dpst_disp_workqueue_data(x) \ + container_of(x, struct dpst_disp_workqueue_data, work) + +/** + * function prototypes for workqueue implementation + * + */ +extern void psb_dpst_dev_change_wq(struct work_struct *work); +#endif diff --git a/drivers/gpu/drm/psb/psb_drm.h b/drivers/gpu/drm/psb/psb_drm.h new file mode 100644 index 0000000..596a9f0 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_drm.h @@ -0,0 +1,716 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. 
+ * + **************************************************************************/ +/* + */ + +#ifndef _PSB_DRM_H_ +#define _PSB_DRM_H_ + +#if defined(__linux__) && !defined(__KERNEL__) +#include +#include "drm_mode.h" +#endif + +#include "ttm/ttm_fence_user.h" +#include "ttm/ttm_placement_user.h" + +/* + * Menlow/MRST graphics driver package version + * a.b.c.xxxx + * a - Product Family: 5 - Linux + * b - Major Release Version: 0 - non-Gallium (Ubuntu); + * 1 - Gallium (Moblin2) + * c - Hotfix Release + * xxxx - Graphics internal build # + */ +#define PSB_PACKAGE_VERSION "5.1.0.32L.0124" + +#define DRM_PSB_SAREA_MAJOR 0 +#define DRM_PSB_SAREA_MINOR 2 +#define PSB_FIXED_SHIFT 16 + +#define DRM_PSB_FIRST_TA_USE_REG 3 +#define DRM_PSB_NUM_TA_USE_REG 5 +#define DRM_PSB_FIRST_RASTER_USE_REG 8 +#define DRM_PSB_NUM_RASTER_USE_REG 7 + +#define PSB_NUM_PIPE 2 + +/* + * Public memory types. + */ + +#define DRM_PSB_MEM_MMU TTM_PL_PRIV1 +#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1 +#define DRM_PSB_MEM_PDS TTM_PL_PRIV2 +#define DRM_PSB_FLAG_MEM_PDS TTM_PL_FLAG_PRIV2 +#define DRM_PSB_MEM_APER TTM_PL_PRIV3 +#define DRM_PSB_FLAG_MEM_APER TTM_PL_FLAG_PRIV3 +#define DRM_PSB_MEM_RASTGEOM TTM_PL_PRIV4 +#define DRM_PSB_FLAG_MEM_RASTGEOM TTM_PL_FLAG_PRIV4 +#define PSB_MEM_RASTGEOM_START 0x30000000 + +typedef int32_t psb_fixed; +typedef uint32_t psb_ufixed; + +static inline int32_t psb_int_to_fixed(int a) +{ + return a * (1 << PSB_FIXED_SHIFT); +} + +static inline uint32_t psb_unsigned_to_ufixed(unsigned int a) +{ + return a << PSB_FIXED_SHIFT; +} +
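+/* + * Example (16.16 fixed point, PSB_FIXED_SHIFT == 16): + * psb_int_to_fixed(3) == 0x00030000 and + * psb_unsigned_to_ufixed(3) == 0x00030000; the low 16 bits hold the + * fraction, so 1.5 would be represented as 0x00018000. + */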
+/*Status of the command sent to the gfx device.*/ +typedef enum { + DRM_CMD_SUCCESS, + DRM_CMD_FAILED, + DRM_CMD_HANG +} drm_cmd_status_t; + +struct drm_psb_scanout { + uint32_t buffer_id; /* DRM buffer object ID */ + uint32_t rotation; /* Rotation as in RR_rotation definitions */ + uint32_t stride; /* Buffer stride in bytes */ + uint32_t depth; /* Buffer depth in bits (NOT bpp) */ + uint32_t width; /* Buffer width in pixels */ + uint32_t height; /* Buffer height in lines */ + int32_t transform[3][3]; /* Buffer composite transform */ + /* (scaling, rot, reflect) */ +}; + +#define DRM_PSB_SAREA_OWNERS 16 +#define DRM_PSB_SAREA_OWNER_2D 0 +#define DRM_PSB_SAREA_OWNER_3D 1 + +#define DRM_PSB_SAREA_SCANOUTS 3 + +struct drm_psb_sarea { + /* Track changes of this data structure */ + + uint32_t major; + uint32_t minor; + + /* Last context to touch part of hw */ + uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS]; + + /* Definition of front- and rotated buffers */ + uint32_t num_scanouts; + struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS]; + + int planeA_x; + int planeA_y; + int planeA_w; + int planeA_h; + int planeB_x; + int planeB_y; + int planeB_w; + int planeB_h; + /* Number of active scanouts */ + uint32_t num_active_scanouts; +}; + +#define PSB_RELOC_MAGIC 0x67676767 +#define PSB_RELOC_SHIFT_MASK 0x0000FFFF +#define PSB_RELOC_SHIFT_SHIFT 0 +#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000 +#define PSB_RELOC_ALSHIFT_SHIFT 16 + +#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated + * buffer + */ +#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated + * buffer, relative to 2D + * base address + */ +#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer, + * relative to PDS base address + */ +#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated + * buffer (for tiling) + */ +#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer + * relative to base reg + */ +#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */ + +struct drm_psb_reloc { + uint32_t reloc_op; + uint32_t where; /* offset in destination buffer */ + uint32_t buffer; /* Buffer reloc applies to */ + uint32_t mask; /* Destination format: */ + uint32_t shift; /* Destination format: */ + uint32_t pre_add; /* Destination format: */ + uint32_t background; /* Destination add */ + uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */ + uint32_t arg0; /* Reloc-op dependent */ + uint32_t arg1; +}; + + +#define PSB_GPU_ACCESS_READ (1ULL << 32) +#define PSB_GPU_ACCESS_WRITE (1ULL << 33) +#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE) + +#define PSB_BO_FLAG_TA (1ULL << 48) +#define PSB_BO_FLAG_SCENE (1ULL << 49) +#define PSB_BO_FLAG_FEEDBACK (1ULL << 50) +#define PSB_BO_FLAG_USSE (1ULL << 51) +#define PSB_BO_FLAG_COMMAND (1ULL << 52) + +#define PSB_ENGINE_2D 0 +#define PSB_ENGINE_VIDEO 1 +#define PSB_ENGINE_RASTERIZER 2 +#define PSB_ENGINE_TA 3 +#define PSB_ENGINE_HPRAST 4 +#define LNC_ENGINE_ENCODE 5 + +/* + * For this fence class we have a couple of + * fence types. + */ + +#define _PSB_FENCE_EXE_SHIFT 0 +#define _PSB_FENCE_TA_DONE_SHIFT 1 +#define _PSB_FENCE_RASTER_DONE_SHIFT 2 +#define _PSB_FENCE_SCENE_DONE_SHIFT 3 +#define _PSB_FENCE_FEEDBACK_SHIFT 4 + +#define _PSB_ENGINE_TA_FENCE_TYPES 5 +#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT) +#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT) +#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT) +#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT) +#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT) + +#define PSB_ENGINE_HPRAST 4 +#define PSB_NUM_ENGINES 6 + +#define PSB_TA_FLAG_FIRSTPASS (1 << 0) +#define PSB_TA_FLAG_LASTPASS (1 << 1) + +#define PSB_FEEDBACK_OP_VISTEST (1 << 0) + +struct drm_psb_extension_rep { + int32_t exists; + uint32_t driver_ioctl_offset; + uint32_t sarea_offset; + uint32_t major; + uint32_t minor; + uint32_t pl; +}; + +#define DRM_PSB_EXT_NAME_LEN 128 + +union drm_psb_extension_arg { + char extension[DRM_PSB_EXT_NAME_LEN]; + struct drm_psb_extension_rep rep; +}; + +struct psb_validate_req { + uint64_t set_flags; + uint64_t clear_flags; + uint64_t next; + uint64_t presumed_gpu_offset; + uint32_t buffer_handle; + uint32_t presumed_flags; + uint32_t group; + uint32_t pad64; +}; + +struct psb_validate_rep { + uint64_t gpu_offset; + uint32_t placement; + uint32_t fence_type_mask; +}; + +#define PSB_USE_PRESUMED (1 << 0) + +struct psb_validate_arg { + int handled; + int ret; + union { + struct psb_validate_req req; + struct psb_validate_rep rep; + } d; +}; + +struct drm_psb_scene { + int handle_valid; + uint32_t handle; + uint32_t w; /* also contains msaa info */ + uint32_t h; + uint32_t num_buffers; +}; + +#define DRM_PSB_FENCE_NO_USER (1 << 0) + +struct psb_ttm_fence_rep { + uint32_t handle; + uint32_t fence_class; + uint32_t fence_type; + uint32_t signaled_types; + uint32_t error; +}; + +typedef struct drm_psb_cmdbuf_arg { + uint64_t buffer_list; /* List of buffers to validate */ + uint64_t clip_rects; /* See i915 counterpart */ + uint64_t scene_arg; + uint64_t fence_arg; + + uint32_t ta_flags; + + uint32_t ta_handle; /* TA reg-value pairs */ + uint32_t ta_offset; + uint32_t ta_size; + + uint32_t oom_handle; + uint32_t oom_offset; + uint32_t oom_size; + + uint32_t cmdbuf_handle; /* 2D Command buffer object or, */ + uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */ + uint32_t cmdbuf_size; + + uint32_t reloc_handle; /* Reloc buffer object */ + uint32_t reloc_offset; + uint32_t 
num_relocs; + + int32_t damage; /* Damage front buffer with cliprects */ + /* Not implemented yet */ + uint32_t fence_flags; + uint32_t engine; + + /* + * Feedback; + */ + + uint32_t feedback_ops; + uint32_t feedback_handle; + uint32_t feedback_offset; + uint32_t feedback_breakpoints; + uint32_t feedback_size; +}drm_psb_cmdbuf_arg_t; + +typedef struct drm_psb_pageflip_arg { + uint32_t flip_offset; + uint32_t stride; +}drm_psb_pageflip_arg_t; + +typedef enum { + LNC_VIDEO_FRAME_SKIP, + LNC_VIDEO_GETPARAM_RAR_REGION_SIZE, + LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET +} lnc_getparam_key_t; + +struct drm_lnc_video_getparam_arg { + lnc_getparam_key_t key; + uint64_t arg; /* argument pointer */ + uint64_t value; /* feed back pointer */ +}; + +struct drm_psb_xhw_init_arg { + uint32_t operation; + uint32_t buffer_handle; +}; + +/* + * Feedback components: + */ + +/* + * Vistest component. The number of these in the feedback buffer + * equals the number of vistest breakpoints + 1. + * This is currently the only feedback component. + */ + +struct drm_psb_vistest { + uint32_t vt[8]; +}; + +struct drm_psb_sizes_arg { + uint32_t ta_mem_size; + uint32_t mmu_size; + uint32_t pds_size; + uint32_t rastgeom_size; + uint32_t tt_size; + uint32_t vram_size; +}; + +struct mrst_timing_info { + uint16_t pixel_clock; + uint8_t hactive_lo; + uint8_t hblank_lo; + uint8_t hblank_hi:4; + uint8_t hactive_hi:4; + uint8_t vactive_lo; + uint8_t vblank_lo; + uint8_t vblank_hi:4; + uint8_t vactive_hi:4; + uint8_t hsync_offset_lo; + uint8_t hsync_pulse_width_lo; + uint8_t vsync_pulse_width_lo:4; + uint8_t vsync_offset_lo:4; + uint8_t vsync_pulse_width_hi:2; + uint8_t vsync_offset_hi:2; + uint8_t hsync_pulse_width_hi:2; + uint8_t hsync_offset_hi:2; + uint8_t width_mm_lo; + uint8_t height_mm_lo; + uint8_t height_mm_hi:4; + uint8_t width_mm_hi:4; + uint8_t hborder; + uint8_t vborder; + uint8_t unknown0:1; + uint8_t hsync_positive:1; + uint8_t vsync_positive:1; + uint8_t separate_sync:2; + uint8_t stereo:1; + uint8_t unknown6:1; + uint8_t interlaced:1; +} __attribute__((packed)); + +struct mrst_panel_descriptor_v1{ + uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */ + /* 0x61190 if MIPI */ + uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/ + uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/ + uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */ + /* Register 0x61210 */ + struct mrst_timing_info DTD;/*18 bytes, Standard definition */ + uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */ + /* Bit 0, Frequency, 15 bits,0 - 32767Hz */ + /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */ + uint16_t Panel_MIPI_Display_Descriptor; + /*16 bits, Defined as follows: */ + /* if MIPI, 0x0000 if LVDS */ + /* Bit 0, Type, 2 bits, */ + /* 0: Type-1, */ + /* 1: Type-2, */ + /* 2: Type-3, */ + /* 3: Type-4 */ + /* Bit 2, Pixel Format, 4 bits */ + /* Bit0: 16bpp (not supported in LNC), */ + /* Bit1: 18bpp loosely packed, */ + /* Bit2: 18bpp packed, */ + /* Bit3: 24bpp */ + /* Bit 6, Reserved, 2 bits, 00b */ + /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */ + /* Bit 14, Reserved, 2 bits, 00b */ +} __attribute__ ((packed)); + +struct mrst_panel_descriptor_v2{ + uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */ + /* 0x61190 if MIPI */ + uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/ + uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/ + uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */ + 
/* Register 0x61210 */ + struct mrst_timing_info DTD;/*18 bytes, Standard definition */ + uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/ + /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/ + uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */ + /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/ + uint16_t Panel_MIPI_Display_Descriptor; + /*16 bits, Defined as follows: */ + /* if MIPI, 0x0000 if LVDS */ + /* Bit 0, Type, 2 bits, */ + /* 0: Type-1, */ + /* 1: Type-2, */ + /* 2: Type-3, */ + /* 3: Type-4 */ + /* Bit 2, Pixel Format, 4 bits */ + /* Bit0: 16bpp (not supported in LNC), */ + /* Bit1: 18bpp loosely packed, */ + /* Bit2: 18bpp packed, */ + /* Bit3: 24bpp */ + /* Bit 6, Reserved, 2 bits, 00b */ + /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */ + /* Bit 14, Reserved, 2 bits, 00b */ +} __attribute__ ((packed)); + +union mrst_panel_rx{ + struct{ + uint16_t NumberOfLanes :2; /*Num of Lanes, 2 bits,0 = 1 lane,*/ + /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */ + uint16_t MaxLaneFreq :3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */ + /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/ + uint16_t SupportedVideoTransferMode :2; /*0: Non-burst only */ + /* 1: Burst and non-burst */ + /* 2/3: Reserved */ + uint16_t HSClkBehavior :1; /*0: Continuous, 1: Non-continuous*/ + uint16_t DuoDisplaySupport :1; /*1 bit,0: No, 1: Yes*/ + uint16_t ECC_ChecksumCapabilities :1;/*1 bit,0: No, 1: Yes*/ + uint16_t BidirectionalCommunication :1;/*1 bit,0: No, 1: Yes */ + uint16_t Rsvd :5;/*5 bits,00000b */ + }panelrx; + uint16_t panel_receiver; +} __attribute__ ((packed)); + +struct gct_ioctl_arg{ + uint8_t bpi; /* boot panel index, number of panel used during boot */ + uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */ + struct mrst_timing_info DTD; /* timing info for the selected panel */ + uint32_t Panel_Port_Control; + uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/ + uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/ + uint32_t PP_Cycle_Delay; + uint16_t Panel_Backlight_Inverter_Descriptor; +} __attribute__ ((packed)); + +struct mrst_vbt{ + char Signature[4]; /*4 bytes,"$GCT" */ + uint8_t Revision; /*1 byte */ + uint8_t Size; /*1 byte */ + uint8_t Checksum; /*1 byte,Calculated*/ + void *mrst_gct; +} __attribute__ ((packed)); + +struct mrst_gct_v1{ /* expect this table to change per customer request*/ + union{ /*8 bits,Defined as follows: */ + struct{ + uint8_t PanelType :4; /*4 bits, Bit field for panels*/ + /* 0 - 3: 0 = LVDS, 1 = MIPI*/ + uint8_t BootPanelIndex :2;/*2 bits,Specifies which of the*/ + /* 4 panels to use by default*/ + uint8_t BootMIPI_DSI_RxIndex :2;/*Specifies which of*/ + /* the 4 MIPI DSI receivers to use*/ + }PD; + uint8_t PanelDescriptor; + }; + struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/ + union mrst_panel_rx panelrx[4]; /* panel receivers*/ +} __attribute__ ((packed)); + +struct mrst_gct_v2{ /* expect this table to change per customer request*/ + union{ /*8 bits,Defined as follows: */ + struct{ + uint8_t PanelType :4; /*4 bits, Bit field for panels*/ + /* 0 - 3: 0 = LVDS, 1 = MIPI*/ + uint8_t BootPanelIndex :2;/*2 bits,Specifies which of the*/ + /* 4 panels to use by default*/ + uint8_t BootMIPI_DSI_RxIndex :2;/*Specifies which of*/ + /* the 4 MIPI DSI receivers to use*/ + }PD; + uint8_t PanelDescriptor; + }; + struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/ + union mrst_panel_rx panelrx[4]; /* panel receivers*/ +} __attribute__ ((packed)); + +#define PSB_DC_CRTC_SAVE 0x01 +#define 
PSB_DC_CRTC_RESTORE 0x02 +#define PSB_DC_OUTPUT_SAVE 0x04 +#define PSB_DC_OUTPUT_RESTORE 0x08 +#define PSB_DC_CRTC_MASK 0x03 +#define PSB_DC_OUTPUT_MASK 0x0C + +struct drm_psb_dc_state_arg { + uint32_t flags; + uint32_t obj_id; +}; + +struct drm_psb_mode_operation_arg { + uint32_t obj_id; + uint16_t operation; + struct drm_mode_modeinfo mode; + void * data; +}; + +struct drm_psb_stolen_memory_arg { + uint32_t base; + uint32_t size; +}; + +/*Display Register Bits*/ +#define REGRWBITS_PFIT_CONTROLS (1 << 0) +#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1) +#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2) +#define REGRWBITS_PIPEASRC (1 << 3) +#define REGRWBITS_PIPEBSRC (1 << 4) +#define REGRWBITS_VTOTAL_A (1 << 5) +#define REGRWBITS_VTOTAL_B (1 << 6) + +/*Overlay Register Bits*/ +#define OV_REGRWBITS_OVADD (1 << 0) +#define OV_REGRWBITS_OGAM_ALL (1 << 1) + +struct drm_psb_register_rw_arg { + uint32_t b_force_hw_on; + + uint32_t display_read_mask; + uint32_t display_write_mask; + + struct { + uint32_t pfit_controls; + uint32_t pfit_autoscale_ratios; + uint32_t pfit_programmed_scale_ratios; + uint32_t pipeasrc; + uint32_t pipebsrc; + uint32_t vtotal_a; + uint32_t vtotal_b; + } display; + + uint32_t overlay_read_mask; + uint32_t overlay_write_mask; + + struct { + uint32_t OVADD; + uint32_t OGAMC0; + uint32_t OGAMC1; + uint32_t OGAMC2; + uint32_t OGAMC3; + uint32_t OGAMC4; + uint32_t OGAMC5; + } overlay; +}; + +#define PSB_HW_COOKIE_SIZE 16 +#define PSB_HW_FEEDBACK_SIZE 8 +#define PSB_HW_OOM_CMD_SIZE (6 + DRM_PSB_NUM_RASTER_USE_REG * 2) + +struct drm_psb_xhw_arg { + uint32_t op; + int ret; + uint32_t irq_op; + uint32_t issue_irq; + uint32_t cookie[PSB_HW_COOKIE_SIZE]; + union { + struct { + uint32_t w; /* also contains msaa info */ + uint32_t h; + uint32_t size; + uint32_t clear_p_start; + uint32_t clear_num_pages; + } si; + struct { + uint32_t fire_flags; + uint32_t hw_context; + uint32_t offset; + uint32_t engine; + uint32_t flags; + uint32_t rca; + uint32_t num_oom_cmds; + uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE]; + } sb; + struct { + uint32_t pages; + uint32_t size; + uint32_t ta_min_size; + } bi; + struct { + uint32_t bca; + uint32_t rca; + uint32_t flags; + } oom; + struct { + uint32_t pt_offset; + uint32_t param_offset; + uint32_t flags; + } bl; + struct { + uint32_t value; + } cl; + uint32_t feedback[PSB_HW_FEEDBACK_SIZE]; + } arg; +}; + +/* Controlling the kernel modesetting buffers */ + +#define DRM_PSB_KMS_OFF 0x00 +#define DRM_PSB_KMS_ON 0x01 +#define DRM_PSB_VT_LEAVE 0x02 +#define DRM_PSB_VT_ENTER 0x03 +#define DRM_PSB_XHW_INIT 0x04 +#define DRM_PSB_XHW 0x05 +#define DRM_PSB_EXTENSION 0x06 +#define DRM_PSB_SIZES 0x07 +#define DRM_PSB_FUSE_REG 0x08 +#define DRM_PSB_VBT 0x09 +#define DRM_PSB_DC_STATE 0x0A +#define DRM_PSB_ADB 0x0B +#define DRM_PSB_MODE_OPERATION 0x0C +#define DRM_PSB_STOLEN_MEMORY 0x0D +#define DRM_PSB_REGISTER_RW 0x0E + +/* + * Xhw commands. 
+ */ + +#define PSB_XHW_INIT 0x00 +#define PSB_XHW_TAKEDOWN 0x01 + +#define PSB_XHW_FIRE_RASTER 0x00 +#define PSB_XHW_SCENE_INFO 0x01 +#define PSB_XHW_SCENE_BIND_FIRE 0x02 +#define PSB_XHW_TA_MEM_INFO 0x03 +#define PSB_XHW_RESET_DPM 0x04 +#define PSB_XHW_OOM 0x05 +#define PSB_XHW_TERMINATE 0x06 +#define PSB_XHW_VISTEST 0x07 +#define PSB_XHW_RESUME 0x08 +#define PSB_XHW_TA_MEM_LOAD 0x09 +#define PSB_XHW_CHECK_LOCKUP 0x0a + +#define PSB_SCENE_FLAG_DIRTY (1 << 0) +#define PSB_SCENE_FLAG_COMPLETE (1 << 1) +#define PSB_SCENE_FLAG_SETUP (1 << 2) +#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3) +#define PSB_SCENE_FLAG_CLEARED (1 << 4) + +#define PSB_TA_MEM_FLAG_TA (1 << 0) +#define PSB_TA_MEM_FLAG_RASTER (1 << 1) +#define PSB_TA_MEM_FLAG_HOSTA (1 << 2) +#define PSB_TA_MEM_FLAG_HOSTD (1 << 3) +#define PSB_TA_MEM_FLAG_INIT (1 << 4) +#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5) + +/*Raster fire will deallocate memory */ +#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0) +/*Isp reset needed due to change in ZLS format */ +#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1) +/*These are set by Xpsb. */ +#define PSB_FIRE_FLAG_XHW_MASK 0xff000000 +/*The task has had at least one OOM and Xpsb will + send back messages on each fire. */ +#define PSB_FIRE_FLAG_XHW_OOM (1 << 24) + +#define PSB_SCENE_ENGINE_TA 0 +#define PSB_SCENE_ENGINE_RASTER 1 +#define PSB_SCENE_NUM_ENGINES 2 + +#define PSB_LOCKUP_RASTER (1 << 0) +#define PSB_LOCKUP_TA (1 << 1) + +struct drm_psb_dev_info_arg { + uint32_t num_use_attribute_registers; +}; +#define DRM_PSB_DEVINFO 0x01 + +#define PSB_MODE_OPERATION_MODE_VALID 0x01 + +#endif diff --git a/drivers/gpu/drm/psb/psb_drv.c b/drivers/gpu/drm/psb/psb_drv.c new file mode 100644 index 0000000..7019b73 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_drv.c @@ -0,0 +1,2239 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. 
+ *
+ **************************************************************************/
+/*
+ */
+
+#include
+#include
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_bios.h"
+#include "psb_msvdx.h"
+#include "lnc_topaz.h"
+#include
+#include "psb_scene.h"
+#include "psb_powermgmt.h"
+#include
+#include
+#include
+
+int drm_psb_debug;
+EXPORT_SYMBOL(drm_psb_debug);
+static int drm_psb_trap_pagefaults;
+static int drm_psb_clock_gating = 2;
+static int drm_psb_ta_mem_size = 32 * 1024;
+
+int drm_psb_disable_vsync = 1;
+int drm_psb_no_fb;
+int drm_psb_force_pipeb;
+int drm_idle_check_interval = 5;
+int drm_psb_ospm = 0;
+int drm_msvdx_pmpolicy = PSB_PMPOLICY_NOPM;
+int drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(clock_gating, "clock gating");
+MODULE_PARM_DESC(no_fb, "Disable FBdev");
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
+MODULE_PARM_DESC(ospm, "switch for ospm support");
+MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy between frames");
+MODULE_PARM_DESC(topaz_pmpolicy, "topaz power management policy between frames");
+module_param_named(debug, drm_psb_debug, int, 0600);
+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
+module_param_named(ospm, drm_psb_ospm, int, 0600);
+module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
+module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
+
+#ifndef CONFIG_X86_PAT
+#warning "Don't build this driver without PAT support!!!"
+#endif
+
+#define psb_PCI_IDS \
+	{0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
+	{0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
+	{0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
+	{0, 0, 0}
+
+static struct pci_device_id pciidlist[] = {
+	psb_PCI_IDS
+};
+
+/*
+ * Standard IOCTLs.
+ */ + +#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE) +#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE) +#define DRM_IOCTL_PSB_VT_LEAVE DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE) +#define DRM_IOCTL_PSB_VT_ENTER DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE) +#define DRM_IOCTL_PSB_XHW_INIT DRM_IOW(DRM_PSB_XHW_INIT + DRM_COMMAND_BASE, \ + struct drm_psb_xhw_init_arg) +#define DRM_IOCTL_PSB_XHW DRM_IO(DRM_PSB_XHW + DRM_COMMAND_BASE) +#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \ + union drm_psb_extension_arg) +#define DRM_IOCTL_PSB_SIZES DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \ + struct drm_psb_sizes_arg) +#define DRM_IOCTL_PSB_FUSE_REG DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, \ + uint32_t) +#define DRM_IOCTL_PSB_VBT DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \ + struct gct_ioctl_arg) +#define DRM_IOCTL_PSB_DC_STATE DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \ + struct drm_psb_dc_state_arg) +#define DRM_IOCTL_PSB_ADB DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, \ + uint32_t) +#define DRM_IOCTL_PSB_MODE_OPERATION DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \ + struct drm_psb_mode_operation_arg) +#define DRM_IOCTL_PSB_STOLEN_MEMORY DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \ + struct drm_psb_stolen_memory_arg) +#define DRM_IOCTL_PSB_REGISTER_RW DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \ + struct drm_psb_register_rw_arg) + +/* + * TTM execbuf extension. + */ + +#define DRM_PSB_CMDBUF (DRM_PSB_REGISTER_RW + 1) +#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1) +#define DRM_IOCTL_PSB_CMDBUF DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \ + struct drm_psb_cmdbuf_arg) +#define DRM_IOCTL_PSB_SCENE_UNREF DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \ + struct drm_psb_scene) +#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE) +#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE) +#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \ + union drm_psb_extension_arg) +/* + * TTM placement user extension. + */ + +#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1) + +#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET) +#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET) +#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET) +#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET) +#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET) +#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET) + +/* + * TTM fence extension. 
+ */ + +#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1) +#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET) +#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET) +#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET) + +#define DRM_PSB_FLIP (DRM_PSB_TTM_FENCE_UNREF + 1) //20 +/* PSB video extension */ +#define DRM_LNC_VIDEO_GETPARAM (DRM_PSB_FLIP + 1) + +#define DRM_IOCTL_PSB_TTM_PL_CREATE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\ + union ttm_pl_create_arg) +#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\ + union ttm_pl_reference_arg) +#define DRM_IOCTL_PSB_TTM_PL_UNREF \ + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\ + struct ttm_pl_reference_req) +#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \ + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\ + struct ttm_pl_synccpu_arg) +#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \ + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\ + struct ttm_pl_waitidle_arg) +#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\ + union ttm_pl_setstatus_arg) +#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \ + union ttm_fence_signaled_arg) +#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \ + union ttm_fence_finish_arg) +#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \ + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \ + struct ttm_fence_unref_arg) +#define DRM_IOCTL_PSB_FLIP \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \ + struct drm_psb_pageflip_arg) +#define DRM_IOCTL_LNC_VIDEO_GETPARAM \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_LNC_VIDEO_GETPARAM, \ + struct drm_lnc_video_getparam_arg) + +static int psb_vt_leave_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_vt_enter_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_sizes_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_vbt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_dc_state_ioctl(struct drm_device *dev, void * data, + struct drm_file *file_priv); +static int psb_adb_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_mode_operation_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +static int psb_register_rw_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#define PSB_IOCTL_DEF(ioctl, func, flags) \ + [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} + +static struct drm_ioctl_desc psb_ioctls[] = { + PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl, + DRM_ROOT_ONLY), + PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, psbfb_kms_on_ioctl, DRM_ROOT_ONLY), + PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl, + DRM_ROOT_ONLY), + PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, psb_vt_enter_ioctl, DRM_ROOT_ONLY), + PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW_INIT, psb_xhw_init_ioctl, + DRM_ROOT_ONLY), + PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW, psb_xhw_ioctl, DRM_ROOT_ONLY), + PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH), + PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH), + 
PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
+		      DRM_AUTH),
+
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
+		      psb_fence_signaled_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_LNC_VIDEO_GETPARAM, lnc_video_getparam, DRM_AUTH)
+};
+
+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
+
+static void get_ci_info(struct drm_psb_private *dev_priv)
+{
+	struct pci_dev *pdev;
+
+	pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
+	if (pdev == NULL) {
+		/* If no CI PCI device is found, set size & addr to 0;
+		 * no CI share buffer can be created. */
+		dev_priv->ci_region_start = 0;
+		dev_priv->ci_region_size = 0;
+		printk(KERN_ERR "can't find CI device, no ci share buffer\n");
+		return;
+	}
+
+	dev_priv->ci_region_start = pci_resource_start(pdev, 1);
+	dev_priv->ci_region_size = pci_resource_len(pdev, 1);
+
+	printk(KERN_INFO "ci_region_start %x ci_region_size %d\n",
+	       dev_priv->ci_region_start, dev_priv->ci_region_size);
+
+	pci_dev_put(pdev);
+
+	return;
+}
+
+static void get_rar_info(struct drm_psb_private *dev_priv)
+{
+	struct pci_dev *pdev;
+	const uint32_t msg_opcode = 0xD0;
+	const uint32_t bunit_port = 0x3;
+	const uint32_t start_addr_reg_offset = 0x10;
+	const uint32_t end_addr_reg_offset = 0x11;
+	const uint32_t msg_byte_write_enable = 0xf;
+	const uint32_t vendor_id = 0x8086;
+	const uint32_t device_id = 0x4110;
+	const uint32_t lnc_mcr_offset = 0xd0;
+	const uint32_t lnc_mdr_offset = 0xd4;
+	uint32_t start_addr_msg, end_addr_msg, start_addr, end_addr;
+
+	pdev = pci_get_subsys(vendor_id, device_id, 0, 0, NULL);
+	if (pdev == NULL) {
+		dev_priv->rar_region_start = 0;
+		dev_priv->rar_region_size = 0;
+		goto out;
+	}
+
+	/* get the start msg */
+	start_addr_msg = (msg_opcode << 24) |
+			 (bunit_port << 16) |
+			 (start_addr_reg_offset << 8) |
+			 (msg_byte_write_enable << 4);
+
+	/* assume the config write/read always succeeds */
+	pci_write_config_dword(pdev, lnc_mcr_offset, start_addr_msg);
+	pci_read_config_dword(pdev, lnc_mdr_offset, &start_addr);
+
+	start_addr &= 0xfffffc00u;
+
+	/* get the end msg */
+	end_addr_msg = (msg_opcode << 24) |
+		       (bunit_port << 16) |
+		       (end_addr_reg_offset << 8) |
+		       (msg_byte_write_enable << 4);
+
+	pci_write_config_dword(pdev, lnc_mcr_offset, end_addr_msg);
+
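+	/*
+	 * The RAR boundary registers live on the B-unit behind the message
+	 * bus rather than in MMIO space: a command dword written to the MCR
+	 * config register (0xd0) selects opcode, target port, register
+	 * offset and byte enables, and the result is then read back from
+	 * the MDR data register (0xd4).  A minimal sketch of the encoding
+	 * used above, with a hypothetical helper name:
+	 *
+	 *	static uint32_t lnc_msg_bus_cmd(uint32_t opcode, uint32_t port,
+	 *					uint32_t reg, uint32_t byte_en)
+	 *	{
+	 *		return (opcode << 24) | (port << 16) |
+	 *		       (reg << 8) | (byte_en << 4);
+	 *	}
+	 *
+	 * mrst_get_fuse_settings() and mrst_get_ospm_io() below use the
+	 * same 0xd0/0xd4 mechanism with pre-encoded command dwords.
+	 */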
pci_read_config_dword(pdev, + lnc_mdr_offset, + &end_addr); + + end_addr |= 0x3ffu; + + dev_priv->rar_region_start = start_addr; + dev_priv->rar_region_size = end_addr - start_addr + 1; + + printk(KERN_INFO "rar for video region [0x%x, 0x%x], size %d\n", + start_addr, end_addr, dev_priv->rar_region_size); +out: + if (pdev != NULL) + pci_dev_put(pdev); + + return; +} + +static void psb_set_uopt(struct drm_psb_uopt *uopt) +{ + uopt->clock_gating = drm_psb_clock_gating; +} + +static void psb_lastclose(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + + if (!dev->dev_private) + return; + + if (dev_priv->ta_mem) + psb_ta_mem_unref(&dev_priv->ta_mem); + mutex_lock(&dev_priv->cmdbuf_mutex); + if (dev_priv->context.buffers) { + vfree(dev_priv->context.buffers); + dev_priv->context.buffers = NULL; + } + mutex_unlock(&dev_priv->cmdbuf_mutex); +} + +static void psb_do_takedown(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->bdev; + + + if (dev_priv->have_mem_rastgeom) { + ttm_bo_clean_mm(bdev, DRM_PSB_MEM_RASTGEOM); + dev_priv->have_mem_rastgeom = 0; + } + if (dev_priv->have_mem_mmu) { + ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU); + dev_priv->have_mem_mmu = 0; + } + if (dev_priv->have_mem_aper) { + ttm_bo_clean_mm(bdev, DRM_PSB_MEM_APER); + dev_priv->have_mem_aper = 0; + } + if (dev_priv->have_tt) { + ttm_bo_clean_mm(bdev, TTM_PL_TT); + dev_priv->have_tt = 0; + } + if (dev_priv->have_vram) { + ttm_bo_clean_mm(bdev, TTM_PL_VRAM); + dev_priv->have_vram = 0; + } + if (dev_priv->have_camera) { + ttm_bo_clean_mm(bdev, TTM_PL_CI); + dev_priv->have_camera = 0; + } + if (dev_priv->have_rar) { + ttm_bo_clean_mm(bdev, TTM_PL_RAR); + dev_priv->have_rar = 0; + } + + psb_msvdx_uninit(dev); + + if (IS_MRST(dev)) + lnc_topaz_uninit(dev); + + if (dev_priv->comm) { + kunmap(dev_priv->comm_page); + dev_priv->comm = NULL; + } + if (dev_priv->comm_page) { + __free_page(dev_priv->comm_page); + dev_priv->comm_page = NULL; + } +} + +void psb_clockgating(struct drm_psb_private *dev_priv) +{ + uint32_t clock_gating; + + if (dev_priv->uopt.clock_gating == 1) { + PSB_DEBUG_INIT("Disabling clock gating.\n"); + + clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED << + _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_DISABLED << + _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_DISABLED << + _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_DISABLED << + _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_DISABLED << + _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_DISABLED << + _PSB_C_CLKGATECTL_USE_CLKG_SHIFT); + + } else if (dev_priv->uopt.clock_gating == 2) { + PSB_DEBUG_INIT("Enabling clock gating.\n"); + + clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO << + _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_AUTO << + _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_AUTO << + _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_AUTO << + _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_AUTO << + _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) | + (_PSB_C_CLKGATECTL_CLKG_AUTO << + _PSB_C_CLKGATECTL_USE_CLKG_SHIFT); + } else + clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL); + +#ifdef FIX_TG_2D_CLOCKGATE + clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK; + clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED << + _PSB_C_CLKGATECTL_2D_CLKG_SHIFT); +#endif + PSB_WSGX32(clock_gating, 
PSB_CR_CLKGATECTL);
+	(void) PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE BIT11
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK (BIT12|BIT13|BIT14)
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+#if 1 /* FIXME remove it after PO */
+#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
+#define FB_GFX_CLK_DIVIDE_SHIFT 20
+#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
+#define FB_VED_CLK_DIVIDE_SHIFT 23
+#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
+#define FB_VEC_CLK_DIVIDE_SHIFT 25
+#endif /* FIXME remove it after PO */
+
+
+void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
+{
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	uint32_t fuse_value = 0;
+	uint32_t fuse_value_tmp = 0;
+
+	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+	dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+	DRM_INFO("internal display is %s\n",
+		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+	DRM_INFO("SKU value is 0x%x. \n", fuse_value);
+	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+	dev_priv->fuse_reg_value = fuse_value;
+
+	switch (fuse_value_tmp) {
+	case FB_SKU_100:
+		DRM_INFO("SKU value is SKU_100. LNC core clock is 200MHz. \n");
+		dev_priv->sku_100 = true;
+		break;
+	case FB_SKU_100L:
+		DRM_INFO("SKU value is SKU_100L. LNC core clock is 100MHz. \n");
+		dev_priv->sku_100L = true;
+		break;
+	case FB_SKU_83:
+		DRM_INFO("SKU value is SKU_83. LNC core clock is 166MHz. \n");
+		dev_priv->sku_83 = true;
+		break;
+	default:
+		DRM_ERROR("Invalid SKU value, SKU value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+
+#if 1 /* FIXME remove it after PO */
+	fuse_value_tmp = (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Gfx clk : core clk = 1:1. \n");
+		break;
+	case 1:
+		DRM_INFO("Gfx clk : core clk = 4:3. \n");
+		break;
+	case 2:
+		DRM_INFO("Gfx clk : core clk = 8:5. \n");
+		break;
+	case 3:
+		DRM_INFO("Gfx clk : core clk = 2:1. \n");
+		break;
+	case 4:
+		DRM_INFO("Gfx clk : core clk = 16:7. \n");
+		break;
+	case 5:
+		DRM_INFO("Gfx clk : core clk = 8:3. \n");
+		break;
+	case 6:
+		DRM_INFO("Gfx clk : core clk = 16:5. \n");
+		break;
+	case 7:
+		DRM_INFO("Gfx clk : core clk = 4:1. \n");
+		break;
+	default:
+		DRM_ERROR("Invalid GFX CLK DIVIDE value, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+
+	fuse_value_tmp = (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Ved clk : core clk = 1:1. \n");
+		break;
+	case 1:
+		DRM_INFO("Ved clk : core clk = 4:3. \n");
+		break;
+	case 2:
+		DRM_INFO("Ved clk : core clk = 8:5. \n");
+		break;
+	case 3:
+		DRM_INFO("Ved clk : core clk = 2:1. \n");
+		break;
+	default:
+		DRM_ERROR("Invalid VED CLK DIVIDE value, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+
+	fuse_value_tmp = (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Vec clk : core clk = 1:1. \n");
+		break;
+	case 1:
+		DRM_INFO("Vec clk : core clk = 4:3. \n");
+		break;
+	case 2:
+		DRM_INFO("Vec clk : core clk = 8:5. \n");
+		break;
+	case 3:
+		DRM_INFO("Vec clk : core clk = 2:1. 
\n"); + break; + default: + DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n", + fuse_value_tmp); + } +#endif /* FIXME remove it after PO */ + + return; +} + +bool mrst_get_vbt_data(struct drm_psb_private *dev_priv) +{ + struct mrst_vbt *pVBT = &dev_priv->vbt_data; + u32 platform_config_address; + u8 *pVBT_virtual; + u8 bpi; + void *pGCT; + struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0,PCI_DEVFN(2,0)); + + /*get the address of the platform config vbt, B0:D2:F0;0xFC */ + pci_read_config_dword(pci_gfx_root,0xFC,&platform_config_address); + DRM_INFO("drm platform config address is %x\n",platform_config_address); + + /* check for platform config address == 0. */ + /* this means fw doesn't support vbt */ + + if(platform_config_address == 0) { + pVBT->Size = 0; + return false; + } + + /* get the virtual address of the vbt */ + pVBT_virtual = ioremap(platform_config_address, sizeof(*pVBT)); + + memcpy(pVBT, pVBT_virtual, sizeof(*pVBT)); + iounmap(pVBT_virtual); /* Free virtual address space */ + + printk(KERN_ALERT "GCT Revision is %x\n",pVBT->Revision); + pVBT->mrst_gct = NULL; + pVBT->mrst_gct = ioremap(platform_config_address + sizeof(*pVBT) - 4, + pVBT->Size - sizeof(*pVBT) + 4); + pGCT = pVBT->mrst_gct; + + switch (pVBT->Revision) { + case 0: + bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex; + dev_priv->gct_data.bpi = bpi; + dev_priv->gct_data.pt = + ((struct mrst_gct_v1 *)pGCT)->PD.PanelType; + memcpy(&dev_priv->gct_data.DTD, + &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD, + sizeof(struct mrst_timing_info)); + dev_priv->gct_data.Panel_Port_Control = + ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control; + break; + case 1: + bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex; + dev_priv->gct_data.bpi = bpi; + dev_priv->gct_data.pt = + ((struct mrst_gct_v2 *)pGCT)->PD.PanelType; + memcpy(&dev_priv->gct_data.DTD, + &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD, + sizeof(struct mrst_timing_info)); + dev_priv->gct_data.Panel_Port_Control = + ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control; + break; + default: + printk(KERN_ALERT "Unknown revision of GCT!\n"); + pVBT->Size = 0; + return false; + } + + return true; +} + +int mrst_get_ospm_io(struct drm_psb_private *dev_priv) +{ + struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); + uint32_t ospm_base = 0; + + pci_write_config_dword(pci_root, 0xD0, 0xd0047800); + pci_read_config_dword(pci_root, 0xD4, &ospm_base); + + dev_priv->ospm_base = ospm_base & 0x0ffff; + + DRM_INFO("ospm base is %x\n", dev_priv->ospm_base); + + return 0; +} + +static int psb_do_init(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->bdev; + struct psb_gtt *pg = dev_priv->pg; + + uint32_t stolen_gtt; + uint32_t tt_start; + uint32_t tt_pages; + + int ret = -ENOMEM; + + dev_priv->ta_mem_pages = + PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, + PAGE_SIZE) >> PAGE_SHIFT; + dev_priv->comm_page = alloc_page(GFP_KERNEL); + if (!dev_priv->comm_page) + goto out_err; + + dev_priv->comm = kmap(dev_priv->comm_page); + memset((void *) dev_priv->comm, 0, PAGE_SIZE); + + set_pages_uc(dev_priv->comm_page, 1); + + /* + * Initialize sequence numbers for the different command + * submission mechanisms. 
+ */ + + dev_priv->sequence[PSB_ENGINE_2D] = 0; + dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0; + dev_priv->sequence[PSB_ENGINE_TA] = 0; + dev_priv->sequence[PSB_ENGINE_HPRAST] = 0; + + if (pg->gatt_start & 0x0FFFFFFF) { + DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n"); + ret = -EINVAL; + goto out_err; + } + + stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4; + stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT; + stolen_gtt = + (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages; + + dev_priv->gatt_free_offset = pg->gatt_start + + (stolen_gtt << PAGE_SHIFT) * 1024; + + /* + * Insert a cache-coherent communications page in mmu space + * just after the stolen area. Will be used for fencing etc. + */ + + dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset; + dev_priv->gatt_free_offset += PAGE_SIZE; + + ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), + &dev_priv->comm_page, + dev_priv->comm_mmu_offset, 1, 0, 0, 0); + + if (ret) + goto out_err; + + if (1 || drm_debug) { + uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID); + uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION); + DRM_INFO("SGX core id = 0x%08x\n", core_id); + DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n", + (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >> + _PSB_CC_REVISION_MAJOR_SHIFT, + (core_rev & _PSB_CC_REVISION_MINOR_MASK) >> + _PSB_CC_REVISION_MINOR_SHIFT); + DRM_INFO + ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n", + (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >> + _PSB_CC_REVISION_MAINTENANCE_SHIFT, + (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >> + _PSB_CC_REVISION_DESIGNER_SHIFT); + } + + spin_lock_init(&dev_priv->irqmask_lock); + dev_priv->fence0_irq_on = 0; + + tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? + pg->gatt_pages : PSB_TT_PRIV0_PLIMIT; + tt_start = dev_priv->gatt_free_offset - pg->gatt_start; + tt_pages -= tt_start >> PAGE_SHIFT; + + dev_priv->sizes.ta_mem_size = drm_psb_ta_mem_size / 1024; + + if (!ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, + pg->vram_stolen_size >> PAGE_SHIFT)) { + dev_priv->have_vram = 1; + dev_priv->sizes.vram_size = + pg->vram_stolen_size / (1024 * 1024); + } + + if (IS_MRST(dev) && + (dev_priv->ci_region_size != 0) && + !ttm_bo_init_mm(bdev, TTM_PL_CI, 0, + dev_priv->ci_region_size >> PAGE_SHIFT)) { + dev_priv->have_camera = 1; + } + + /* since there is always rar region for video, it is ok */ + if (IS_MRST(dev) && + (dev_priv->rar_region_size != 0) && + !ttm_bo_init_mm(bdev, TTM_PL_RAR, 0, + dev_priv->rar_region_size >> PAGE_SHIFT)) { + dev_priv->have_rar = 1; + } + + if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tt_start >> PAGE_SHIFT, + tt_pages)) { + dev_priv->have_tt = 1; + dev_priv->sizes.tt_size = + (tt_pages << PAGE_SHIFT) / (1024 * 1024); + } + + if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, 0x00000000, + (pg->gatt_start - PSB_MEM_MMU_START - + pg->ci_stolen_size) >> PAGE_SHIFT)) { + dev_priv->have_mem_mmu = 1; + dev_priv->sizes.mmu_size = + (pg->gatt_start - PSB_MEM_MMU_START - pg->ci_stolen_size) / + (1024*1024); + } + + if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_RASTGEOM, 0x00000000, + (PSB_MEM_MMU_START - + PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) { + dev_priv->have_mem_rastgeom = 1; + dev_priv->sizes.rastgeom_size = + (PSB_MEM_MMU_START - PSB_MEM_RASTGEOM_START) / + (1024 * 1024); + } +#if 0 + if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) { + if (!ttm_bo_init_mm + (bdev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT, + pg->gatt_pages - PSB_TT_PRIV0_PLIMIT, 1)) { + dev_priv->have_mem_aper = 1; + } + } +#endif + + PSB_DEBUG_INIT("Init 
MSVDX\n"); + psb_msvdx_init(dev); + + if (IS_MRST(dev)) { + PSB_DEBUG_INIT("Init Topaz\n"); + lnc_topaz_init(dev); + } + + return 0; +out_err: + psb_do_takedown(dev); + return ret; +} + +static int psb_intel_opregion_init(struct drm_device *dev) +{ + struct drm_psb_private * dev_priv = dev->dev_private; + /*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/ + u32 opregion_phy; + void * base; + u32 * lid_state; + + dev_priv->lid_state = NULL; + + pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy); + if(opregion_phy == 0) { + DRM_DEBUG("Opregion not supported, won't support lid-switch\n"); + return -ENOTSUPP; + } + DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy); + + base = ioremap(opregion_phy, 8*1024); + if(!base) { + return -ENOMEM; + } + + lid_state = base + 0x01ac; + + DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state); + + dev_priv->lid_state = lid_state; + dev_priv->lid_last_state = *lid_state; + return 0; +} + +#if 0 +/** + * Get a section from BDB by section id, port from i915 driver + */ +static void * psb_intel_vbt_find_section(struct bdb_header * bdb, int section_id) +{ + u8 * base = (u8 *)bdb; + int index = 0; + u16 total, current_size; + u8 current_id; + + index += bdb->header_size; + total = bdb->bdb_size; + + while(index < total) { + current_id = *(base + index); + index++; + current_size = *((u16 *)(base + index)); + index += 2; + if(current_id == section_id) + return base + index; + index += current_size; + } + + return NULL; +} + +static void psb_intel_vbt_parse_backlight_data(struct drm_psb_private * dev_priv, struct bdb_header * bdb) +{ + struct bdb_lvds_backlight * lvds_bl = NULL; + u8 p_type = 0; + void * bl_start = NULL; + struct bdb_lvds_options * lvds_opts + = psb_intel_vbt_find_section(bdb, BDB_LVDS_OPTIONS); + + if(lvds_opts) { + DRM_DEBUG("lvds_options found at %p\n", lvds_opts); + p_type = lvds_opts->panel_type; + } else { + DRM_DEBUG("no lvds_options\n"); + } + + bl_start = psb_intel_vbt_find_section(bdb, BDB_LVDS_BACKLIGHT); + lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type; + + dev_priv->lvds_bl = lvds_bl; +} + +/** + * Retrieve VBT and blc data. 
ported from the i915 driver
+ */
+static int psb_get_vbt_data(struct drm_device * dev)
+{
+	struct drm_psb_private * dev_priv = dev->dev_private;
+	struct pci_dev * pdev = dev->pdev;
+	struct vbt_header * vbt = NULL;
+	struct bdb_header * bdb;
+	u8 __iomem * bios;
+
+	size_t size;
+	int i;
+
+	/*FIXME: unmap it when the driver exits*/
+	bios = pci_map_rom(pdev, &size);
+	if(!bios)
+		return -1;
+
+	for(i = 0; i + 4 < size; i++) {
+		if(!memcmp(bios + i, "$VBT", 4)) {
+			vbt = (struct vbt_header *)(bios + i);
+			break;
+		}
+	}
+
+	if(!vbt) {
+		DRM_ERROR("VBT signature missing\n");
+		pci_unmap_rom(pdev, bios);
+		return -1;
+	}
+
+	bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+	psb_intel_vbt_parse_backlight_data(dev_priv, bdb);
+
+	DRM_INFO("BIOS Data Block found at %p\n", bdb);
+
+	return 0;
+}
+#endif
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	psb_backlight_exit(); /*writes minimum value to backlight HW reg */
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		drm_irq_uninstall(dev);
+	}
+
+	if (drm_psb_no_fb == 0)
+		psb_modeset_cleanup(dev);
+
+	if (dev_priv) {
+		struct ttm_bo_device *bdev = &dev_priv->bdev;
+
+		if(IS_POULSBO(dev))
+			psb_lid_timer_takedown(dev_priv);
+
+		psb_watchdog_takedown(dev_priv);
+		psb_do_takedown(dev);
+		psb_xhw_takedown(dev_priv);
+		psb_scheduler_takedown(&dev_priv->scheduler);
+
+		if (dev_priv->have_mem_pds) {
+			ttm_bo_clean_mm(bdev, DRM_PSB_MEM_PDS);
+			dev_priv->have_mem_pds = 0;
+		}
+		if (dev_priv->have_mem_kernel) {
+			ttm_bo_clean_mm(bdev, DRM_PSB_MEM_KERNEL);
+			dev_priv->have_mem_kernel = 0;
+		}
+
+		if (dev_priv->pf_pd) {
+			psb_mmu_free_pagedir(dev_priv->pf_pd);
+			dev_priv->pf_pd = NULL;
+		}
+		if (dev_priv->mmu) {
+			struct psb_gtt *pg = dev_priv->pg;
+
+			down_read(&pg->sem);
+			psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
+						    (dev_priv->mmu),
+						    pg->gatt_start,
+						    pg->vram_stolen_size >>
+						    PAGE_SHIFT);
+			if (pg->ci_stolen_size != 0)
+				psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
+							    (dev_priv->mmu),
+							    pg->gatt_start - pg->ci_stolen_size,
+							    pg->ci_stolen_size >>
+							    PAGE_SHIFT);
+			if (pg->rar_stolen_size != 0)
+				psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
+							    (dev_priv->mmu),
+							    pg->gatt_start + pg->vram_stolen_size,
+							    pg->rar_stolen_size >>
+							    PAGE_SHIFT);
+			up_read(&pg->sem);
+			psb_mmu_driver_takedown(dev_priv->mmu);
+			dev_priv->mmu = NULL;
+		}
+		psb_gtt_takedown(dev_priv->pg, 1);
+		if (dev_priv->scratch_page) {
+			__free_page(dev_priv->scratch_page);
+			dev_priv->scratch_page = NULL;
+		}
+		if (dev_priv->has_bo_device) {
+			ttm_bo_device_release(&dev_priv->bdev);
+			dev_priv->has_bo_device = 0;
+		}
+		if (dev_priv->has_fence_device) {
+			ttm_fence_device_release(&dev_priv->fdev);
+			dev_priv->has_fence_device = 0;
+		}
+		if (dev_priv->vdc_reg) {
+			iounmap(dev_priv->vdc_reg);
+			dev_priv->vdc_reg = NULL;
+		}
+		if (dev_priv->sgx_reg) {
+			iounmap(dev_priv->sgx_reg);
+			dev_priv->sgx_reg = NULL;
+		}
+		if (dev_priv->msvdx_reg) {
+			iounmap(dev_priv->msvdx_reg);
+			dev_priv->msvdx_reg = NULL;
+		}
+
+		if (IS_MRST(dev)) {
+			if (dev_priv->topaz_reg) {
+				iounmap(dev_priv->topaz_reg);
+				dev_priv->topaz_reg = NULL;
+			}
+		}
+
+		if (dev_priv->tdev)
+			ttm_object_device_release(&dev_priv->tdev);
+
+		if (dev_priv->has_global)
+			psb_ttm_global_release(dev_priv);
+
+		kfree(dev_priv);
+		dev->dev_private = NULL;
+
+		/*destroy VBT data*/
+		if(IS_POULSBO(dev))
+			psb_intel_destory_bios(dev);
+	}
+
+	powermgmt_shutdown();
+
+	return 0;
+}
+
+
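+/*
+ * psb_driver_load() below acquires resources in roughly the reverse order
+ * of the teardown above, and funnels every failure into a single
+ * "goto out_err" that simply calls psb_driver_unload(); the unload path
+ * therefore has to tolerate a partially initialized dev_priv, which is why
+ * every release above is guarded by a NULL or flag check.  The idiom,
+ * sketched with hypothetical setup functions:
+ *
+ *	ret = setup_mmio(dev);
+ *	if (ret)
+ *		goto out_err;
+ *	ret = setup_mmu(dev);
+ *	if (ret)
+ *		goto out_err;
+ *	return 0;
+ * out_err:
+ *	psb_driver_unload(dev);
+ *	return ret;
+ */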
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	struct drm_psb_private *dev_priv;
+	struct ttm_bo_device *bdev;
+	unsigned long resource_start;
+	struct psb_gtt *pg;
+	unsigned long irqflags;
+	int ret = -ENOMEM;
+
+	DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
+
+	if (IS_MRST(dev))
+		DRM_INFO("Running on Moorestown platform!\n");
+	else
+		DRM_INFO("Running on Poulsbo platform!\n");
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev_priv->dev = dev;
+	bdev = &dev_priv->bdev;
+
+	psb_init_ospm(dev_priv);
+
+	ret = psb_ttm_global_init(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_err;
+	dev_priv->has_global = 1;
+
+	dev_priv->tdev = ttm_object_device_init
+		(dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
+	if (unlikely(dev_priv->tdev == NULL))
+		goto out_err;
+
+	powermgmt_init();
+
+	mutex_init(&dev_priv->temp_mem);
+	mutex_init(&dev_priv->cmdbuf_mutex);
+	mutex_init(&dev_priv->reset_mutex);
+	INIT_LIST_HEAD(&dev_priv->context.validate_list);
+	INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
+	psb_init_disallowed();
+
+#ifdef FIX_TG_16
+	atomic_set(&dev_priv->lock_2d, 0);
+	atomic_set(&dev_priv->ta_wait_2d, 0);
+	atomic_set(&dev_priv->ta_wait_2d_irq, 0);
+	atomic_set(&dev_priv->waiters_2d, 0);
+	DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
+#else
+	mutex_init(&dev_priv->mutex_2d);
+#endif
+
+	spin_lock_init(&dev_priv->reloc_lock);
+
+	DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
+	DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
+
+	dev->dev_private = (void *) dev_priv;
+	dev_priv->chipset = chipset;
+	psb_set_uopt(&dev_priv->uopt);
+
+	PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
+	psb_watchdog_init(dev_priv);
+	psb_scheduler_init(dev, &dev_priv->scheduler);
+
+
+	PSB_DEBUG_INIT("Mapping MMIO\n");
+	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+	if (IS_MRST(dev))
+		dev_priv->msvdx_reg =
+			ioremap(resource_start + MRST_MSVDX_OFFSET,
+				PSB_MSVDX_SIZE);
+	else
+		dev_priv->msvdx_reg =
+			ioremap(resource_start + PSB_MSVDX_OFFSET,
+				PSB_MSVDX_SIZE);
+
+	if (!dev_priv->msvdx_reg)
+		goto out_err;
+
+	if (IS_MRST(dev)) {
+		dev_priv->topaz_reg =
+			ioremap(resource_start + LNC_TOPAZ_OFFSET,
+				LNC_TOPAZ_SIZE);
+		if (!dev_priv->topaz_reg)
+			goto out_err;
+	}
+
+	dev_priv->vdc_reg =
+		ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+	if (!dev_priv->vdc_reg)
+		goto out_err;
+
+	if (IS_MRST(dev))
+		dev_priv->sgx_reg =
+			ioremap(resource_start + MRST_SGX_OFFSET,
+				PSB_SGX_SIZE);
+	else
+		dev_priv->sgx_reg =
+			ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
+
+	if (!dev_priv->sgx_reg)
+		goto out_err;
+
+	if (IS_MRST(dev)) {
+		mrst_get_fuse_settings(dev_priv);
+		mrst_get_vbt_data(dev_priv);
+	} else {
+		psb_intel_opregion_init(dev);
+		psb_intel_init_bios(dev);
+	}
+
+	PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
+
+	if (IS_MRST(dev))
+		mrst_get_ospm_io(dev_priv);
+
+	if (IS_MRST(dev)) {
+		get_ci_info(dev_priv);
+		get_rar_info(dev_priv);
+	}
+
+	psb_clockgating(dev_priv);
+
+	ret = psb_ttm_fence_device_init(&dev_priv->fdev);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	dev_priv->has_fence_device = 1;
+	ret = ttm_bo_device_init(bdev,
+				 dev_priv->mem_global_ref.object,
+				 &psb_ttm_bo_driver,
+				 DRM_PSB_FILE_PAGE_OFFSET);
+	if (unlikely(ret != 0))
+		goto out_err;
+	dev_priv->has_bo_device = 1;
+	ttm_lock_init(&dev_priv->ttm_lock);
+
+	ret = -ENOMEM;
+
+	dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+	if (!dev_priv->scratch_page)
+		goto out_err;
+
+
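+	/*
+	 * The scratch page backs otherwise-unused GTT entries, so it is
+	 * allocated GFP_DMA32 (presumably because GTT entries hold 32-bit
+	 * addresses) and is switched to an uncached mapping below, since
+	 * the GPU accesses it without snooping the CPU caches.  The
+	 * teardown in psb_driver_unload() only calls __free_page(); a
+	 * stricter sketch of the pattern would restore the attribute first:
+	 *
+	 *	struct page *p = alloc_page(GFP_DMA32 | __GFP_ZERO);
+	 *	if (!p)
+	 *		return -ENOMEM;
+	 *	set_pages_uc(p, 1);
+	 *	... use the page ...
+	 *	set_pages_wb(p, 1);
+	 *	__free_page(p);
+	 */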
set_pages_uc(dev_priv->scratch_page, 1); + + dev_priv->pg = psb_gtt_alloc(dev); + if (!dev_priv->pg) + goto out_err; + + ret = psb_gtt_init(dev_priv->pg, 0); + if (ret) + goto out_err; + + dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg, + drm_psb_trap_pagefaults, 0, + dev_priv); + if (!dev_priv->mmu) + goto out_err; + + pg = dev_priv->pg; + + /* + * Make sgx MMU aware of the stolen memory area we call VRAM. + */ + + down_read(&pg->sem); + ret = + psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd + (dev_priv->mmu), + pg->stolen_base >> PAGE_SHIFT, + pg->gatt_start, + pg->vram_stolen_size >> PAGE_SHIFT, 0); + up_read(&pg->sem); + if (ret) + goto out_err; + + /* + * Make sgx MMU aware of the CI stolen memory area. + */ + if (dev_priv->pg->ci_stolen_size != 0) { + down_read(&pg->sem); + ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd + (dev_priv->mmu), + dev_priv->ci_region_start >> PAGE_SHIFT, + pg->gatt_start - pg->ci_stolen_size, + pg->ci_stolen_size >> PAGE_SHIFT, 0); + up_read(&pg->sem); + if (ret) + goto out_err; + } + + /* + * Make sgx MMU aware of the rar stolen memory area. + */ + if (dev_priv->pg->rar_stolen_size != 0) { + down_read(&pg->sem); + ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu), + dev_priv->rar_region_start >> PAGE_SHIFT, + pg->gatt_start + pg->vram_stolen_size, + pg->rar_stolen_size >> PAGE_SHIFT, 0); + up_read(&pg->sem); + if (ret) + goto out_err; + } + + dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); + if (!dev_priv->pf_pd) + goto out_err; + + /* + * Make all presumably unused requestors page-fault by making them + * use context 1 which does not have any valid mappings. + */ + + PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0); + PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1); + PSB_RSGX32(PSB_CR_BIF_BANK1); + + psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0); + psb_mmu_set_pd_context(dev_priv->pf_pd, 1); + psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK); + + psb_init_2d(dev_priv); + + ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_KERNEL, 0x00000000, + (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START) + >> PAGE_SHIFT); + if (ret) + goto out_err; + dev_priv->have_mem_kernel = 1; + + ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_PDS, 0x00000000, + (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START) + >> PAGE_SHIFT); + if (ret) + goto out_err; + dev_priv->have_mem_pds = 1; + dev_priv->sizes.pds_size = + (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START) / (1024 * 1024); + PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n"); + + ret = psb_do_init(dev); + if (ret) + return ret; + + ret = psb_xhw_init(dev); + if (ret) + return ret; + + PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE); + PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE); + + /** + * Init lid switch timer. + * NOTE: must do this after psb_intel_opregion_init + * and psb_backlight_init + */ + if(IS_POULSBO(dev) && dev_priv->lid_state) { + psb_lid_timer_init(dev_priv); + } + + /*initialize the MSI for MRST*/ + if (IS_MRST(dev)) { + if (pci_enable_msi(dev->pdev)) { + DRM_ERROR("Enable MSI for MRST failed!\n"); + } else { + PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n", + dev->pdev->irq); + /* pci_write_config_word(pdev, 0x04, 0x07); */ + } + } + + //Init vblank module in DRM. Must be done before call to drm_irq_install() + ret = drm_vblank_init(dev, PSB_NUM_PIPE); + if (ret) + goto out_err; + + /* + * Install interrupt handlers prior to powering off SGX or else we will + * crash. 
+ */
+	dev_priv->vdc_irq_mask = 0;
+	dev_priv->sgx_irq_mask = 0;
+	dev_priv->sgx2_irq_mask = 0;
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_install(dev);
+#if 0
+	/*set SGX in low power mode*/
+	if (drm_psb_ospm && IS_MRST(dev))
+		if (psb_try_power_down_sgx(dev))
+			PSB_DEBUG_PM("initialize SGX to low power failed\n");
+	if (IS_MRST(dev))
+		if (psb_try_power_down_msvdx(dev))
+			PSB_DEBUG_PM("Initialize MSVDX to low power failed\n");
+	if (IS_MRST(dev)) {
+		if (psb_try_power_down_topaz(dev))
+			PSB_DEBUG_PM("Initialize TOPAZ to low power failed\n");
+	}
+#endif
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+	dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+	if (drm_psb_no_fb == 0) {
+		psb_modeset_init(dev);
+		drm_helper_initial_config(dev);
+	}
+
+	/*must be after mrst_get_fuse_settings()*/
+	ret = psb_backlight_init(dev);
+	if (ret)
+		return ret;
+
+	/*dri_page_flipping is set when flipping is enabled*/
+	dev_priv->dri_page_flipping = 0;
+
+	return 0;
+out_err:
+	psb_driver_unload(dev);
+	return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+	return 0;
+}
+
+int psb_extension_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	union drm_psb_extension_arg *arg = data;
+	struct drm_psb_extension_rep *rep = &arg->rep;
+
+	/*tricky fix for sgx HW access from user space when XPSB is loaded*/
+	static int firsttime = 1;
+	if (firsttime) {
+		firsttime = 0;
+		powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true);
+	}
+	if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+	if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+	if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	/*return the page flipping ioctl offset*/
+	if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_FLIP;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	/* return the video getparam ioctl offset */
+	if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_LNC_VIDEO_GETPARAM;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	rep->exists = 0;
+	return 0;
+}
+
+/*keep following code*/
+#if 0
+static void psb_display_states_restore(struct drm_device * dev)
+{
+	struct drm_crtc * crtc = NULL;
+	struct drm_connector * connector = NULL;
+	struct drm_crtc_helper_funcs * crtc_helper_funcs = NULL;
+	struct drm_encoder * encoder = NULL;
+	struct drm_encoder_helper_funcs * encoder_helper_funcs = NULL;
+	struct drm_psb_private * dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+
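+	/*
+	 * Restore order matters in this (currently disabled) path:
+	 * connector state is restored before the CRTC state so that a
+	 * re-enabled CRTC finds its outputs already programmed, and
+	 * everything runs under mode_config.mutex to keep concurrent
+	 * modesets out.  The VGA clock divisor registers touched at the
+	 * end only apply to Poulsbo, hence the early return on Moorestown.
+	 */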
mutex_lock(&dev->mode_config.mutex); +#if 0 + /*Output dpms off*/ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + encoder_helper_funcs = + (struct drm_encoder_helper_funcs *)encoder->helper_private; + if(encoder_helper_funcs && encoder_helper_funcs->dpms) + encoder_helper_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); + } + + psb_intel_wait_for_vblank(dev); + + /*CRTC dpms off*/ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + crtc_helper_funcs = + (struct drm_crtc_helper_funcs *)crtc->helper_private; + //if(crtc_helper_funcs && crtc_helper_funcs->dpms) + if(drm_helper_crtc_in_use(crtc)) + crtc_helper_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + } + + /*Restore CRTC states*/ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + //if(crtc->funcs && crtc->funcs->restore) + if(drm_helper_crtc_in_use(crtc)) + crtc->funcs->restore(crtc); + } +#endif + + /*Restore outputs states*/ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if(connector->funcs && connector->funcs->restore) + connector->funcs->restore(connector); + } + + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if(drm_helper_crtc_in_use(crtc)) + crtc->funcs->restore(crtc); + } + + mutex_unlock(&dev->mode_config.mutex); + + if(IS_MRST(dev)) + return; + + REG_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); + REG_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0); + REG_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1); + REG_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV); + + /*TODO: SWF registers restore*/ +} + +static void psb_display_states_save(struct drm_device * dev) +{ + struct drm_crtc * crtc = NULL; + struct drm_connector * connector = NULL; + struct drm_psb_private * dev_priv = + (struct drm_psb_private *)dev->dev_private; + + mutex_lock(&dev->mode_config.mutex); + /*Save output states*/ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if(connector->funcs && connector->funcs->save) + connector->funcs->save(connector); + } + +#if 1 + /*Restore CRTC states*/ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + //if(crtc->funcs && crtc->funcs->save) + if(drm_helper_crtc_in_use(crtc)) + crtc->funcs->save(crtc); + } +#endif + + mutex_unlock(&dev->mode_config.mutex); + + if(IS_MRST(dev)) + return; + + dev_priv->saveVCLK_DIVISOR_VGA0 = REG_READ(VCLK_DIVISOR_VGA0); + dev_priv->saveVCLK_DIVISOR_VGA1 = REG_READ(VCLK_DIVISOR_VGA1); + dev_priv->saveVCLK_POST_DIV = REG_READ(VCLK_POST_DIV); + dev_priv->saveVGACNTRL = REG_READ(VGACNTRL); + + /*TODO: save SWF here if necessary*/ +} +#endif + +static int psb_vt_leave_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct ttm_bo_device *bdev = &dev_priv->bdev; + struct ttm_mem_type_manager *man; + int clean; + int ret; + + ret = ttm_write_lock(&dev_priv->ttm_lock, 1, + psb_fpriv(file_priv)->tfile); + if (unlikely(ret != 0)) + return ret; + + powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true); + + /* + * Clean VRAM and TT for fbdev. 
+ */ + + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); + if (unlikely(ret != 0)) + goto out_unlock; + + man = &bdev->man[TTM_PL_VRAM]; + spin_lock(&bdev->lru_lock); + clean = drm_mm_clean(&man->manager); + spin_unlock(&bdev->lru_lock); + if (unlikely(!clean)) + DRM_INFO("Notice: VRAM was not clean after VT switch, if you are running fbdev please ignore.\n"); + + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT); + if (unlikely(ret != 0)) + goto out_unlock; + + man = &bdev->man[TTM_PL_TT]; + spin_lock(&bdev->lru_lock); + clean = drm_mm_clean(&man->manager); + spin_unlock(&bdev->lru_lock); + if (unlikely(!clean)) + DRM_INFO("Warning: GATT was not clean after VT switch.\n"); + + ttm_bo_swapout_all(&dev_priv->bdev); + + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return 0; +out_unlock: + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + (void) ttm_write_unlock(&dev_priv->ttm_lock, + psb_fpriv(file_priv)->tfile); + return ret; +} + +static int psb_vt_enter_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + return ttm_write_unlock(&dev_priv->ttm_lock, + psb_fpriv(file_priv)->tfile); +} + +static int psb_sizes_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct drm_psb_sizes_arg *arg = + (struct drm_psb_sizes_arg *) data; + + *arg = dev_priv->sizes; + return 0; +} + +static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + uint32_t *arg = data; + + *arg = dev_priv->fuse_reg_value; + return 0; +} +static int psb_vbt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct gct_ioctl_arg *pGCT = data; + + memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT)); + + return 0; +} + +static int psb_dc_state_ioctl(struct drm_device *dev, void * data, + struct drm_file *file_priv) +{ + uint32_t flags; + uint32_t obj_id; + struct drm_mode_object * obj; + struct drm_connector * connector; + struct drm_crtc * crtc; + struct drm_psb_dc_state_arg * arg = + (struct drm_psb_dc_state_arg *)data; + + if(IS_MRST(dev)) + return 0; + + flags = arg->flags; + obj_id = arg->obj_id; + + if(flags & PSB_DC_CRTC_MASK) { + obj = drm_mode_object_find(dev, obj_id, + DRM_MODE_OBJECT_CRTC); + if(! obj) { + DRM_DEBUG("Invalid CRTC object.\n"); + return -EINVAL; + } + + crtc = obj_to_crtc(obj); + + mutex_lock(&dev->mode_config.mutex); + if(drm_helper_crtc_in_use(crtc)) { + if(flags & PSB_DC_CRTC_SAVE) + crtc->funcs->save(crtc); + else + crtc->funcs->restore(crtc); + } + mutex_unlock(&dev->mode_config.mutex); + + return 0; + } else if (flags & PSB_DC_OUTPUT_MASK) { + obj = drm_mode_object_find(dev, obj_id, + DRM_MODE_OBJECT_CONNECTOR); + if(! 
obj) { + DRM_DEBUG("Invalid connector id.\n"); + return -EINVAL; + } + + connector = obj_to_connector(obj); + if(flags & PSB_DC_OUTPUT_SAVE) + connector->funcs->save(connector); + else + connector->funcs->restore(connector); + + return 0; + } + + DRM_DEBUG("Bad flags 0x%x\n", flags); + return -EINVAL; +} + +static int psb_adb_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + uint32_t *arg = data; + struct backlight_device bd; + dev_priv->blc_adj1 = *arg; +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + bd.props.brightness = psb_get_brightness(&bd); + psb_set_brightness(&bd); +#endif + return 0; +} + +static int psb_mode_operation_ioctl(struct drm_device *dev, void *data, + struct drm_file * file_priv) +{ + uint32_t obj_id; + uint16_t op; + struct drm_mode_modeinfo * umode; + struct drm_display_mode * mode; + struct drm_psb_mode_operation_arg * arg; + struct drm_mode_object * obj; + struct drm_connector * connector; + struct drm_connector_helper_funcs * connector_funcs; + int ret = 0; + int resp = MODE_OK; + + if (IS_MRST(dev)) + return 0; + + arg = (struct drm_psb_mode_operation_arg *)data; + obj_id = arg->obj_id; + op = arg->operation; + umode = &arg->mode; + + mutex_lock(&dev->mode_config.mutex); + + obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR); + if(!obj) { + ret = - EINVAL; + goto mode_op_out; + } + + connector = obj_to_connector(obj); + + mode = drm_mode_create(dev); + if(!mode) { + ret = -ENOMEM; + goto mode_op_out; + } + + //drm_crtc_convert_umode(mode, umode); + { + mode->clock = umode->clock; + mode->hdisplay = umode->hdisplay; + mode->hsync_start = umode->hsync_start; + mode->hsync_end = umode->hsync_end; + mode->htotal = umode->htotal; + mode->hskew = umode->hskew; + mode->vdisplay = umode->vdisplay; + mode->vsync_start = umode->vsync_start; + mode->vsync_end = umode->vsync_end; + mode->vtotal = umode->vtotal; + mode->vscan = umode->vscan; + mode->vrefresh = umode->vrefresh; + mode->flags = umode->flags; + mode->type = umode->type; + strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN); + mode->name[DRM_DISPLAY_MODE_LEN-1] = 0; + } + + connector_funcs = (struct drm_connector_helper_funcs *) + connector->helper_private; + + switch(op) { + case PSB_MODE_OPERATION_MODE_VALID: + if(connector_funcs->mode_valid) { + resp = connector_funcs->mode_valid(connector, mode); + arg->data = (void *)resp; + } + break; + default: + DRM_DEBUG("Unsupported psb mode operation"); + ret = -EOPNOTSUPP; + goto mode_op_err; + } + +mode_op_err: + drm_mode_destroy(dev, mode); +mode_op_out: + mutex_unlock(&dev->mode_config.mutex); + return ret; +} + +static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct drm_psb_stolen_memory_arg *arg = data; + + arg->base = dev_priv->pg->stolen_base; + arg->size = dev_priv->pg->vram_stolen_size; + + return 0; +} + +static int psb_register_rw_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct drm_psb_register_rw_arg *arg = data; + + if (arg->display_write_mask != 0) { + if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) { + if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS) + PSB_WVDC32(arg->display.pfit_controls, PFIT_CONTROL); + if (arg->display_write_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS) + PSB_WVDC32(arg->display.pfit_autoscale_ratios, 
PFIT_AUTO_RATIOS); + if (arg->display_write_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) + PSB_WVDC32(arg->display.pfit_programmed_scale_ratios, PFIT_PGM_RATIOS); + if (arg->display_write_mask & REGRWBITS_PIPEASRC) + PSB_WVDC32(arg->display.pipeasrc, PIPEASRC); + if (arg->display_write_mask & REGRWBITS_PIPEBSRC) + PSB_WVDC32(arg->display.pipebsrc, PIPEBSRC); + if (arg->display_write_mask & REGRWBITS_VTOTAL_A) + PSB_WVDC32(arg->display.vtotal_a, VTOTAL_A); + if (arg->display_write_mask & REGRWBITS_VTOTAL_B) + PSB_WVDC32(arg->display.vtotal_b, VTOTAL_B); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } else { + if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS) + dev_priv->savePFIT_CONTROL = arg->display.pfit_controls; + if (arg->display_write_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS) + dev_priv->savePFIT_AUTO_RATIOS = arg->display.pfit_autoscale_ratios; + if (arg->display_write_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) + dev_priv->savePFIT_PGM_RATIOS = arg->display.pfit_programmed_scale_ratios; + if (arg->display_write_mask & REGRWBITS_PIPEASRC) + dev_priv->savePIPEASRC = arg->display.pipeasrc; + if (arg->display_write_mask & REGRWBITS_PIPEBSRC) + dev_priv->savePIPEBSRC = arg->display.pipebsrc; + if (arg->display_write_mask & REGRWBITS_VTOTAL_A) + dev_priv->saveVTOTAL_A = arg->display.vtotal_a; + if (arg->display_write_mask & REGRWBITS_VTOTAL_B) + dev_priv->saveVTOTAL_B = arg->display.vtotal_b; + } + } + + if (arg->display_read_mask != 0) { + if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) { + if (arg->display_read_mask & REGRWBITS_PFIT_CONTROLS) + arg->display.pfit_controls = PSB_RVDC32(PFIT_CONTROL); + if (arg->display_read_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS) + arg->display.pfit_autoscale_ratios = PSB_RVDC32(PFIT_AUTO_RATIOS); + if (arg->display_read_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) + arg->display.pfit_programmed_scale_ratios = PSB_RVDC32(PFIT_PGM_RATIOS); + if (arg->display_read_mask & REGRWBITS_PIPEASRC) + arg->display.pipeasrc = PSB_RVDC32(PIPEASRC); + if (arg->display_read_mask & REGRWBITS_PIPEBSRC) + arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC); + if (arg->display_read_mask & REGRWBITS_VTOTAL_A) + arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A); + if (arg->display_read_mask & REGRWBITS_VTOTAL_B) + arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } else { + if (arg->display_read_mask & REGRWBITS_PFIT_CONTROLS) + arg->display.pfit_controls = dev_priv->savePFIT_CONTROL; + if (arg->display_read_mask & REGRWBITS_PFIT_AUTOSCALE_RATIOS) + arg->display.pfit_autoscale_ratios = dev_priv->savePFIT_AUTO_RATIOS; + if (arg->display_read_mask & REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) + arg->display.pfit_programmed_scale_ratios = dev_priv->savePFIT_PGM_RATIOS; + if (arg->display_read_mask & REGRWBITS_PIPEASRC) + arg->display.pipeasrc = dev_priv->savePIPEASRC; + if (arg->display_read_mask & REGRWBITS_PIPEBSRC) + arg->display.pipebsrc = dev_priv->savePIPEBSRC; + if (arg->display_read_mask & REGRWBITS_VTOTAL_A) + arg->display.vtotal_a = dev_priv->saveVTOTAL_A; + if (arg->display_read_mask & REGRWBITS_VTOTAL_B) + arg->display.vtotal_b = dev_priv->saveVTOTAL_B; + } + } + + if (arg->overlay_write_mask != 0) { + if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) { + if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) { + PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5); + PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4); + PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3); + 
PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2); + PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1); + PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0); + } + if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) { + PSB_WVDC32(arg->overlay.OVADD, OV_OVADD); + } + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } else { + if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) { + dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5; + dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4; + dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3; + dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2; + dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1; + dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0; + } + if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) + dev_priv->saveOV_OVADD = arg->overlay.OVADD; + } + } + + if (arg->overlay_read_mask != 0) { + if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, arg->b_force_hw_on)) { + if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) { + arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5); + arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4); + arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3); + arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2); + arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1); + arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0); + } + if (arg->overlay_read_mask & OV_REGRWBITS_OVADD) + arg->overlay.OVADD = PSB_RVDC32(OV_OVADD); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } else { + if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) { + arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5; + arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4; + arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3; + arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2; + arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1; + arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0; + } + if (arg->overlay_read_mask & OV_REGRWBITS_OVADD) + arg->overlay.OVADD = dev_priv->saveOV_OVADD; + } + } + + return 0; +} + +/* always available as we are SIGIO'd */ +static unsigned int psb_poll(struct file *filp, + struct poll_table_struct *wait) +{ + return POLLIN | POLLRDNORM; +} + +int psb_driver_open(struct drm_device *dev, struct drm_file *priv) +{ + return 0; +} + +static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->minor->dev; + unsigned int nr = DRM_IOCTL_NR(cmd); + long ret; + + /* + * The driver private ioctls and TTM ioctls should be + * thread-safe. + */ + + if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) + && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { + struct drm_ioctl_desc *ioctl = &psb_ioctls[nr - DRM_COMMAND_BASE]; + + if (unlikely(ioctl->cmd != cmd)) { + DRM_ERROR("Invalid drm command %d\n", + nr - DRM_COMMAND_BASE); + return -EINVAL; + } + + return drm_unlocked_ioctl(filp, cmd, arg); + } + /* + * Not all old drm ioctls are thread-safe. + */ + + lock_kernel(); + ret = drm_unlocked_ioctl(filp, cmd, arg); + unlock_kernel(); + return ret; +} + +static int psb_ospm_read(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + int len = 0; +#ifdef OSPM_STAT + unsigned long d0 = 0; + unsigned long d0i3 = 0; + unsigned long d3 = 0; +#endif + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("D0i3:%s ", drm_psb_ospm ? 
"enabled" : "disabled"); + +#ifdef OSPM_STAT + switch (dev_priv->graphics_state) { + case PSB_PWR_STATE_D0: + DRM_PROC_PRINT("GFX:%s\n", "D0"); + break; + case PSB_PWR_STATE_D0i3: + DRM_PROC_PRINT("GFX:%s\n", "D0i3"); + break; + case PSB_PWR_STATE_D3: + DRM_PROC_PRINT("GFX:%s\n", "D3"); + break; + default: + DRM_PROC_PRINT("GFX:%s\n", "unknown"); + } + + d0 = dev_priv->gfx_d0_time * 1000 / HZ; + d0i3 = dev_priv->gfx_d0i3_time * 1000 / HZ; + d3 = dev_priv->gfx_d3_time * 1000 / HZ; + switch (dev_priv->graphics_state) { + case PSB_PWR_STATE_D0: + d0 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ; + break; + case PSB_PWR_STATE_D0i3: + d0i3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ; + break; + case PSB_PWR_STATE_D3: + d3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ; + break; + } + DRM_PROC_PRINT("GFX(cnt/ms):\n"); + DRM_PROC_PRINT("D0:%lu/%lu, D0i3:%lu/%lu, D3:%lu/%lu \n", + dev_priv->gfx_d0_cnt, d0, dev_priv->gfx_d0i3_cnt, d0i3, + dev_priv->gfx_d3_cnt, d3); +#endif + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/* When a client dies: + * - Check for and clean up flipped page state + */ +void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv) +{ + unsigned long irqflags; + int pipe, i; + if (dev->dev_private) { + struct drm_psb_private *dev_priv = dev->dev_private; + if (dev_priv->dri_page_flipping && dev_priv->current_page == 1) { + for (pipe=0; pipe<2; pipe++) { + if (dev_priv->pipe_active[pipe] == 1) { + dev_priv->flip_start[pipe] = dev_priv->saved_start[pipe]; + dev_priv->flip_offset[pipe] = dev_priv->saved_offset[pipe]; + dev_priv->flip_stride[pipe] = dev_priv->saved_stride[pipe]; + psb_flip_set_base(dev_priv, pipe); + } + } + dev_priv->dri_page_flipping = 0; + dev_priv->current_page = 0; + } + + drm_psb_disable_vsync = 1; + dev_priv->vdc_irq_mask &= ~(_PSB_VSYNC_PIPEA_FLAG | _PSB_VSYNC_PIPEB_FLAG); + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); + + for (i = 0; i < dev->num_crtcs; i++) { + if (atomic_read(&dev->vblank_refcount[i]) == 0 && + dev->vblank_enabled[i]) { + DRM_DEBUG("disabling vblank on crtc %d\n", i); + dev->last_vblank[i] = + dev->driver->get_vblank_counter(dev, i); + dev->vblank_enabled[i] = 0; + } + } + } +} + +static void psb_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + drm_put_dev(dev); +} + +static int psb_proc_init(struct drm_minor *minor) +{ + struct proc_dir_entry *ent; + ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->proc_root, + psb_ospm_read, minor); + if (ent) + return 0; + else + return -1; +} + +static void psb_proc_cleanup(struct drm_minor *minor) +{ + remove_proc_entry(OSPM_PROC_ENTRY, minor->proc_root); + return; +} + +static struct drm_driver driver = { + .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | DRIVER_MODESET, + .load = psb_driver_load, + .unload = psb_driver_unload, + + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = psb_ioctls, + .device_is_agp = psb_driver_device_is_agp, + .irq_preinstall = psb_irq_preinstall, + .irq_postinstall = psb_irq_postinstall, + .irq_uninstall = psb_irq_uninstall, + .irq_handler = psb_irq_handler, + .enable_vblank = psb_enable_vblank, + .disable_vblank = psb_disable_vblank, + .firstopen = NULL, + .lastclose = psb_lastclose, + .open = psb_driver_open, + .proc_init = psb_proc_init, + .proc_cleanup = psb_proc_cleanup, + .preclose = 
psb_driver_preclose, + .fops = { + .owner = THIS_MODULE, + .open = psb_open, + .release = psb_release, + .unlocked_ioctl = psb_unlocked_ioctl, + .mmap = psb_mmap, + .poll = psb_poll, + .fasync = drm_fasync, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .resume = powermgmt_resume, + .suspend = powermgmt_suspend, + .probe = psb_probe, + .remove = psb_remove, + }, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = PSB_DRM_DRIVER_DATE, + .major = PSB_DRM_DRIVER_MAJOR, + .minor = PSB_DRM_DRIVER_MINOR, + .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL +}; + +static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static int __init psb_init(void) +{ + driver.num_ioctls = psb_max_ioctl; + return drm_init(&driver); +} + +static void __exit psb_exit(void) +{ + drm_exit(&driver); +} + +late_initcall(psb_init); +module_exit(psb_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/psb/psb_drv.h b/drivers/gpu/drm/psb/psb_drv.h new file mode 100644 index 0000000..9b2c4e1 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_drv.h @@ -0,0 +1,1224 @@ +/************************************************************************** + *Copyright (c) 2007-2008, Intel Corporation. + *All Rights Reserved. + * + *This program is free software; you can redistribute it and/or modify it + *under the terms and conditions of the GNU General Public License, + *version 2, as published by the Free Software Foundation. + * + *This program is distributed in the hope it will be useful, but WITHOUT + *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + *more details. + * + *You should have received a copy of the GNU General Public License along with + *this program; if not, write to the Free Software Foundation, Inc., + *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + *develop this driver. + * + **************************************************************************/ +/* + */ +#ifndef _PSB_DRV_H_ +#define _PSB_DRV_H_ + +#include +#include "psb_drm.h" +#include "psb_reg.h" +#include "psb_schedule.h" +#include "psb_intel_drv.h" +#include "psb_hotplug.h" +#include "psb_dpst.h" +#include "ttm/ttm_object.h" +#include "ttm/ttm_fence_driver.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_lock.h" + +extern struct ttm_bo_driver psb_ttm_bo_driver; + +enum { + CHIP_PSB_8108 = 0, + CHIP_PSB_8109 = 1, + CHIP_MRST_4100 = 2 +}; + +/* + *Hardware bugfixes + */ + +#define FIX_TG_16 +#define FIX_TG_2D_CLOCKGATE +#define OSPM_STAT + +#define DRIVER_NAME "psb" +#define DRIVER_DESC "drm driver for the Intel GMA500" +#define DRIVER_AUTHOR "Tungsten Graphics Inc." +#define OSPM_PROC_ENTRY "ospm" + +#define PSB_DRM_DRIVER_DATE "2009-03-10" +#define PSB_DRM_DRIVER_MAJOR 8 +#define PSB_DRM_DRIVER_MINOR 1 +#define PSB_DRM_DRIVER_PATCHLEVEL 0 + +/* + *TTM driver private offsets. 
+ */ + +#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) + +#define PSB_OBJECT_HASH_ORDER 13 +#define PSB_FILE_OBJECT_HASH_ORDER 12 +#define PSB_BO_HASH_ORDER 12 + +#define PSB_VDC_OFFSET 0x00000000 +#define PSB_VDC_SIZE 0x000080000 +#define MRST_MMIO_SIZE 0x0000C0000 +#define PSB_SGX_SIZE 0x8000 +#define PSB_SGX_OFFSET 0x00040000 +#define MRST_SGX_OFFSET 0x00080000 +#define PSB_MMIO_RESOURCE 0 +#define PSB_GATT_RESOURCE 2 +#define PSB_GTT_RESOURCE 3 +#define PSB_GMCH_CTRL 0x52 +#define PSB_BSM 0x5C +#define _PSB_GMCH_ENABLED 0x4 +#define PSB_PGETBL_CTL 0x2020 +#define _PSB_PGETBL_ENABLED 0x00000001 +#define PSB_SGX_2D_SLAVE_PORT 0x4000 +#define PSB_TT_PRIV0_LIMIT (256*1024*1024) +#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT) +#define PSB_NUM_VALIDATE_BUFFERS 2048 +#define PSB_MEM_KERNEL_START 0x10000000 +#define PSB_MEM_PDS_START 0x20000000 +#define PSB_MEM_MMU_START 0x40000000 + +#define DRM_PSB_MEM_KERNEL TTM_PL_PRIV0 +#define DRM_PSB_FLAG_MEM_KERNEL TTM_PL_FLAG_PRIV0 + +/* + *Flags for external memory type field. + */ + +#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */ +#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */ +/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */ +#define PSB_MSVDX_SIZE 0x10000 + +#define LNC_TOPAZ_OFFSET 0xA0000 +#define LNC_TOPAZ_SIZE 0x10000 + +#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */ +#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */ +#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */ + +/* + *PTE's and PDE's + */ + +#define PSB_PDE_MASK 0x003FFFFF +#define PSB_PDE_SHIFT 22 +#define PSB_PTE_SHIFT 12 + +#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */ +#define PSB_PTE_WO 0x0002 /* Write only */ +#define PSB_PTE_RO 0x0004 /* Read only */ +#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */ + +/* + *VDC registers and bits + */ +#define PSB_MSVDX_CLOCKGATING 0x2064 +#define PSB_TOPAZ_CLOCKGATING 0x2068 +#define PSB_HWSTAM 0x2098 +#define PSB_INSTPM 0x20C0 +#define PSB_INT_IDENTITY_R 0x20A4 +#define _PSB_VSYNC_PIPEB_FLAG (1<<5) +#define _PSB_VSYNC_PIPEA_FLAG (1<<7) +#define _PSB_IRQ_SGX_FLAG (1<<18) +#define _PSB_IRQ_MSVDX_FLAG (1<<19) +#define _LNC_IRQ_TOPAZ_FLAG (1<<20) +#define PSB_INT_MASK_R 0x20A8 +#define PSB_INT_ENABLE_R 0x20A0 + +#define _PSB_MMU_ER_MASK 0x0001FF00 +#define _PSB_MMU_ER_HOST (1 << 16) +#define GPIOA 0x5010 +#define GPIOB 0x5014 +#define GPIOC 0x5018 +#define GPIOD 0x501c +#define GPIOE 0x5020 +#define GPIOF 0x5024 +#define GPIOG 0x5028 +#define GPIOH 0x502c +#define GPIO_CLOCK_DIR_MASK (1 << 0) +#define GPIO_CLOCK_DIR_IN (0 << 1) +#define GPIO_CLOCK_DIR_OUT (1 << 1) +#define GPIO_CLOCK_VAL_MASK (1 << 2) +#define GPIO_CLOCK_VAL_OUT (1 << 3) +#define GPIO_CLOCK_VAL_IN (1 << 4) +#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) +#define GPIO_DATA_DIR_MASK (1 << 8) +#define GPIO_DATA_DIR_IN (0 << 9) +#define GPIO_DATA_DIR_OUT (1 << 9) +#define GPIO_DATA_VAL_MASK (1 << 10) +#define GPIO_DATA_VAL_OUT (1 << 11) +#define GPIO_DATA_VAL_IN (1 << 12) +#define GPIO_DATA_PULLUP_DISABLE (1 << 13) + +#define VCLK_DIVISOR_VGA0 0x6000 +#define VCLK_DIVISOR_VGA1 0x6004 +#define VCLK_POST_DIV 0x6010 + +#define PSB_COMM_2D (PSB_ENGINE_2D << 4) +#define PSB_COMM_3D (PSB_ENGINE_3D << 4) +#define PSB_COMM_TA (PSB_ENGINE_TA << 4) +#define PSB_COMM_HP (PSB_ENGINE_HP << 4) +#define PSB_COMM_USER_IRQ (1024 >> 2) +#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1) +#define PSB_COMM_FW (2048 >> 2) + +#define PSB_UIRQ_VISTEST 1 +#define PSB_UIRQ_OOM_REPLY 2 +#define PSB_UIRQ_FIRE_TA_REPLY 3 
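The PSB_PDE_MASK / PSB_PDE_SHIFT / PSB_PTE_SHIFT constants defined above encode a conventional two-level page-table split of a 32-bit device virtual address: ten bits of page-directory index, ten bits of page-table index, and a 4 KiB page offset. A minimal standalone sketch of that decomposition (the helper names here are illustrative only; the driver presumably open-codes the equivalent arithmetic in psb_mmu.c):

#include <stdint.h>
#include <stdio.h>

#define PSB_PDE_MASK  0x003FFFFF
#define PSB_PDE_SHIFT 22
#define PSB_PTE_SHIFT 12

/* Illustrative helpers, not part of the driver. */
static uint32_t pde_index(uint32_t addr)
{
	return addr >> PSB_PDE_SHIFT;                  /* top 10 bits */
}

static uint32_t pte_index(uint32_t addr)
{
	return (addr & PSB_PDE_MASK) >> PSB_PTE_SHIFT; /* middle 10 bits */
}

int main(void)
{
	uint32_t addr = 0x40123456; /* an address in the PSB_MEM_MMU range */
	printf("pde=%u pte=%u offset=0x%03x\n", pde_index(addr),
	       pte_index(addr), addr & ((1u << PSB_PTE_SHIFT) - 1));
	return 0;
}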
+#define PSB_UIRQ_FIRE_RASTER_REPLY 4 + +#define PSB_2D_SIZE (256*1024*1024) +#define PSB_MAX_RELOC_PAGES 1024 + +#define PSB_LOW_REG_OFFS 0x0204 +#define PSB_HIGH_REG_OFFS 0x0600 + +#define PSB_NUM_VBLANKS 2 +#define PSB_WATCHDOG_DELAY (DRM_HZ * 2) +#define PSB_LID_DELAY (DRM_HZ / 10) + +#define PSB_PWR_STATE_D0 1 +#define PSB_PWR_STATE_D0i3 2 +#define PSB_PWR_STATE_D3 3 + +#define PSB_PMPOLICY_NOPM 0 +#define PSB_PMPOLICY_CLOCKGATING 1 +#define PSB_PMPOLICY_POWERDOWN 2 + +#define PSB_PMSTATE_POWERUP 0 +#define PSB_PMSTATE_CLOCKGATED 1 +#define PSB_PMSTATE_POWERDOWN 2 + +/* Graphics MSI address and data region in PCIx */ +#define PSB_PCIx_MSI_ADDR_LOC 0x94 +#define PSB_PCIx_MSI_DATA_LOC 0x98 + + +/* + *User options. + */ + +struct drm_psb_uopt { + int clock_gating; +}; + +/** + *struct psb_context + * + *@buffers: array of pre-allocated validate buffers. + *@used_buffers: number of buffers in @buffers array currently in use. + *@validate_list: list of buffers validated from user-space. + *@kern_validate_list: list of buffers validated from kernel-space. + *@fence_types: fence types to be used for fence creation. + *@val_seq: validation sequence number. + * + *This structure is used during execbuf validation. + */ + +struct psb_context { + struct psb_validate_buffer *buffers; + uint32_t used_buffers; + struct list_head validate_list; + struct list_head kern_validate_list; + uint32_t fence_types; + uint32_t val_seq; +}; + +struct psb_gtt { + struct drm_device *dev; + int initialized; + uint32_t gatt_start; + uint32_t gtt_start; + uint32_t gtt_phys_start; + unsigned gtt_pages; + unsigned gatt_pages; + uint32_t stolen_base; + uint32_t pge_ctl; + u16 gmch_ctrl; + unsigned long stolen_size; + unsigned long vram_stolen_size; + unsigned long ci_stolen_size; + unsigned long rar_stolen_size; + uint32_t *gtt_map; + struct rw_semaphore sem; +}; + +struct psb_use_base { + struct list_head head; + struct ttm_fence_object *fence; + unsigned int reg; + unsigned long offset; + unsigned int dm; +}; + +struct psb_validate_buffer; + +struct psb_msvdx_cmd_queue { + struct list_head head; + void *cmd; + unsigned long cmd_size; + uint32_t sequence; +}; + +struct drm_psb_private { + + /* + *TTM Glue. + */ + + struct drm_global_reference mem_global_ref; + int has_global; + + struct drm_device *dev; + struct ttm_object_device *tdev; + struct ttm_fence_device fdev; + struct ttm_bo_device bdev; + struct ttm_lock ttm_lock; + struct vm_operations_struct *ttm_vm_ops; + int has_fence_device; + int has_bo_device; + + unsigned long chipset; + + struct psb_xhw_buf resume_buf; + struct drm_psb_dev_info_arg dev_info; + struct drm_psb_uopt uopt; + + struct psb_gtt *pg; + + struct page *scratch_page; + struct page *comm_page; + /* 'volatile' deliberately omitted on the mapping below; it is unneeded here and its use is discouraged.
*/ + uint32_t *comm; + uint32_t comm_mmu_offset; + uint32_t mmu_2d_offset; + uint32_t sequence[PSB_NUM_ENGINES]; + uint32_t last_sequence[PSB_NUM_ENGINES]; + int idle[PSB_NUM_ENGINES]; + uint32_t last_submitted_seq[PSB_NUM_ENGINES]; + int engine_lockup_2d; + + struct psb_mmu_driver *mmu; + struct psb_mmu_pd *pf_pd; + + uint8_t *sgx_reg; + uint8_t *vdc_reg; + uint32_t gatt_free_offset; + + /* + *MSVDX + */ + uint8_t *msvdx_reg; + atomic_t msvdx_mmu_invaldc; + void *msvdx_private; + + /* + *TOPAZ + */ + uint8_t *topaz_reg; + void *topaz_private; + + /* + *Fencing / irq. + */ + + uint32_t sgx_irq_mask; + uint32_t sgx2_irq_mask; + uint32_t vdc_irq_mask; + u32 pipestat[2]; + + spinlock_t irqmask_lock; + spinlock_t sequence_lock; + int fence0_irq_on; + int irq_enabled; + unsigned int irqen_count_2d; + wait_queue_head_t event_2d_queue; + +#ifdef FIX_TG_16 + wait_queue_head_t queue_2d; + atomic_t lock_2d; + atomic_t ta_wait_2d; + atomic_t ta_wait_2d_irq; + atomic_t waiters_2d; +#else + struct mutex mutex_2d; +#endif + int fence2_irq_on; + + /* + *Modesetting + */ + struct psb_intel_mode_device mode_dev; + + + /* + * CI share buffer + */ + unsigned int ci_region_start; + unsigned int ci_region_size; + + /* + * RAR share buffer; + */ + unsigned int rar_region_start; + unsigned int rar_region_size; + + /* + *Memory managers + */ + + int have_vram; + int have_camera; + int have_rar; + int have_tt; + int have_mem_mmu; + int have_mem_aper; + int have_mem_kernel; + int have_mem_pds; + int have_mem_rastgeom; + struct mutex temp_mem; + + /* + *Relocation buffer mapping. + */ + + spinlock_t reloc_lock; + unsigned int rel_mapped_pages; + wait_queue_head_t rel_mapped_queue; + + /* + *SAREA + */ + struct drm_psb_sarea *sarea_priv; + + /* + *OSPM info + */ + uint32_t ospm_base; + + /* + * Sizes info + */ + + struct drm_psb_sizes_arg sizes; + + uint32_t fuse_reg_value; + + /* vbt (gct) header information*/ + struct mrst_vbt vbt_data; + /* info that is stored from the gct */ + struct gct_ioctl_arg gct_data; + + /* + *LVDS info + */ + int backlight_duty_cycle; /* restore backlight to this value */ + bool panel_wants_dither; + struct drm_display_mode *panel_fixed_mode; + struct drm_display_mode *lfp_lvds_vbt_mode; + struct drm_display_mode *sdvo_lvds_vbt_mode; + + struct bdb_lvds_backlight * lvds_bl; /*LVDS backlight info from VBT*/ + struct psb_intel_i2c_chan * lvds_i2c_bus; + + /* Feature bits from the VBIOS*/ + unsigned int int_tv_support:1; + unsigned int lvds_dither:1; + unsigned int lvds_vbt:1; + unsigned int int_crt_support:1; + unsigned int lvds_use_ssc:1; + int lvds_ssc_freq; + +/* MRST private date start */ +/*FIXME JLIU7 need to revisit */ + bool sku_83; + bool sku_100; + bool sku_100L; + bool sku_bypass; + uint32_t iLVDS_enable; + + /* pipe config register value */ + uint32_t pipeconf; + + /* plane control register value */ + uint32_t dspcntr; + +/* MRST_DSI private date start */ + /* + *MRST DSI info + */ + /* The DSI device ready */ + bool dsi_device_ready; + + /* The DPI panel power on */ + bool dpi_panel_on; + + /* The DBI panel power on */ + bool dbi_panel_on; + + /* The DPI display */ + bool dpi; + + /* status */ + uint32_t videoModeFormat:2; + uint32_t laneCount:3; + uint32_t status_reserved:27; + + /* dual display - DPI & DBI */ + bool dual_display; + + /* HS or LP transmission */ + bool lp_transmission; + + /* configuration phase */ + bool config_phase; + + /* DSI clock */ + uint32_t RRate; + uint32_t DDR_Clock; + uint32_t DDR_Clock_Calculated; + uint32_t ClockBits; + + /* DBI Buffer pointer */ 
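/*
 * A note on the buffer pairs that follow: the *_orig pointers presumably
 * keep the raw allocator return values so the memory can be freed later,
 * while the non-_orig pointers are the working copies that the
 * DBI_CB_pointer and DBI_DB_pointer cursors index into.
 */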
+ u8 *p_DBI_commandBuffer_orig; + u8 *p_DBI_commandBuffer; + uint32_t DBI_CB_pointer; + u8 *p_DBI_dataBuffer_orig; + u8 *p_DBI_dataBuffer; + uint32_t DBI_DB_pointer; + + /* DPI panel spec */ + uint32_t pixelClock; + uint32_t HsyncWidth; + uint32_t HbackPorch; + uint32_t HfrontPorch; + uint32_t HactiveArea; + uint32_t VsyncWidth; + uint32_t VbackPorch; + uint32_t VfrontPorch; + uint32_t VactiveArea; + uint32_t bpp:5; + uint32_t Reserved:27; + + /* DBI panel spec */ + uint32_t dbi_pixelClock; + uint32_t dbi_HsyncWidth; + uint32_t dbi_HbackPorch; + uint32_t dbi_HfrontPorch; + uint32_t dbi_HactiveArea; + uint32_t dbi_VsyncWidth; + uint32_t dbi_VbackPorch; + uint32_t dbi_VfrontPorch; + uint32_t dbi_VactiveArea; + uint32_t dbi_bpp:5; + uint32_t dbi_Reserved:27; + +/* MRST_DSI private date end */ + + /* + *Register state + */ + uint32_t saveDSPACNTR; + uint32_t saveDSPBCNTR; + uint32_t savePIPEACONF; + uint32_t savePIPEBCONF; + uint32_t savePIPEASRC; + uint32_t savePIPEBSRC; + uint32_t saveFPA0; + uint32_t saveFPA1; + uint32_t saveDPLL_A; + uint32_t saveDPLL_A_MD; + uint32_t saveHTOTAL_A; + uint32_t saveHBLANK_A; + uint32_t saveHSYNC_A; + uint32_t saveVTOTAL_A; + uint32_t saveVBLANK_A; + uint32_t saveVSYNC_A; + uint32_t saveDSPASTRIDE; + uint32_t saveDSPASIZE; + uint32_t saveDSPAPOS; + uint32_t saveDSPABASE; + uint32_t saveDSPASURF; + uint32_t saveFPB0; + uint32_t saveFPB1; + uint32_t saveDPLL_B; + uint32_t saveDPLL_B_MD; + uint32_t saveHTOTAL_B; + uint32_t saveHBLANK_B; + uint32_t saveHSYNC_B; + uint32_t saveVTOTAL_B; + uint32_t saveVBLANK_B; + uint32_t saveVSYNC_B; + uint32_t saveDSPBSTRIDE; + uint32_t saveDSPBSIZE; + uint32_t saveDSPBPOS; + uint32_t saveDSPBBASE; + uint32_t saveDSPBSURF; + uint32_t saveVCLK_DIVISOR_VGA0; + uint32_t saveVCLK_DIVISOR_VGA1; + uint32_t saveVCLK_POST_DIV; + uint32_t saveVGACNTRL; + uint32_t saveADPA; + uint32_t saveLVDS; + uint32_t saveDVOA; + uint32_t saveDVOB; + uint32_t saveDVOC; + uint32_t savePP_ON; + uint32_t savePP_OFF; + uint32_t savePP_CONTROL; + uint32_t savePP_CYCLE; + uint32_t savePFIT_CONTROL; + uint32_t savePaletteA[256]; + uint32_t savePaletteB[256]; + uint32_t saveBLC_PWM_CTL2; + uint32_t saveBLC_PWM_CTL; + uint32_t saveCLOCKGATING; + uint32_t saveDSPARB; + uint32_t saveDSPATILEOFF; + uint32_t saveDSPBTILEOFF; + uint32_t saveDSPAADDR; + uint32_t saveDSPBADDR; + uint32_t savePFIT_AUTO_RATIOS; + uint32_t savePFIT_PGM_RATIOS; + uint32_t savePP_ON_DELAYS; + uint32_t savePP_OFF_DELAYS; + uint32_t savePP_DIVISOR; + uint32_t saveBSM; + uint32_t saveVBT; + uint32_t saveBCLRPAT_A; + uint32_t saveBCLRPAT_B; + uint32_t saveDSPALINOFF; + uint32_t saveDSPBLINOFF; + uint32_t savePERF_MODE; + uint32_t saveDSPFW1; + uint32_t saveDSPFW2; + uint32_t saveDSPFW3; + uint32_t saveDSPFW4; + uint32_t saveDSPFW5; + uint32_t saveDSPFW6; + uint32_t saveCHICKENBIT; + uint32_t saveDSPACURSOR_CTRL; + uint32_t saveDSPBCURSOR_CTRL; + uint32_t saveDSPACURSOR_BASE; + uint32_t saveDSPBCURSOR_BASE; + uint32_t saveDSPACURSOR_POS; + uint32_t saveDSPBCURSOR_POS; + uint32_t save_palette_a[256]; + uint32_t save_palette_b[256]; + uint32_t saveOV_OVADD; + uint32_t saveOV_OGAMC0; + uint32_t saveOV_OGAMC1; + uint32_t saveOV_OGAMC2; + uint32_t saveOV_OGAMC3; + uint32_t saveOV_OGAMC4; + uint32_t saveOV_OGAMC5; + + /* MSI reg save */ + uint32_t msi_addr; + uint32_t msi_data; + + /* + *Xhw + */ + + uint32_t *xhw; + struct ttm_buffer_object *xhw_bo; + struct ttm_bo_kmap_obj xhw_kmap; + struct list_head xhw_in; + spinlock_t xhw_lock; + atomic_t xhw_client; + struct drm_file *xhw_file; + 
wait_queue_head_t xhw_queue; + wait_queue_head_t xhw_caller_queue; + struct mutex xhw_mutex; + struct psb_xhw_buf *xhw_cur_buf; + int xhw_submit_ok; + int xhw_on; + + /* + *Scheduling. + */ + + struct mutex reset_mutex; + struct psb_scheduler scheduler; + struct mutex cmdbuf_mutex; + uint32_t ta_mem_pages; + struct psb_ta_mem *ta_mem; + int force_ta_mem_load; + atomic_t val_seq; + + /* + *TODO: change this to be per drm-context. + */ + + struct psb_context context; + + /* + * LID-Switch + */ + spinlock_t lid_lock; + struct timer_list lid_timer; + struct psb_intel_opregion opregion; + u32 * lid_state; + u32 lid_last_state; + + /* + *Watchdog + */ + + spinlock_t watchdog_lock; + struct timer_list watchdog_timer; + struct work_struct watchdog_wq; + struct work_struct msvdx_watchdog_wq; + struct work_struct topaz_watchdog_wq; + int timer_available; + + uint32_t apm_reg; + uint16_t apm_base; +#ifdef OSPM_STAT + unsigned char graphics_state; + unsigned long gfx_d0i3_time; + unsigned long gfx_d0_time; + unsigned long gfx_d3_time; + unsigned long gfx_last_mode_change; + unsigned long gfx_d0_cnt; + unsigned long gfx_d0i3_cnt; + unsigned long gfx_d3_cnt; +#endif + + int dri_page_flipping; + int current_page; + int pipe_active[2]; + int saved_start[2]; + int saved_offset[2]; + int saved_stride[2]; + + int flip_start[2]; + int flip_offset[2]; + int flip_stride[2]; + + + /* + *Used for modifying backlight from xrandr -- consider removing and using HAL instead + */ + struct drm_property *backlight_property; + uint32_t blc_adj1; + + /* + * DPST and Hotplug state + */ + + struct dpst_state *psb_dpst_state; + struct hotplug_state *psb_hotplug_state; + +}; + +struct psb_fpriv { + struct ttm_object_file *tfile; +}; + +struct psb_mmu_driver; + +extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int); +extern int drm_pick_crtcs(struct drm_device *dev); + + +static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv) +{ + return (struct psb_fpriv *) file_priv->driver_priv; +} + +static inline struct drm_psb_private *psb_priv(struct drm_device *dev) +{ + return (struct drm_psb_private *) dev->dev_private; +} + +/* + *TTM glue. 
psb_ttm_glue.c + */ + +extern int psb_open(struct inode *inode, struct file *filp); +extern int psb_release(struct inode *inode, struct file *filp); +extern int psb_mmap(struct file *filp, struct vm_area_struct *vma); + +extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_verify_access(struct ttm_buffer_object *bo, + struct file *filp); +extern ssize_t psb_ttm_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); +extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos); +extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_pl_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_extension_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_ttm_global_init(struct drm_psb_private *dev_priv); +extern void psb_ttm_global_release(struct drm_psb_private *dev_priv); +/* + *MMU stuff. + */ + +extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, + int trap_pagefaults, + int invalid_type, + struct drm_psb_private *dev_priv); +extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver); +extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver + *driver); +extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset, + uint32_t gtt_start, uint32_t gtt_pages); +extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset); +extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, + int trap_pagefaults, + int invalid_type); +extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd); +extern void psb_mmu_flush(struct psb_mmu_driver *driver); +extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, + unsigned long address, + uint32_t num_pages); +extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, + uint32_t start_pfn, + unsigned long address, + uint32_t num_pages, int type); +extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, + unsigned long *pfn); + +/* + *Enable / disable MMU for different requestors. 
+ */ + +extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, + uint32_t mask); +extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, + uint32_t mask); +extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context); +extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, + unsigned long address, uint32_t num_pages, + uint32_t desired_tile_stride, + uint32_t hw_tile_stride, int type); +extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, + unsigned long address, uint32_t num_pages, + uint32_t desired_tile_stride, + uint32_t hw_tile_stride); +/* + *psb_sgx.c + */ + +extern int psb_blit_sequence(struct drm_psb_private *dev_priv, + uint32_t sequence); +extern void psb_init_2d(struct drm_psb_private *dev_priv); +extern int psb_idle_2d(struct drm_device *dev); +extern int psb_idle_3d(struct drm_device *dev); +extern int psb_emit_2d_copy_blit(struct drm_device *dev, + uint32_t src_offset, + uint32_t dst_offset, uint32_t pages, + int direction); +extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_reg_submit(struct drm_psb_private *dev_priv, + uint32_t *regs, unsigned int cmds); +extern int psb_submit_copy_cmdbuf(struct drm_device *dev, + struct ttm_buffer_object *cmd_buffer, + unsigned long cmd_offset, + unsigned long cmd_size, int engine, + uint32_t *copy_buffer); + +extern void psb_init_disallowed(void); +extern void psb_fence_or_sync(struct drm_file *file_priv, + uint32_t engine, + uint32_t fence_types, + uint32_t fence_flags, + struct list_head *list, + struct psb_ttm_fence_rep *fence_arg, + struct ttm_fence_object **fence_p); +extern int psb_validate_kernel_buffer(struct psb_context *context, + struct ttm_buffer_object *bo, + uint32_t fence_class, + uint64_t set_flags, + uint64_t clr_flags); +extern void psb_init_ospm(struct drm_psb_private *dev_priv); +extern int psb_try_power_down_sgx(struct drm_device *dev); +extern int psb_page_flip(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_flip_set_base(struct drm_psb_private *dev_priv, int pipe); + +/* + *psb_irq.c + */ + +extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS); +extern void psb_irq_preinstall(struct drm_device *dev); +extern int psb_irq_postinstall(struct drm_device *dev); +extern void psb_irq_uninstall(struct drm_device *dev); +extern void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands); +extern int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands); +extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands); +extern int psb_vblank_wait2(struct drm_device *dev, + unsigned int *sequence); +extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence); + +extern int psb_enable_vblank(struct drm_device *dev, int crtc); +extern void psb_disable_vblank(struct drm_device *dev, int crtc); +void +psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask); + +void +psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask); + +extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc); +/* + *psb_fence.c + */ + +extern void psb_fence_handler(struct drm_device *dev, uint32_t class); +extern void psb_2D_irq_off(struct drm_psb_private *dev_priv); +extern void psb_2D_irq_on(struct drm_psb_private *dev_priv); +extern uint32_t psb_fence_advance_sequence(struct drm_device *dev, + uint32_t class); +extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev, + uint32_t fence_class, 
+ uint32_t flags, uint32_t *sequence, + unsigned long *timeout_jiffies); +extern void psb_fence_error(struct drm_device *dev, + uint32_t class, + uint32_t sequence, uint32_t type, int error); +extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev); + +/* MSVDX/Topaz stuff */ +extern int lnc_video_frameskip(struct drm_device *dev, + uint64_t user_pointer); +extern int lnc_video_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_try_power_down_topaz(struct drm_device *dev); +extern int psb_try_power_down_msvdx(struct drm_device *dev); + +/* + *psb_gtt.c + */ +extern int psb_gtt_init(struct psb_gtt *pg, int resume); +extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages, + unsigned offset_pages, unsigned num_pages, + unsigned desired_tile_stride, + unsigned hw_tile_stride, int type); +extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages, + unsigned num_pages, + unsigned desired_tile_stride, + unsigned hw_tile_stride); + +extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev); +extern void psb_gtt_takedown(struct psb_gtt *pg, int free); + +/* + *psb_fb.c + */ +extern int psbfb_probed(struct drm_device *dev); +extern int psbfb_remove(struct drm_device *dev, + struct drm_framebuffer *fb); +extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/* + *psb_reset.c + */ + +extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d); +extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv); +extern void psb_watchdog_init(struct drm_psb_private *dev_priv); +extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv); +extern void psb_lid_timer_init(struct drm_psb_private * dev_priv); +extern void psb_lid_timer_takedown(struct drm_psb_private * dev_priv); +extern void psb_print_pagefault(struct drm_psb_private *dev_priv); + +/* + *psb_xhw.c + */ + +extern int psb_xhw_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int psb_xhw_init(struct drm_device *dev); +extern void psb_xhw_takedown(struct drm_psb_private *dev_priv); +extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv, + struct drm_file *file_priv, int closing); +extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t fire_flags, + uint32_t hw_context, + uint32_t *cookie, + uint32_t *oom_cmds, + uint32_t num_oom_cmds, + uint32_t offset, + uint32_t engine, uint32_t flags); +extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t fire_flags); +extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t w, + uint32_t h, uint32_t *hw_cookie, + uint32_t *bo_size, uint32_t *clear_p_start, + uint32_t *clear_num_pages); + +extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf); +extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t *value); +extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t pages, + uint32_t * hw_cookie, + uint32_t * size, + uint32_t * ta_min_size); +extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t *cookie); 
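The psb_gtt_insert_pages()/psb_gtt_remove_pages() prototypes above take page-granular offsets and counts rather than byte addresses. A minimal sketch of the conversion a caller would perform, assuming 4 KiB pages; the helper name is hypothetical and not part of the driver:

#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical helper: derive page-granular GTT arguments from a byte range. */
static void gtt_page_args(uint64_t byte_offset, uint64_t byte_len,
			  unsigned *offset_pages, unsigned *num_pages)
{
	*offset_pages = (unsigned)(byte_offset >> PAGE_SHIFT);
	/* round up so a partial tail page is still covered */
	*num_pages = (unsigned)((byte_len + PAGE_SIZE - 1) >> PAGE_SHIFT);
}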
+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t *cookie, + uint32_t *bca, + uint32_t *rca, uint32_t *flags); +extern int psb_xhw_vistest(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf); +extern int psb_xhw_handler(struct drm_psb_private *dev_priv); +extern int psb_xhw_resume(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf); +extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t *cookie); +extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t flags, + uint32_t param_offset, + uint32_t pt_offset, uint32_t *hw_cookie); +extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf); + +/* + *psb_schedule.c: HW bug fixing. + */ + +#ifdef FIX_TG_16 + +extern void psb_2d_unlock(struct drm_psb_private *dev_priv); +extern void psb_2d_lock(struct drm_psb_private *dev_priv); +extern int psb_2d_trylock(struct drm_psb_private *dev_priv); +extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv); +extern int psb_2d_trylock(struct drm_psb_private *dev_priv); +extern void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv); +#else + +#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d) +#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d) + +#endif + +/* modesetting */ +extern void psb_modeset_init(struct drm_device *dev); +extern void psb_modeset_cleanup(struct drm_device *dev); + +/* psb_bl.c */ +int psb_backlight_init(struct drm_device *dev); +void psb_backlight_exit(void); +int psb_set_brightness(struct backlight_device *bd); +int psb_get_brightness(struct backlight_device *bd); + +/* + *Utilities + */ +#define DRM_DRIVER_PRIVATE_T struct drm_psb_private + +static inline u32 MSG_READ32(uint port, uint offset) +{ + int mcr = (0xD0<<24) | (port << 16) | (offset << 8); + outl(0x800000D0, 0xCF8); + outl(mcr, 0xCFC); + outl(0x800000D4, 0xCF8); + return inl(0xcfc); +} +static inline void MSG_WRITE32(uint port, uint offset, u32 value) +{ + int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0; + outl(0x800000D4, 0xCF8); + outl(value, 0xcfc); + outl(0x800000D0, 0xCF8); + outl(mcr, 0xCFC); +} + +static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + + return ioread32(dev_priv->vdc_reg + (reg)); +} + +#define REG_READ(reg) REGISTER_READ(dev, (reg)) +static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg, + uint32_t val) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + + iowrite32((val), dev_priv->vdc_reg + (reg)); +} + +#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val)) + +static inline void REGISTER_WRITE16(struct drm_device *dev, + uint32_t reg, uint32_t val) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + + iowrite16((val), dev_priv->vdc_reg + (reg)); +} + +#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val)) + +static inline void REGISTER_WRITE8(struct drm_device *dev, + uint32_t reg, uint32_t val) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + + iowrite8((val), dev_priv->vdc_reg + (reg)); +} + +#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val)) + +#define PSB_ALIGN_TO(_val, _align) \ + (((_val) + ((_align) - 1)) & ~((_align) - 1)) +#define PSB_WVDC32(_val, _offs) \ + iowrite32(_val, dev_priv->vdc_reg + (_offs)) +#define PSB_RVDC32(_offs) \ + ioread32(dev_priv->vdc_reg + (_offs)) + 
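Two of the macros just defined deserve a note. MSG_READ32/MSG_WRITE32 appear to tunnel Moorestown message-bus accesses through the legacy 0xCF8/0xCFC PCI configuration ports, using config registers 0xD0/0xD4 as the message control/data pair. PSB_ALIGN_TO is the usual round-up idiom for power-of-two alignments (add align-1, then mask off the low bits); it is not valid for non-power-of-two alignments. A standalone check of its behaviour, outside the kernel:

#include <assert.h>

#define PSB_ALIGN_TO(_val, _align) \
	(((_val) + ((_align) - 1)) & ~((_align) - 1))

int main(void)
{
	assert(PSB_ALIGN_TO(0x1001, 0x1000) == 0x2000); /* one byte past a page */
	assert(PSB_ALIGN_TO(0x1000, 0x1000) == 0x1000); /* already aligned */
	assert(PSB_ALIGN_TO(13, 8) == 16);
	return 0;
}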
+//#define TRAP_SGX_PM_FAULT 1 +#ifdef TRAP_SGX_PM_FAULT +#define PSB_WSGX32(_val, _offs) \ +{ \ + if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \ + printk(KERN_ERR "access sgx when it's off!!(WRITE) %s, %d\n", \ + __FILE__, __LINE__); \ + mdelay(1000); \ + } \ + iowrite32(_val, dev_priv->sgx_reg + (_offs)); \ +} +#define PSB_RSGX32(_offs) \ +({ \ + if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \ + printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \ + __FILE__, __LINE__); \ + mdelay(1000); \ + } \ + ioread32(dev_priv->sgx_reg + (_offs)); \ +}) +#else +#define PSB_WSGX32(_val, _offs) \ + iowrite32(_val, dev_priv->sgx_reg + (_offs)) +#define PSB_RSGX32(_offs) \ + ioread32(dev_priv->sgx_reg + (_offs)) +#endif + +#define PSB_WMSVDX32(_val, _offs) \ + iowrite32(_val, dev_priv->msvdx_reg + (_offs)) +#define PSB_RMSVDX32(_offs) \ + ioread32(dev_priv->msvdx_reg + (_offs)) + +#define PSB_ALPL(_val, _base) \ + (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) +#define PSB_ALPLM(_val, _base) \ + ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK)) + +#define PSB_D_RENDER (1 << 16) + +#define PSB_D_GENERAL (1 << 0) +#define PSB_D_INIT (1 << 1) +#define PSB_D_IRQ (1 << 2) +#define PSB_D_FW (1 << 3) +#define PSB_D_PERF (1 << 4) +#define PSB_D_TMP (1 << 5) +#define PSB_D_PM (1 << 6) + +extern int drm_psb_debug; +extern int drm_psb_no_fb; +extern int drm_psb_disable_vsync; +extern int drm_idle_check_interval; +extern int drm_psb_ospm; + +#define PSB_DEBUG_FW(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_FW, _fmt, ##_arg) +#define PSB_DEBUG_GENERAL(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg) +#define PSB_DEBUG_INIT(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg) +#define PSB_DEBUG_IRQ(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg) +#define PSB_DEBUG_RENDER(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg) +#define PSB_DEBUG_PERF(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg) +#define PSB_DEBUG_TMP(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg) +#define PSB_DEBUG_PM(_fmt, _arg...) \ + PSB_DEBUG(PSB_D_PM, _fmt, ##_arg) + +#if DRM_DEBUG_CODE +#define PSB_DEBUG(_flag, _fmt, _arg...) \ + do { \ + if (unlikely((_flag) & drm_psb_debug)) \ + printk(KERN_DEBUG \ + "[psb:0x%02x:%s] " _fmt , _flag, \ + __func__ , ##_arg); \ + } while (0) +#else +#define PSB_DEBUG(_fmt, _arg...) do { } while (0) +#endif + +#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \ + ((dev)->pci_device == 0x8109)) + +#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) + +#endif diff --git a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c new file mode 100644 index 0000000..a29694e --- /dev/null +++ b/drivers/gpu/drm/psb/psb_fb.c @@ -0,0 +1,1833 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. + * + **************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "psb_drv.h" +#include "psb_intel_reg.h" +#include "psb_intel_drv.h" +#include "ttm/ttm_userobj_api.h" +#include "psb_fb.h" +#include "psb_sgx.h" +#include "psb_powermgmt.h" + +static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth) +{ + switch (depth) { + case 8: + var->red.offset = 0; + var->green.offset = 0; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 15: + var->red.offset = 10; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 5; + var->blue.length = 5; + var->transp.length = 1; + var->transp.offset = 15; + break; + case 16: + var->red.offset = 11; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 6; + var->blue.length = 5; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 24: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 32: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 8; + var->transp.offset = 24; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb); +static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle); + +static const struct drm_framebuffer_funcs psb_fb_funcs = { + .destroy = psb_user_framebuffer_destroy, + .create_handle = psb_user_framebuffer_create_handle, +}; + +struct psbfb_par { + struct drm_device *dev; + struct psb_framebuffer *psbfb; + + int dpms_state; + + int crtc_count; + /* crtc currently bound to this */ + uint32_t crtc_ids[2]; +}; + +#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) + +static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, + struct fb_info *info) +{ + struct psbfb_par *par = info->par; + struct drm_framebuffer *fb = &par->psbfb->base; + uint32_t v; + + if (!fb) + return -ENOMEM; + + if (regno > 255) + return 1; + +#if 0 /* JB: not drop, check that this works */ + if (fb->bits_per_pixel == 8) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + head) { + for (i = 0; i < par->crtc_count; i++) + if (crtc->base.id == par->crtc_ids[i]) + break; + + if (i == par->crtc_count) + continue; + + if (crtc->funcs->gamma_set) + crtc->funcs->gamma_set(crtc, red, green, + blue, regno); + } + return 0; + } +#endif + + red = CMAP_TOHW(red, info->var.red.length); + blue = CMAP_TOHW(blue, info->var.blue.length); + green = CMAP_TOHW(green, info->var.green.length); + transp = CMAP_TOHW(transp, info->var.transp.length); + + v = (red << info->var.red.offset) | + (green 
<< info->var.green.offset) | + (blue << info->var.blue.offset) | + (transp << info->var.transp.offset); + + if (regno < 16) { + switch (fb->bits_per_pixel) { + case 16: + ((uint32_t *) info->pseudo_palette)[regno] = v; + break; + case 24: + case 32: + ((uint32_t *) info->pseudo_palette)[regno] = v; + break; + } + } + + return 0; +} + +static struct drm_display_mode *psbfb_find_first_mode(struct + fb_var_screeninfo + *var, + struct fb_info *info, + struct drm_crtc + *crtc) +{ + struct psbfb_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_display_mode *drm_mode; + struct drm_display_mode *preferred_mode = NULL; + struct drm_display_mode *last_mode = NULL; + struct drm_connector *connector; + int found; + + found = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + if (connector->encoder && connector->encoder->crtc == crtc) { + found = 1; + break; + } + } + + /* found no connector, bail */ + if (!found) + return NULL; + + found = 0; + list_for_each_entry(drm_mode, &connector->modes, head) { + if (drm_mode->hdisplay == var->xres && + drm_mode->vdisplay == var->yres + && drm_mode->clock != 0) { + found = 1; + last_mode = drm_mode; + if(IS_POULSBO(dev)) { + if(last_mode->type & DRM_MODE_TYPE_PREFERRED) { + preferred_mode = last_mode; + } + } + } + } + + /* No mode matching mode found */ + if (!found) + return NULL; + + if(IS_POULSBO(dev)) { + if(preferred_mode) + return preferred_mode; + else + return last_mode; + } else { + return last_mode; + } +} + +static int psbfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct psbfb_par *par = info->par; + struct psb_framebuffer *psbfb = par->psbfb; + struct drm_device *dev = par->dev; + int ret; + int depth; + int pitch; + int bpp = var->bits_per_pixel; + + if (!psbfb) + return -ENOMEM; + + if (!var->pixclock) + return -EINVAL; + + /* don't support virtuals for now */ + if (var->xres_virtual > var->xres) + return -EINVAL; + + if (var->yres_virtual > var->yres) + return -EINVAL; + + switch (bpp) { +#if 0 /* JB: for now only support true color */ + case 8: + depth = 8; + break; +#endif + case 16: + depth = (var->green.length == 6) ? 16 : 15; + break; + case 24: /* assume this is 32bpp / depth 24 */ + bpp = 32; + /* fallthrough */ + case 32: + depth = (var->transp.length > 0) ? 32 : 24; + break; + default: + return -EINVAL; + } + + pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f; + + /* Check that we can resize */ + if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) { +#if 1 + /* Need to resize the fb object. + * But the generic fbdev code doesn't really understand + * that we can do this. So disable for now. + */ + DRM_INFO("Can't support requested size, too big!\n"); + return -EINVAL; +#else + struct drm_psb_private *dev_priv = psb_priv(dev); + struct ttm_bo_device *bdev = &dev_priv->bdev; + struct ttm_buffer_object *fbo = NULL; + struct ttm_bo_kmap_obj tmp_kmap; + + /* a temporary BO to check if we could resize in setpar. + * Therefore no need to set NO_EVICT. + */ + ret = ttm_buffer_object_create(bdev, + pitch * var->yres, + ttm_bo_type_kernel, + TTM_PL_FLAG_TT | + TTM_PL_FLAG_VRAM | + TTM_PL_FLAG_NO_EVICT, + 0, 0, &fbo); + if (ret || !fbo) + return -ENOMEM; + + ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap); + if (ret) { + ttm_bo_usage_deref_unlocked(&fbo); + return -EINVAL; + } + + ttm_bo_kunmap(&tmp_kmap); + /* destroy our current fbo! 
*/ + ttm_bo_usage_deref_unlocked(&fbo); +#endif + } + + ret = fill_fb_bitfield(var, depth); + if (ret) + return ret; + +#if 1 + /* Here we walk the output mode list and look for modes. If we haven't + * got it, then bail. Not very nice, so this is disabled. + * In the set_par code, we create our mode based on the incoming + * parameters. Nicer, but may not be desired by some. + */ + { + struct drm_crtc *crtc; + int i; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + head) { + struct psb_intel_crtc *psb_intel_crtc = + to_psb_intel_crtc(crtc); + + for (i = 0; i < par->crtc_count; i++) + if (crtc->base.id == par->crtc_ids[i]) + break; + + if (i == par->crtc_count) + continue; + + if (psb_intel_crtc->mode_set.num_connectors == 0) + continue; + + if (!psbfb_find_first_mode(&info->var, info, crtc)) + return -EINVAL; + } + } +#else + (void) i; + (void) dev; /* silence warnings */ + (void) crtc; + (void) drm_mode; + (void) connector; +#endif + + return 0; +} + +/* this will let fbcon do the mode init */ +static int psbfb_set_par(struct fb_info *info) +{ + struct psbfb_par *par = info->par; + struct psb_framebuffer *psbfb = par->psbfb; + struct drm_framebuffer *fb = &psbfb->base; + struct drm_device *dev = par->dev; + struct fb_var_screeninfo *var = &info->var; + struct drm_psb_private *dev_priv = dev->dev_private; + struct drm_display_mode *drm_mode; + int pitch; + int depth; + int bpp = var->bits_per_pixel; + + if (!fb) + return -ENOMEM; + + switch (bpp) { + case 8: + depth = 8; + break; + case 16: + depth = (var->green.length == 6) ? 16 : 15; + break; + case 24: /* assume this is 32bpp / depth 24 */ + bpp = 32; + /* fallthrough */ + case 32: + depth = (var->transp.length > 0) ? 32 : 24; + break; + default: + DRM_ERROR("Illegal BPP\n"); + return -EINVAL; + } + + pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f; + + if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) { +#if 1 + /* Need to resize the fb object. + * But the generic fbdev code doesn't really understand + * that we can do this. So disable for now. + */ + DRM_INFO("Can't support requested size, too big!\n"); + return -EINVAL; +#else + int ret; + struct ttm_buffer_object *fbo = NULL, *tfbo; + struct ttm_bo_kmap_obj tmp_kmap, tkmap; + + ret = ttm_buffer_object_create(bdev, + pitch * var->yres, + ttm_bo_type_kernel, + TTM_PL_FLAG_MEM_TT | + TTM_PL_FLAG_MEM_VRAM | + TTM_PL_FLAG_NO_EVICT, + 0, 0, &fbo); + if (ret || !fbo) { + DRM_ERROR + ("failed to allocate new resized framebuffer\n"); + return -ENOMEM; + } + + ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap); + if (ret) { + DRM_ERROR("failed to kmap framebuffer.\n"); + ttm_bo_usage_deref_unlocked(&fbo); + return -EINVAL; + } + + DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", + fb->width, fb->height, fb->offset, fbo); + + /* set new screen base */ + info->screen_base = tmp_kmap.virtual; + + tkmap = fb->kmap; + fb->kmap = tmp_kmap; + ttm_bo_kunmap(&tkmap); + + tfbo = fb->bo; + fb->bo = fbo; + ttm_bo_usage_deref_unlocked(&tfbo); +#endif + } + + psbfb->offset = psbfb->bo->offset - dev_priv->pg->gatt_start; + fb->width = var->xres; + fb->height = var->yres; + fb->bits_per_pixel = bpp; + fb->pitch = pitch; + fb->depth = depth; + + info->fix.line_length = psbfb->base.pitch; + info->fix.visual = + (psbfb->base.depth == + 8) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; + + /* some fbdev's apps don't want these to change */ + info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset; + +#if 0 + /* relates to resize - disable */ + info->fix.smem_len = info->fix.line_length * var->yres; + info->screen_size = info->fix.smem_len; /* ??? */ +#endif + + /* Should we walk the output's modelist or just create our own ??? + * For now, we create and destroy a mode based on the incoming + * parameters. But there's commented out code below which scans + * the output list too. + */ +#if 1 + /* This code is now in the for loop futher down. */ +#endif + + { + struct drm_crtc *crtc; + int ret; + int i; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + head) { + struct psb_intel_crtc *psb_intel_crtc = + to_psb_intel_crtc(crtc); + + for (i = 0; i < par->crtc_count; i++) + if (crtc->base.id == par->crtc_ids[i]) + break; + + if (i == par->crtc_count) + continue; + + if (psb_intel_crtc->mode_set.num_connectors == 0) + continue; + +#if 1 + drm_mode = + psbfb_find_first_mode(&info->var, info, crtc); + if (!drm_mode) + DRM_ERROR("No matching mode found\n"); + psb_intel_crtc->mode_set.mode = drm_mode; +#endif + +#if 0 /* FIXME: TH */ + if (crtc->fb == psb_intel_crtc->mode_set.fb) { +#endif + DRM_DEBUG + ("setting mode on crtc %p with id %u\n", + crtc, crtc->base.id); + ret = + crtc->funcs-> + set_config(&psb_intel_crtc->mode_set); + if (ret) { + DRM_ERROR("Failed setting mode\n"); + return ret; + } +#if 0 + } +#endif + } + DRM_DEBUG("Set par returned OK.\n"); + return 0; + } + + return 0; +} +#if 0 +static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf, + unsigned size) +{ + int ret = 0; + int i; + unsigned submit_size; + + while (size > 0) { + submit_size = (size < 0x60) ? 
size : 0x60; + size -= submit_size; + ret = psb_2d_wait_available(dev_priv, submit_size); + if (ret) + return ret; + + submit_size <<= 2; + for (i = 0; i < submit_size; i += 4) { + PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i); + } + (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4); + } + return 0; +} + +static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv, + uint32_t dst_offset, uint32_t dst_stride, + uint32_t dst_format, uint16_t dst_x, + uint16_t dst_y, uint16_t size_x, + uint16_t size_y, uint32_t fill) +{ + uint32_t buffer[10]; + uint32_t *buf; + + buf = buffer; + + *buf++ = PSB_2D_FENCE_BH; + + *buf++ = + PSB_2D_DST_SURF_BH | dst_format | (dst_stride << + PSB_2D_DST_STRIDE_SHIFT); + *buf++ = dst_offset; + + *buf++ = + PSB_2D_BLIT_BH | + PSB_2D_ROT_NONE | + PSB_2D_COPYORDER_TL2BR | + PSB_2D_DSTCK_DISABLE | + PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY; + + *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT; + *buf++ = + (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y << + PSB_2D_DST_YSTART_SHIFT); + *buf++ = + (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y << + PSB_2D_DST_YSIZE_SHIFT); + *buf++ = PSB_2D_FLUSH_BH; + + return psbfb_2d_submit(dev_priv, buffer, buf - buffer); +} + +static void psbfb_fillrect_accel(struct fb_info *info, + const struct fb_fillrect *r) +{ + struct psbfb_par *par = info->par; + struct psb_framebuffer *psbfb = par->psbfb; + struct drm_framebuffer *fb = &psbfb->base; + struct drm_psb_private *dev_priv = par->dev->dev_private; + uint32_t offset; + uint32_t stride; + uint32_t format; + + if (!fb) + return; + + offset = psbfb->offset; + stride = fb->pitch; + + switch (fb->depth) { + case 8: + format = PSB_2D_DST_332RGB; + break; + case 15: + format = PSB_2D_DST_555RGB; + break; + case 16: + format = PSB_2D_DST_565RGB; + break; + case 24: + case 32: + /* this is wrong but since we don't do blending its okay */ + format = PSB_2D_DST_8888ARGB; + break; + default: + /* software fallback */ + cfb_fillrect(info, r); + return; + } + + psb_accel_2d_fillrect(dev_priv, + offset, stride, format, + r->dx, r->dy, r->width, r->height, r->color); +} + +static void psbfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + struct psbfb_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + + if (unlikely(info->state != FBINFO_STATE_RUNNING)) + return; + + if (info->flags & FBINFO_HWACCEL_DISABLED) + return cfb_fillrect(info, rect); + /* + * psbfb_fillrect is atomic so need to do instantaneous check of + * power on + */ + if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) || powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) || + !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) + return cfb_fillrect(info, rect); + if (psb_2d_trylock(dev_priv)) { + psbfb_fillrect_accel(info, rect); + psb_2d_unlock(dev_priv); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + } else + cfb_fillrect(info, rect); +} + +uint32_t psb_accel_2d_copy_direction(int xdir, int ydir) +{ + if (xdir < 0) + return (ydir < + 0) ? PSB_2D_COPYORDER_BR2TL : + PSB_2D_COPYORDER_TR2BL; + else + return (ydir < + 0) ? 
PSB_2D_COPYORDER_BL2TR : + PSB_2D_COPYORDER_TL2BR; +} + +/* + * @srcOffset in bytes + * @srcStride in bytes + * @srcFormat psb 2D format defines + * @dstOffset in bytes + * @dstStride in bytes + * @dstFormat psb 2D format defines + * @srcX offset in pixels + * @srcY offset in pixels + * @dstX offset in pixels + * @dstY offset in pixels + * @sizeX of the copied area + * @sizeY of the copied area + */ +static int psb_accel_2d_copy(struct drm_psb_private *dev_priv, + uint32_t src_offset, uint32_t src_stride, + uint32_t src_format, uint32_t dst_offset, + uint32_t dst_stride, uint32_t dst_format, + uint16_t src_x, uint16_t src_y, + uint16_t dst_x, uint16_t dst_y, + uint16_t size_x, uint16_t size_y) +{ + uint32_t blit_cmd; + uint32_t buffer[10]; + uint32_t *buf; + uint32_t direction; + + buf = buffer; + + direction = + psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y); + + if (direction == PSB_2D_COPYORDER_BR2TL || + direction == PSB_2D_COPYORDER_TR2BL) { + src_x += size_x - 1; + dst_x += size_x - 1; + } + if (direction == PSB_2D_COPYORDER_BR2TL || + direction == PSB_2D_COPYORDER_BL2TR) { + src_y += size_y - 1; + dst_y += size_y - 1; + } + + blit_cmd = + PSB_2D_BLIT_BH | + PSB_2D_ROT_NONE | + PSB_2D_DSTCK_DISABLE | + PSB_2D_SRCCK_DISABLE | + PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction; + + *buf++ = PSB_2D_FENCE_BH; + *buf++ = + PSB_2D_DST_SURF_BH | dst_format | (dst_stride << + PSB_2D_DST_STRIDE_SHIFT); + *buf++ = dst_offset; + *buf++ = + PSB_2D_SRC_SURF_BH | src_format | (src_stride << + PSB_2D_SRC_STRIDE_SHIFT); + *buf++ = src_offset; + *buf++ = + PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) | + (src_y << PSB_2D_SRCOFF_YSTART_SHIFT); + *buf++ = blit_cmd; + *buf++ = + (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y << + PSB_2D_DST_YSTART_SHIFT); + *buf++ = + (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y << + PSB_2D_DST_YSIZE_SHIFT); + *buf++ = PSB_2D_FLUSH_BH; + + return psbfb_2d_submit(dev_priv, buffer, buf - buffer); +} + +static void psbfb_copyarea_accel(struct fb_info *info, + const struct fb_copyarea *a) +{ + struct psbfb_par *par = info->par; + struct psb_framebuffer *psbfb = par->psbfb; + struct drm_framebuffer *fb = &psbfb->base; + struct drm_psb_private *dev_priv = par->dev->dev_private; + uint32_t offset; + uint32_t stride; + uint32_t src_format; + uint32_t dst_format; + + if (!fb) + return; + + offset = psbfb->offset; + stride = fb->pitch; + + switch (fb->depth) { + case 8: + src_format = PSB_2D_SRC_332RGB; + dst_format = PSB_2D_DST_332RGB; + break; + case 15: + src_format = PSB_2D_SRC_555RGB; + dst_format = PSB_2D_DST_555RGB; + break; + case 16: + src_format = PSB_2D_SRC_565RGB; + dst_format = PSB_2D_DST_565RGB; + break; + case 24: + case 32: + /* this is wrong but since we don't do blending its okay */ + src_format = PSB_2D_SRC_8888ARGB; + dst_format = PSB_2D_DST_8888ARGB; + break; + default: + /* software fallback */ + cfb_copyarea(info, a); + return; + } + + psb_accel_2d_copy(dev_priv, + offset, stride, src_format, + offset, stride, dst_format, + a->sx, a->sy, a->dx, a->dy, a->width, a->height); +} + +static void psbfb_copyarea(struct fb_info *info, + const struct fb_copyarea *region) +{ + struct psbfb_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + + if (unlikely(info->state != FBINFO_STATE_RUNNING)) + return; + + if (info->flags & FBINFO_HWACCEL_DISABLED) + return cfb_copyarea(info, region); + /* + * psbfb_copyarea is atomic so need to do instantaneous check of + * power on + */ + if 
(powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) || powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) || + !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) + return cfb_copyarea(info, region); + + if (psb_2d_trylock(dev_priv)) { + psbfb_copyarea_accel(info, region); + psb_2d_unlock(dev_priv); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + } else + cfb_copyarea(info, region); +} +#endif +void psbfb_imageblit(struct fb_info *info, const struct fb_image *image) +{ + if (unlikely(info->state != FBINFO_STATE_RUNNING)) + return; + + cfb_imageblit(info, image); +} + +static void psbfb_onoff(struct fb_info *info, int dpms_mode) +{ + struct psbfb_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + int i; + + /* + * For each CRTC in this fb, find all associated encoders + * and turn them off, then turn off the CRTC. + */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = + crtc->helper_private; + + for (i = 0; i < par->crtc_count; i++) + if (crtc->base.id == par->crtc_ids[i]) + break; + + if (i == par->crtc_count) + continue; + + if (dpms_mode == DRM_MODE_DPMS_ON) + crtc_funcs->dpms(crtc, dpms_mode); + + /* Found a CRTC on this fb, now find encoders */ + list_for_each_entry(encoder, + &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + struct drm_encoder_helper_funcs + *encoder_funcs; + encoder_funcs = encoder->helper_private; + encoder_funcs->dpms(encoder, dpms_mode); + } + } + + if (dpms_mode == DRM_MODE_DPMS_OFF) + crtc_funcs->dpms(crtc, dpms_mode); + } +} + +static int psbfb_blank(int blank_mode, struct fb_info *info) +{ + struct psbfb_par *par = info->par; + + par->dpms_state = blank_mode; + PSB_DEBUG_PM("psbfb_blank \n"); + switch (blank_mode) { + case FB_BLANK_UNBLANK: + psbfb_onoff(info, DRM_MODE_DPMS_ON); + break; + case FB_BLANK_NORMAL: + psbfb_onoff(info, DRM_MODE_DPMS_STANDBY); + break; + case FB_BLANK_HSYNC_SUSPEND: + psbfb_onoff(info, DRM_MODE_DPMS_STANDBY); + break; + case FB_BLANK_VSYNC_SUSPEND: + psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND); + break; + case FB_BLANK_POWERDOWN: + psbfb_onoff(info, DRM_MODE_DPMS_OFF); + break; + } + + return 0; +} + + +static int psbfb_kms_off(struct drm_device *dev, int suspend) +{ + struct drm_framebuffer *fb = 0; + DRM_DEBUG("psbfb_kms_off_ioctl\n"); + + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(fb, &dev->mode_config.fb_list, head) { + struct fb_info *info = fb->fbdev; + + if (suspend) { + fb_set_suspend(info, 1); + psbfb_blank(FB_BLANK_POWERDOWN, info); + } + } + mutex_unlock(&dev->mode_config.mutex); + return 0; +} + +int psbfb_kms_off_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + + if (drm_psb_no_fb) + return 0; + acquire_console_sem(); + ret = psbfb_kms_off(dev, 0); + release_console_sem(); + + return ret; +} + +static int psbfb_kms_on(struct drm_device *dev, int resume) +{ + struct drm_framebuffer *fb = 0; + + DRM_DEBUG("psbfb_kms_on_ioctl\n"); + + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(fb, &dev->mode_config.fb_list, head) { + struct fb_info *info = fb->fbdev; + + if (resume) { + fb_set_suspend(info, 0); + psbfb_blank(FB_BLANK_UNBLANK, info); + } + + } + mutex_unlock(&dev->mode_config.mutex); + + return 0; +} + +int psbfb_kms_on_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + + if (drm_psb_no_fb) + return 0; + acquire_console_sem(); 
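/*
 * Editor's note: psbfb_blank() above maps the five fbdev blanking levels
 * onto DRM DPMS states, and psbfb_onoff() orders the transitions so a
 * CRTC is powered up before its encoders but powered down only after
 * them. A condensed sketch of the same mapping:
 */
#if 0	/* illustrative sketch only, not built */
static int fb_blank_to_dpms(int blank_mode)
{
	switch (blank_mode) {
	case FB_BLANK_UNBLANK:
		return DRM_MODE_DPMS_ON;
	case FB_BLANK_NORMAL:
	case FB_BLANK_HSYNC_SUSPEND:
		return DRM_MODE_DPMS_STANDBY;
	case FB_BLANK_VSYNC_SUSPEND:
		return DRM_MODE_DPMS_SUSPEND;
	case FB_BLANK_POWERDOWN:
		return DRM_MODE_DPMS_OFF;
	default:
		return DRM_MODE_DPMS_ON;
	}
}
#endif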
+ ret = psbfb_kms_on(dev, 0); + release_console_sem(); + drm_helper_disable_unused_functions(dev); + return ret; +} + +void psbfb_suspend(struct drm_device *dev) +{ + acquire_console_sem(); + psbfb_kms_off(dev, 1); + release_console_sem(); +} + +void psbfb_resume(struct drm_device *dev) +{ + acquire_console_sem(); + psbfb_kms_on(dev, 1); + release_console_sem(); + drm_helper_disable_unused_functions(dev); +} + +static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct psbfb_par *par = info->par; + struct psb_framebuffer *psbfb = par->psbfb; + struct ttm_buffer_object *bo = psbfb->bo; + unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long offset = vma->vm_pgoff; + + if (vma->vm_pgoff != 0) + return -EINVAL; + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + if (offset + size > bo->num_pages) + return -EINVAL; + + mutex_lock(&bo->mutex); + if (!psbfb->addr_space) + psbfb->addr_space = vma->vm_file->f_mapping; + mutex_unlock(&bo->mutex); + + return ttm_fbdev_mmap(vma, bo); +} + +int psbfb_sync(struct fb_info *info) +{ + struct psbfb_par *par = info->par; + struct drm_psb_private *dev_priv = par->dev->dev_private; + + if (psb_2d_trylock(dev_priv)) { + /* + * psbfb_sync is atomic so need to do instantaneous check of + * power on + */ + if (!powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) && + !powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) && + powermgmt_is_hw_on(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND)) + psb_idle_2d(par->dev); + + psb_2d_unlock(dev_priv); + } else + udelay(5); + + return 0; +} + +static struct fb_ops psbfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = psbfb_check_var, + .fb_set_par = psbfb_set_par, + .fb_setcolreg = psbfb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_mmap = psbfb_mmap, + /*.fb_sync = psbfb_sync,*/ + .fb_blank = psbfb_blank, +}; + +static struct drm_mode_set panic_mode; + +int psbfb_panic(struct notifier_block *n, unsigned long ununsed, + void *panic_str) +{ + DRM_ERROR("panic occurred, switching back to text console\n"); + drm_crtc_helper_set_config(&panic_mode); + + return 0; +} +EXPORT_SYMBOL(psbfb_panic); + +static struct notifier_block paniced = { + .notifier_call = psbfb_panic, +}; + + +static struct drm_framebuffer *psb_framebuffer_create + (struct drm_device *dev, struct drm_mode_fb_cmd *r, + void *mm_private) +{ + struct psb_framebuffer *fb; + int ret; + + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) + return NULL; + + ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs); + + if (ret) + goto err; + + drm_helper_mode_fill_fb_struct(&fb->base, r); + + fb->bo = mm_private; + + return &fb->base; + +err: + kfree(fb); + return NULL; +} + +static struct drm_framebuffer *psb_user_framebuffer_create + (struct drm_device *dev, struct drm_file *filp, + struct drm_mode_fb_cmd *r) +{ + struct psb_framebuffer *psbfb; + struct ttm_buffer_object *bo = NULL; + struct drm_framebuffer *fb; + struct fb_info *info; + struct ttm_bo_kmap_obj tmp_kmap; + bool is_iomem; + uint64_t size; + + bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle); + if (!bo) + return NULL; + /*the buffer is used as fb, then it should not be put in swap list*/ + list_del_init(&bo->swap); + + /* JB: TODO not drop, make smarter */ + size = ((uint64_t) bo->num_pages) << PAGE_SHIFT; + if (size < r->height * r->pitch) + return NULL; + + /* JB: TODO not drop, refcount buffer */ +// return psb_framebuffer_create(dev, r, bo); + + fb = 
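/*
 * Editor's note: the only validation applied to a user-supplied
 * framebuffer above is that the backing TTM object holds at least
 * height * pitch bytes. The buffer size itself is computed in 64 bits so
 * num_pages << PAGE_SHIFT cannot overflow on 32-bit, though the
 * height * pitch product in the code above is still a 32-bit multiply.
 * The same check as a stand-alone helper (hypothetical name, with the
 * product promoted first):
 */
#if 0	/* illustrative sketch only, not built */
static bool psb_fb_fits_bo(const struct ttm_buffer_object *bo,
			   uint32_t height, uint32_t pitch)
{
	uint64_t bo_size = (uint64_t) bo->num_pages << PAGE_SHIFT;

	return bo_size >= (uint64_t) height * pitch;
}
#endif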
psb_framebuffer_create(dev, r, bo); + if (!fb) { + DRM_ERROR("failed to allocate fb.\n"); + return NULL; + } + + psbfb = to_psb_fb(fb); + psbfb->bo = bo; + + info = framebuffer_alloc(sizeof(struct psbfb_par), &dev->pdev->dev); + if (!info) { + return NULL; + } + + strcpy(info->fix.id, "psbfb"); + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + info->fix.type_aux = 0; + info->fix.xpanstep = 1; /* doing it in hw */ + info->fix.ypanstep = 1; /* doing it in hw */ + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_I830; + info->fix.type_aux = 0; + + info->flags = FBINFO_DEFAULT; + + info->fbops = &psbfb_ops; + + info->fix.line_length = fb->pitch; + info->fix.smem_start = + dev->mode_config.fb_base + psbfb->bo->offset; + info->fix.smem_len = size; + + info->flags = FBINFO_DEFAULT; + + if (ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap) != 0) { + DRM_ERROR("error mapping fb\n"); + return NULL; + } + + psbfb->kmap = tmp_kmap; + + info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem); + info->screen_size = size; + +/* it is called for kms flip, the back buffer has been rendered, then we should not clear it*/ +#if 0 + if (is_iomem) + memset_io(info->screen_base, 0, size); + else + memset(info->screen_base, 0, size); +#endif + info->pseudo_palette = fb->pseudo_palette; + info->var.xres_virtual = fb->width; + info->var.yres_virtual = fb->height; + info->var.bits_per_pixel = fb->bits_per_pixel; + info->var.xoffset = 0; + info->var.yoffset = 0; + info->var.activate = FB_ACTIVATE_NOW; + info->var.height = -1; + info->var.width = -1; + + info->var.xres = r->width; + info->var.yres = r->height; + + info->fix.mmio_start = pci_resource_start(dev->pdev, 0); + info->fix.mmio_len = pci_resource_len(dev->pdev, 0); + + info->pixmap.size = 64 * 1024; + info->pixmap.buf_align = 8; + info->pixmap.access_align = 32; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; + + fill_fb_bitfield(&info->var, fb->depth); + + register_framebuffer(info); + + fb->fbdev = info; + + return fb; +} + +int psbfb_create(struct drm_device *dev, uint32_t fb_width, + uint32_t fb_height, uint32_t surface_width, + uint32_t surface_height, struct psb_framebuffer **psbfb_p) +{ + struct fb_info *info; + struct psbfb_par *par; + struct drm_framebuffer *fb; + struct psb_framebuffer *psbfb; + struct ttm_bo_kmap_obj tmp_kmap; + struct drm_mode_fb_cmd mode_cmd; + struct device *device = &dev->pdev->dev; + struct ttm_bo_device *bdev = &psb_priv(dev)->bdev; + struct ttm_buffer_object *fbo = NULL; + int size, aligned_size, ret; + bool is_iomem; + + mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */ + mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */ + + mode_cmd.bpp = 32; + mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8); + mode_cmd.depth = 24; + + size = mode_cmd.pitch * mode_cmd.height; + aligned_size = ALIGN(size, PAGE_SIZE); + ret = ttm_buffer_object_create(bdev, + aligned_size, + ttm_bo_type_kernel, + TTM_PL_FLAG_TT | + TTM_PL_FLAG_VRAM | + TTM_PL_FLAG_NO_EVICT, + 0, 0, 0, NULL, &fbo); + + if (unlikely(ret != 0)) { + DRM_ERROR("failed to allocate framebuffer.\n"); + return -ENOMEM; + } + + mutex_lock(&dev->struct_mutex); + fb = psb_framebuffer_create(dev, &mode_cmd, fbo); + if (!fb) { + DRM_ERROR("failed to allocate fb.\n"); + ret = -ENOMEM; + goto out_err0; + } + psbfb = to_psb_fb(fb); + psbfb->bo = fbo; + + list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); + info = framebuffer_alloc(sizeof(struct psbfb_par), 
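/*
 * Editor's note: the kernel framebuffer created above is sized as
 * pitch * height rounded up to a whole page, with bpp hard-coded to 32
 * and depth to 24. The ((bpp + 1) / 8) expression happens to give the
 * right answer for the common 8/15/16/24/32 bpp values but is not a
 * general ceiling division; ((bpp + 7) / 8) is. Sketch of the arithmetic:
 */
#if 0	/* illustrative sketch only, not built */
static unsigned long fb_backing_size(uint32_t width, uint32_t height,
				     uint32_t bpp)
{
	uint32_t pitch = width * ((bpp + 7) / 8);	/* bytes per line */

	return ALIGN((unsigned long) pitch * height, PAGE_SIZE);
}
#endif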
device); + if (!info) { + ret = -ENOMEM; + goto out_err1; + } + + par = info->par; + par->psbfb = psbfb; + + strcpy(info->fix.id, "psbfb"); + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + info->fix.type_aux = 0; + info->fix.xpanstep = 1; /* doing it in hw */ + info->fix.ypanstep = 1; /* doing it in hw */ + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_I830; + info->fix.type_aux = 0; + + info->flags = FBINFO_DEFAULT; + + info->fbops = &psbfb_ops; + + info->fix.line_length = fb->pitch; + info->fix.smem_start = + dev->mode_config.fb_base + psbfb->bo->offset; + info->fix.smem_len = size; + + info->flags = FBINFO_DEFAULT; + + ret = ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap); + if (ret) { + DRM_ERROR("error mapping fb: %d\n", ret); + goto out_err2; + } + + + info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem); + info->screen_size = size; + + if (is_iomem) + memset_io(info->screen_base, 0, size); + else + memset(info->screen_base, 0, size); + + info->pseudo_palette = fb->pseudo_palette; + info->var.xres_virtual = fb->width; + info->var.yres_virtual = fb->height; + info->var.bits_per_pixel = fb->bits_per_pixel; + info->var.xoffset = 0; + info->var.yoffset = 0; + info->var.activate = FB_ACTIVATE_NOW; + info->var.height = -1; + info->var.width = -1; + + info->var.xres = fb_width; + info->var.yres = fb_height; + + info->fix.mmio_start = pci_resource_start(dev->pdev, 0); + info->fix.mmio_len = pci_resource_len(dev->pdev, 0); + + info->pixmap.size = 64 * 1024; + info->pixmap.buf_align = 8; + info->pixmap.access_align = 32; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; + + DRM_DEBUG("fb depth is %d\n", fb->depth); + DRM_DEBUG(" pitch is %d\n", fb->pitch); + fill_fb_bitfield(&info->var, fb->depth); + + fb->fbdev = info; + + par->dev = dev; + + /* To allow resizing without swapping buffers */ + printk(KERN_INFO"allocated %dx%d fb: 0x%08lx, bo %p\n", + psbfb->base.width, + psbfb->base.height, psbfb->bo->offset, psbfb->bo); + + if (psbfb_p) + *psbfb_p = psbfb; + + mutex_unlock(&dev->struct_mutex); + + return 0; +out_err2: + unregister_framebuffer(info); +out_err1: + fb->funcs->destroy(fb); +out_err0: + mutex_unlock(&dev->struct_mutex); + ttm_bo_unref(&fbo); + return ret; +} + +static int psbfb_multi_fb_probe_crtc(struct drm_device *dev, + struct drm_crtc *crtc) +{ + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct drm_framebuffer *fb = crtc->fb; + struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); + struct drm_connector *connector; + struct fb_info *info; + struct psbfb_par *par; + struct drm_mode_set *modeset; + unsigned int width, height; + int new_fb = 0; + int ret, i, conn_count; + + if (!drm_helper_crtc_in_use(crtc)) + return 0; + + if (!crtc->desired_mode) + return 0; + + width = crtc->desired_mode->hdisplay; + height = crtc->desired_mode->vdisplay; + + /* is there an fb bound to this crtc already */ + if (!psb_intel_crtc->mode_set.fb) { + ret = + psbfb_create(dev, width, height, width, height, + &psbfb); + if (ret) + return -EINVAL; + new_fb = 1; + } else { + fb = psb_intel_crtc->mode_set.fb; + if ((fb->width < width) || (fb->height < height)) + return -EINVAL; + } + + info = fb->fbdev; + par = info->par; + + modeset = &psb_intel_crtc->mode_set; + modeset->fb = fb; + conn_count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + if (connector->encoder) + if (connector->encoder->crtc == modeset->crtc) { + modeset->connectors[conn_count] = + 
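/*
 * Editor's note: the loop above collects every connector whose encoder is
 * routed to this CRTC. Note that the overflow BUG() fires only after
 * connectors[conn_count] has been stored and conn_count incremented, so
 * if the array really is INTELFB_CONN_LIMIT entries long the out-of-bounds
 * store happens before the check catches it. A safer shape would be:
 *
 *	if (conn_count >= INTELFB_CONN_LIMIT)
 *		BUG();
 *	modeset->connectors[conn_count++] = connector;
 */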
connector; + conn_count++; + if (conn_count > INTELFB_CONN_LIMIT) + BUG(); + } + } + + for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) + modeset->connectors[i] = NULL; + + par->crtc_ids[0] = crtc->base.id; + + modeset->num_connectors = conn_count; + if (modeset->mode != modeset->crtc->desired_mode) + modeset->mode = modeset->crtc->desired_mode; + + par->crtc_count = 1; + + if (new_fb) { + info->var.pixclock = -1; + if (register_framebuffer(info) < 0) + return -EINVAL; + } else + psbfb_set_par(info); + + printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + info->fix.id); + + /* Switch back to kernel console on panic */ + panic_mode = *modeset; + atomic_notifier_chain_register(&panic_notifier_list, &paniced); + printk(KERN_INFO "registered panic notifier\n"); + + return 0; +} + +static int psbfb_multi_fb_probe(struct drm_device *dev) +{ + + struct drm_crtc *crtc; + int ret = 0; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + ret = psbfb_multi_fb_probe_crtc(dev, crtc); + if (ret) + return ret; + } + return ret; +} + +static int psbfb_single_fb_probe(struct drm_device *dev) +{ + struct drm_crtc *crtc; + struct drm_connector *connector; + unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1; + unsigned int surface_width = 0, surface_height = 0; + int new_fb = 0; + int crtc_count = 0; + int ret, i, conn_count = 0; + struct fb_info *info; + struct psbfb_par *par; + struct drm_mode_set *modeset = NULL; + struct drm_framebuffer *fb = NULL; + struct psb_framebuffer *psbfb = NULL; + + /* first up get a count of crtcs now in use and + * new min/maxes width/heights */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (drm_helper_crtc_in_use(crtc)) { + if (crtc->desired_mode) { + fb = crtc->fb; + if (crtc->desired_mode->hdisplay < + fb_width) + fb_width = + crtc->desired_mode->hdisplay; + + if (crtc->desired_mode->vdisplay < + fb_height) + fb_height = + crtc->desired_mode->vdisplay; + + if (crtc->desired_mode->hdisplay > + surface_width) + surface_width = + crtc->desired_mode->hdisplay; + + if (crtc->desired_mode->vdisplay > + surface_height) + surface_height = + crtc->desired_mode->vdisplay; + + } + crtc_count++; + } + } + + if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { + /* hmm everyone went away - assume VGA cable just fell out + and will come back later. */ + return 0; + } + + /* do we have an fb already? */ + if (list_empty(&dev->mode_config.fb_kernel_list)) { + /* create an fb if we don't have one */ + ret = + psbfb_create(dev, fb_width, fb_height, surface_width, + surface_height, &psbfb); + if (ret) + return -EINVAL; + new_fb = 1; + fb = &psbfb->base; + } else { + fb = list_first_entry(&dev->mode_config.fb_kernel_list, + struct drm_framebuffer, filp_head); + + /* if someone hotplugs something bigger than we have already + * allocated, we are pwned. As really we can't resize an + * fbdev that is in the wild currently due to fbdev not really + * being designed for the lower layers moving stuff around + * under it. - so in the grand style of things - punt. 
*/ + if ((fb->width < surface_width) + || (fb->height < surface_height)) { + DRM_ERROR + ("Framebuffer not large enough to scale" + " console onto.\n"); + return -EINVAL; + } + } + + info = fb->fbdev; + par = info->par; + + crtc_count = 0; + /* okay we need to setup new connector sets in the crtcs */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + modeset = &psb_intel_crtc->mode_set; + modeset->fb = fb; + conn_count = 0; + list_for_each_entry(connector, + &dev->mode_config.connector_list, + head) { + if (connector->encoder) + if (connector->encoder->crtc == + modeset->crtc) { + modeset->connectors[conn_count] = + connector; + conn_count++; + if (conn_count > + INTELFB_CONN_LIMIT) + BUG(); + } + } + + for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) + modeset->connectors[i] = NULL; + + par->crtc_ids[crtc_count++] = crtc->base.id; + + modeset->num_connectors = conn_count; + if (modeset->mode != modeset->crtc->desired_mode) + modeset->mode = modeset->crtc->desired_mode; + } + par->crtc_count = crtc_count; + + if (new_fb) { + info->var.pixclock = -1; + if (register_framebuffer(info) < 0) + return -EINVAL; + } else + psbfb_set_par(info); + + printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + info->fix.id); + + /* Switch back to kernel console on panic */ + panic_mode = *modeset; + atomic_notifier_chain_register(&panic_notifier_list, &paniced); + printk(KERN_INFO "registered panic notifier\n"); + + return 0; +} + +int psbfb_probe(struct drm_device *dev) +{ + int ret = 0; + + DRM_DEBUG("\n"); + + /* something has changed in the lower levels of hell - deal with it + here */ + + /* two modes : a) 1 fb to rule all crtcs. + b) one fb per crtc. + two actions 1) new connected device + 2) device removed. + case a/1 : if the fb surface isn't big enough - + resize the surface fb. + if the fb size isn't big enough - resize fb into surface. + if everything big enough configure the new crtc/etc. + case a/2 : undo the configuration + possibly resize down the fb to fit the new configuration. + case b/1 : see if it is on a new crtc - setup a new fb and add it. + case b/2 : teardown the new fb. 
+ */ + + /* mode a first */ + /* search for an fb */ + if (0 /*i915_fbpercrtc == 1 */) + ret = psbfb_multi_fb_probe(dev); + else + ret = psbfb_single_fb_probe(dev); + + return ret; +} +EXPORT_SYMBOL(psbfb_probe); + +int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) +{ + struct fb_info *info; + struct psb_framebuffer *psbfb = to_psb_fb(fb); + + if (drm_psb_no_fb) + return 0; + + info = fb->fbdev; + + if (info) { + unregister_framebuffer(info); + ttm_bo_kunmap(&psbfb->kmap); + ttm_bo_unref(&psbfb->bo); + framebuffer_release(info); + } + + atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); + memset(&panic_mode, 0, sizeof(struct drm_mode_set)); + return 0; +} +EXPORT_SYMBOL(psbfb_remove); + +static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + /* JB: TODO currently we can't go from a bo to a handle with ttm */ + (void) file_priv; + *handle = 0; + return 0; +} + +static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct drm_device *dev = fb->dev; + if (fb->fbdev) + psbfb_remove(dev, fb); + + /* JB: TODO not drop, refcount buffer */ + drm_framebuffer_cleanup(fb); + + kfree(fb); +} + +static const struct drm_mode_config_funcs psb_mode_funcs = { + .fb_create = psb_user_framebuffer_create, + .fb_changed = psbfb_probe, +}; + +static int psb_create_backlight_property(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private; + struct drm_property *backlight; + + if (dev_priv->backlight_property) + return 0; + + backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE, "backlight", 2); + backlight->values[0] = 0; + backlight->values[1] = 100; + + dev_priv->backlight_property = backlight; + + return 0; +} + +static void psb_setup_outputs(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct drm_connector *connector; + + drm_mode_create_scaling_mode_property(dev); + + psb_create_backlight_property(dev); + + if (IS_MRST(dev)) { + if (dev_priv->iLVDS_enable) + /* Set up integrated LVDS for MRST */ + mrst_lvds_init(dev, &dev_priv->mode_dev); + else { + /* Set up integrated MIPI for MRST */ + mrst_dsi_init(dev, &dev_priv->mode_dev); + } + } else { + psb_intel_lvds_init(dev, &dev_priv->mode_dev); + psb_intel_sdvo_init(dev, SDVOB); + } + + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + struct psb_intel_output *psb_intel_output = + to_psb_intel_output(connector); + struct drm_encoder *encoder = &psb_intel_output->enc; + int crtc_mask = 0, clone_mask = 0; + + /* valid crtcs */ + switch (psb_intel_output->type) { + case INTEL_OUTPUT_SDVO: + crtc_mask = ((1 << 0) | (1 << 1)); + clone_mask = (1 << INTEL_OUTPUT_SDVO); + break; + case INTEL_OUTPUT_LVDS: + if (IS_MRST(dev)) + crtc_mask = (1 << 0); + else + crtc_mask = (1 << 1); + + clone_mask = (1 << INTEL_OUTPUT_LVDS); + break; + case INTEL_OUTPUT_MIPI: + crtc_mask = (1 << 0); + clone_mask = (1 << INTEL_OUTPUT_MIPI); + break; + } + encoder->possible_crtcs = crtc_mask; + encoder->possible_clones = + psb_intel_connector_clones(dev, clone_mask); + } +} + +static void *psb_bo_from_handle(struct drm_device *dev, + struct drm_file *file_priv, + unsigned int handle) +{ + return ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile, + handle); +} + +static size_t psb_bo_size(struct drm_device *dev, void *bof) +{ + struct ttm_buffer_object *bo = bof; + return bo->num_pages << PAGE_SHIFT; +} + 
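/*
 * Editor's note: psb_setup_outputs() above encodes output routing as bit
 * masks: bit N of encoder->possible_crtcs means "this encoder may drive
 * pipe N", so SDVO may use either pipe, LVDS is fixed to pipe 0 on
 * Moorestown and pipe 1 on Poulsbo, and MIPI is pipe 0 only. A sketch of
 * how such a mask is consulted (hypothetical helper):
 */
#if 0	/* illustrative sketch only, not built */
static bool encoder_can_drive_pipe(const struct drm_encoder *encoder,
				   int pipe)
{
	return (encoder->possible_crtcs & (1 << pipe)) != 0;
}
#endif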
+static size_t psb_bo_offset(struct drm_device *dev, void *bof)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	struct ttm_buffer_object *bo = bof;
+
+	size_t offset = bo->offset - dev_priv->pg->gatt_start;
+
+	DRM_DEBUG("Offset %lu\n", (unsigned long) offset);
+	return offset;
+}
+
+static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
+{
+#if 0				/* JB: Not used for the drop */
+	/*
+	 * We should do things like check that the buffer is in a
+	 * scanout-capable placement, and make sure that it is pinned.
+	 */
+#endif
+	return 0;
+}
+
+static int psb_bo_unpin_for_scanout(struct drm_device *dev, void *bo)
+{
+#if 0				/* JB: Not used for the drop */
+#endif
+	return 0;
+}
+
+void psb_modeset_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	int i;
+	int num_pipe;
+
+	/* Init mm functions */
+	mode_dev->bo_from_handle = psb_bo_from_handle;
+	mode_dev->bo_size = psb_bo_size;
+	mode_dev->bo_offset = psb_bo_offset;
+	mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
+	mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.funcs = (void *) &psb_mode_funcs;
+
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
+	/* set memory base */
+	/* MRST and PSB should use BAR 2 */
+	dev->mode_config.fb_base =
+	    pci_resource_start(dev->pdev, 2);
+
+	if (IS_MRST(dev))
+		num_pipe = 1;
+	else
+		num_pipe = 2;
+
+	for (i = 0; i < num_pipe; i++)
+		psb_intel_crtc_init(dev, i, mode_dev);
+
+	psb_setup_outputs(dev);
+
+	/* setup fbs */
+	/* drm_initial_config(dev); */
+}
+
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+	drm_mode_config_cleanup(dev);
+}
diff --git a/drivers/gpu/drm/psb/psb_fb.h b/drivers/gpu/drm/psb/psb_fb.h
new file mode 100644
index 0000000..aa0b23c
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_fb.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2008, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * Authors: + * Eric Anholt + * + **/ + +#ifndef _PSB_FB_H_ +#define _PSB_FB_H_ + +struct psb_framebuffer { + struct drm_framebuffer base; + struct address_space *addr_space; + struct ttm_buffer_object *bo; + struct ttm_bo_kmap_obj kmap; + uint64_t offset; +}; + +#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base) + + +extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask); + +extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t); + +#endif + diff --git a/drivers/gpu/drm/psb/psb_fence.c b/drivers/gpu/drm/psb/psb_fence.c new file mode 100644 index 0000000..b8c64b0 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_fence.c @@ -0,0 +1,359 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include +#include "psb_drv.h" +#include "psb_msvdx.h" +#include "lnc_topaz.h" + +static void psb_print_ta_fence_status(struct ttm_fence_device *fdev) +{ + struct drm_psb_private *dev_priv = + container_of(fdev, struct drm_psb_private, fdev); + struct psb_scheduler_seq *seq = dev_priv->scheduler.seq; + int i; + + for (i=0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) { + DRM_INFO("Type 0x%02x, sequence %lu, reported %d\n", + (1 << i), + (unsigned long) seq->sequence, + seq->reported); + seq++; + } +} + +static void psb_poll_ta(struct ttm_fence_device *fdev, + uint32_t waiting_types) +{ + struct drm_psb_private *dev_priv = + container_of(fdev, struct drm_psb_private, fdev); + uint32_t cur_flag = 1; + uint32_t flags = 0; + uint32_t sequence = 0; + uint32_t remaining = 0xFFFFFFFF; + uint32_t diff; + + struct psb_scheduler *scheduler; + struct psb_scheduler_seq *seq; + struct ttm_fence_class_manager *fc = + &fdev->fence_class[PSB_ENGINE_TA]; + + scheduler = &dev_priv->scheduler; + seq = scheduler->seq; + + while (likely(waiting_types & remaining)) { + if (!(waiting_types & cur_flag)) + goto skip; + if (seq->reported) + goto skip; + if (flags == 0) + sequence = seq->sequence; + else if (sequence != seq->sequence) { + ttm_fence_handler(fdev, PSB_ENGINE_TA, + sequence, flags, 0); + sequence = seq->sequence; + flags = 0; + } + flags |= cur_flag; + + /* + * Sequence may not have ended up on the ring yet. + * In that case, report it but don't mark it as + * reported. A subsequent poll will report it again. 
+ */ + + diff = (fc->latest_queued_sequence - sequence) & + fc->sequence_mask; + if (diff < fc->wrap_diff) + seq->reported = 1; + +skip: + cur_flag <<= 1; + remaining <<= 1; + seq++; + } + + if (flags) + ttm_fence_handler(fdev, PSB_ENGINE_TA, sequence, flags, 0); + +} + +static void psb_poll_other(struct ttm_fence_device *fdev, + uint32_t fence_class, uint32_t waiting_types) +{ + struct drm_psb_private *dev_priv = + container_of(fdev, struct drm_psb_private, fdev); + struct ttm_fence_class_manager *fc = + &fdev->fence_class[fence_class]; + uint32_t sequence; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + if (unlikely(!dev_priv)) + return; + + if (waiting_types) { + switch (fence_class) { + case PSB_ENGINE_VIDEO: + sequence = msvdx_priv->msvdx_current_sequence; + break; + case LNC_ENGINE_ENCODE: + sequence = *((uint32_t *)topaz_priv->topaz_sync_addr); + break; + default: + sequence = dev_priv->comm[fence_class << 4]; + break; + } + + ttm_fence_handler(fdev, fence_class, sequence, + _PSB_FENCE_TYPE_EXE, 0); + + switch (fence_class) { + case PSB_ENGINE_2D: + if (dev_priv->fence0_irq_on && !fc->waiting_types) { + psb_2D_irq_off(dev_priv); + dev_priv->fence0_irq_on = 0; + } else if (!dev_priv->fence0_irq_on + && fc->waiting_types) { + psb_2D_irq_on(dev_priv); + dev_priv->fence0_irq_on = 1; + } + break; +#if 0 + /* + * FIXME: MSVDX irq switching + */ + + case PSB_ENGINE_VIDEO: + if (dev_priv->fence2_irq_on && !fc->waiting_types) { + psb_msvdx_irq_off(dev_priv); + dev_priv->fence2_irq_on = 0; + } else if (!dev_priv->fence2_irq_on + && fc->pending_exe_flush) { + psb_msvdx_irq_on(dev_priv); + dev_priv->fence2_irq_on = 1; + } + break; +#endif + default: + return; + } + } +} + +static void psb_fence_poll(struct ttm_fence_device *fdev, + uint32_t fence_class, uint32_t waiting_types) +{ + if (unlikely((PSB_D_PM & drm_psb_debug) && (fence_class == 0))) + PSB_DEBUG_PM("psb_fence_poll: %d\n", fence_class); + switch (fence_class) { + case PSB_ENGINE_TA: + psb_poll_ta(fdev, waiting_types); + break; + default: + psb_poll_other(fdev, fence_class, waiting_types); + break; + } +} + +void psb_fence_error(struct drm_device *dev, + uint32_t fence_class, + uint32_t sequence, uint32_t type, int error) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct ttm_fence_device *fdev = &dev_priv->fdev; + unsigned long irq_flags; + struct ttm_fence_class_manager *fc = + &fdev->fence_class[fence_class]; + + BUG_ON(fence_class >= PSB_NUM_ENGINES); + write_lock_irqsave(&fc->lock, irq_flags); + ttm_fence_handler(fdev, fence_class, sequence, type, error); + write_unlock_irqrestore(&fc->lock, irq_flags); +} + +int psb_fence_emit_sequence(struct ttm_fence_device *fdev, + uint32_t fence_class, + uint32_t flags, uint32_t *sequence, + unsigned long *timeout_jiffies) +{ + struct drm_psb_private *dev_priv = + container_of(fdev, struct drm_psb_private, fdev); + uint32_t seq = 0; + int ret; + + if (!dev_priv) + return -EINVAL; + + if (fence_class >= PSB_NUM_ENGINES) + return -EINVAL; + + switch (fence_class) { + case PSB_ENGINE_2D: + spin_lock(&dev_priv->sequence_lock); + seq = ++dev_priv->sequence[fence_class]; + spin_unlock(&dev_priv->sequence_lock); + ret = psb_blit_sequence(dev_priv, seq); + if (ret) + return ret; + break; + case PSB_ENGINE_VIDEO: + spin_lock(&dev_priv->sequence_lock); + seq = dev_priv->sequence[fence_class]++; + spin_unlock(&dev_priv->sequence_lock); + break; + case LNC_ENGINE_ENCODE: + spin_lock(&dev_priv->sequence_lock); + 
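/*
 * Editor's note: the poll paths above compare 32-bit sequence numbers
 * modulo the sequence mask: (latest - seq) & mask counts as "already
 * queued" only when the distance is smaller than wrap_diff, which the
 * init code at the end of this file sets to 1 << 30 against a 0xFFFFFFFF
 * mask. The wrap-safe test in isolation:
 */
#if 0	/* illustrative sketch only, not built */
static bool seq_passed(uint32_t latest, uint32_t seq,
		       uint32_t mask, uint32_t wrap_diff)
{
	/* modular distance; small distance => seq is in the past */
	return ((latest - seq) & mask) < wrap_diff;
}
#endif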
seq = dev_priv->sequence[fence_class]++; + spin_unlock(&dev_priv->sequence_lock); + break; + default: + spin_lock(&dev_priv->sequence_lock); + seq = dev_priv->sequence[fence_class]; + spin_unlock(&dev_priv->sequence_lock); + } + + *sequence = seq; + + if (fence_class == PSB_ENGINE_TA) + *timeout_jiffies = jiffies + DRM_HZ / 2; + else + *timeout_jiffies = jiffies + DRM_HZ * 3; + + return 0; +} + +uint32_t psb_fence_advance_sequence(struct drm_device *dev, + uint32_t fence_class) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + uint32_t sequence; + + spin_lock(&dev_priv->sequence_lock); + sequence = ++dev_priv->sequence[fence_class]; + spin_unlock(&dev_priv->sequence_lock); + + return sequence; +} + +static void psb_fence_lockup(struct ttm_fence_object *fence, + uint32_t fence_types) +{ + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); + + if (fence->fence_class == PSB_ENGINE_TA) { + + /* + * The 3D engine has its own lockup detection. + * Just extend the fence expiry time. + */ + + DRM_INFO("Extending 3D fence timeout.\n"); + write_lock(&fc->lock); + + DRM_INFO("Sequence %lu, types 0x%08x signaled 0x%08x\n", + (unsigned long) fence->sequence, fence_types, + fence->info.signaled_types); + + if (time_after_eq(jiffies, fence->timeout_jiffies)) + fence->timeout_jiffies = jiffies + DRM_HZ / 2; + + psb_print_ta_fence_status(fence->fdev); + write_unlock(&fc->lock); + } else if (fence->fence_class == LNC_ENGINE_ENCODE) { + DRM_ERROR + ("TOPAZ timeout (probable lockup) detected on engine %u " + "fence type 0x%08x\n", + (unsigned int) fence->fence_class, + (unsigned int) fence_types); + + write_lock(&fc->lock); + lnc_topaz_handle_timeout(fence->fdev); + ttm_fence_handler(fence->fdev, fence->fence_class, + fence->sequence, fence_types, -EBUSY); + write_unlock(&fc->lock); + } else { + DRM_ERROR + ("GPU timeout (probable lockup) detected on engine %u " + "fence type 0x%08x\n", + (unsigned int) fence->fence_class, + (unsigned int) fence_types); + write_lock(&fc->lock); + ttm_fence_handler(fence->fdev, fence->fence_class, + fence->sequence, fence_types, -EBUSY); + write_unlock(&fc->lock); + } +} + +void psb_fence_handler(struct drm_device *dev, uint32_t fence_class) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct ttm_fence_device *fdev = &dev_priv->fdev; + struct ttm_fence_class_manager *fc = + &fdev->fence_class[fence_class]; + unsigned long irq_flags; + +#ifdef FIX_TG_16 + if (fence_class == PSB_ENGINE_2D) { + + if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) && + (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && + ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & + _PSB_C2B_STATUS_BUSY) == 0)) + psb_resume_ta_2d_idle(dev_priv); + } +#endif + write_lock_irqsave(&fc->lock, irq_flags); + psb_fence_poll(fdev, fence_class, fc->waiting_types); + write_unlock_irqrestore(&fc->lock, irq_flags); +} + + +static struct ttm_fence_driver psb_ttm_fence_driver = { + .has_irq = NULL, + .emit = psb_fence_emit_sequence, + .flush = NULL, + .poll = psb_fence_poll, + .needed_flush = NULL, + .wait = NULL, + .signaled = NULL, + .lockup = psb_fence_lockup, +}; + +int psb_ttm_fence_device_init(struct ttm_fence_device *fdev) +{ + struct drm_psb_private *dev_priv = + container_of(fdev, struct drm_psb_private, fdev); + struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30), + .flush_diff = (1 << 29), + .sequence_mask = 0xFFFFFFFF + }; + + return ttm_fence_device_init(PSB_NUM_ENGINES, + dev_priv->mem_global_ref.object, + fdev, &fci, 1, + &psb_ttm_fence_driver); +} 
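/*
 * Editor's note: psb_gtt.c, which follows, programs the graphics
 * translation table directly: each 32-bit entry packs a page frame number
 * shifted up by PAGE_SHIFT together with valid/cached/read-only/write-only
 * flag bits (see psb_gtt_mask_pte()). A stand-alone sketch of the packing
 * and its inverse, assuming the PSB_PTE_* definitions from psb_reg.h:
 */
#if 0	/* illustrative sketch only, not built */
static uint32_t example_pack_pte(uint32_t pfn, bool cached)
{
	uint32_t pte = (pfn << PAGE_SHIFT) | PSB_PTE_VALID;

	if (cached)
		pte |= PSB_PTE_CACHED;
	return pte;
}

static uint32_t example_pte_to_pfn(uint32_t pte)
{
	return pte >> PAGE_SHIFT;
}
#endif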
diff --git a/drivers/gpu/drm/psb/psb_gtt.c b/drivers/gpu/drm/psb/psb_gtt.c
new file mode 100644
index 0000000..7cb5a3d
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_gtt.c
@@ -0,0 +1,278 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+	uint32_t mask = PSB_PTE_VALID;
+
+	if (type & PSB_MMU_CACHED_MEMORY)
+		mask |= PSB_PTE_CACHED;
+	if (type & PSB_MMU_RO_MEMORY)
+		mask |= PSB_PTE_RO;
+	if (type & PSB_MMU_WO_MEMORY)
+		mask |= PSB_PTE_WO;
+
+	return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
+{
+	struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+
+	if (!tmp)
+		return NULL;
+
+	init_rwsem(&tmp->sem);
+	tmp->dev = dev;
+
+	return tmp;
+}
+
+void psb_gtt_takedown(struct psb_gtt *pg, int free)
+{
+	if (!pg)
+		return;
+
+	if (pg->gtt_map) {
+		iounmap(pg->gtt_map);
+		pg->gtt_map = NULL;
+	}
+	if (pg->initialized) {
+		pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
+				      pg->gmch_ctrl);
+		PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
+		(void) PSB_RVDC32(PSB_PGETBL_CTL);
+	}
+	if (free)
+		kfree(pg);
+}
+
+int psb_gtt_init(struct psb_gtt *pg, int resume)
+{
+	struct drm_device *dev = pg->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned gtt_pages;
+	unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
+	unsigned long rar_stolen_size;
+	unsigned i, num_pages;
+	unsigned pfn_base;
+	uint32_t vram_pages;
+	int ret = 0;
+	uint32_t pte;
+
+	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
+	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+			      pg->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+	pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+	PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+	(void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+	pg->initialized = 1;
+
+	pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
+
+	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+	gtt_pages =
+	    pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
+	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+	    >> PAGE_SHIFT;
+
+	pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
+	vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
+
+	/* CI is not included in the stolen size because of the TOPAZ MMU bug */
+	ci_stolen_size = dev_priv->ci_region_size;
+	/* add CI & RAR share buffer space to
stolen_size */ + /* stolen_size = vram_stolen_size + ci_stolen_size; */ + stolen_size = vram_stolen_size; + + rar_stolen_size = dev_priv->rar_region_size; + stolen_size += rar_stolen_size; + + PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start); + PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start); + PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start); + PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages); + PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024); + + if (resume && (gtt_pages != pg->gtt_pages) && + (stolen_size != pg->stolen_size)) { + DRM_ERROR("GTT resume error.\n"); + ret = -EINVAL; + goto out_err; + } + + pg->gtt_pages = gtt_pages; + pg->stolen_size = stolen_size; + pg->vram_stolen_size = vram_stolen_size; + pg->ci_stolen_size = ci_stolen_size; + pg->rar_stolen_size = rar_stolen_size; + pg->gtt_map = + ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT); + if (!pg->gtt_map) { + DRM_ERROR("Failure to map gtt.\n"); + ret = -ENOMEM; + goto out_err; + } + + /* + * insert vram stolen pages. + */ + + pfn_base = pg->stolen_base >> PAGE_SHIFT; + vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT; + PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n", + num_pages, pfn_base); + for (i = 0; i < num_pages; ++i) { + pte = psb_gtt_mask_pte(pfn_base + i, 0); + iowrite32(pte, pg->gtt_map + i); + } +#if 0 + /* + * insert CI stolen pages + */ + + pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT; + num_pages = ci_stolen_size >> PAGE_SHIFT; + PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n", + num_pages, pfn_base); + for (; i < num_pages; ++i) { + pte = psb_gtt_mask_pte(pfn_base + i, 0); + iowrite32(pte, pg->gtt_map + i); + } +#endif + + /* + * insert RAR stolen pages + */ + if (rar_stolen_size != 0) { + pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT; + num_pages = rar_stolen_size >> PAGE_SHIFT; + PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n", + num_pages, pfn_base); + for (; i < num_pages + vram_pages ; ++i) { + pte = psb_gtt_mask_pte(pfn_base + i - vram_pages, 0); + iowrite32(pte, pg->gtt_map + i); + } + } + /* + * Init rest of gtt. 
+ */ + + pfn_base = page_to_pfn(dev_priv->scratch_page); + pte = psb_gtt_mask_pte(pfn_base, 0); + PSB_DEBUG_INIT("Initializing the rest of a total " + "of %d gtt pages.\n", pg->gatt_pages); + + for (; i < pg->gatt_pages; ++i) + iowrite32(pte, pg->gtt_map + i); + (void) ioread32(pg->gtt_map + i - 1); + + return 0; + +out_err: + psb_gtt_takedown(pg, 0); + return ret; +} + +int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages, + unsigned offset_pages, unsigned num_pages, + unsigned desired_tile_stride, + unsigned hw_tile_stride, int type) +{ + unsigned rows = 1; + unsigned add; + unsigned row_add; + unsigned i; + unsigned j; + uint32_t *cur_page = NULL; + uint32_t pte; + + if (hw_tile_stride) + rows = num_pages / desired_tile_stride; + else + desired_tile_stride = num_pages; + + add = desired_tile_stride; + row_add = hw_tile_stride; + + down_read(&pg->sem); + for (i = 0; i < rows; ++i) { + cur_page = pg->gtt_map + offset_pages; + for (j = 0; j < desired_tile_stride; ++j) { + pte = + psb_gtt_mask_pte(page_to_pfn(*pages++), type); + iowrite32(pte, cur_page++); + } + offset_pages += add; + } + (void) ioread32(cur_page - 1); + up_read(&pg->sem); + + return 0; +} + +int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages, + unsigned num_pages, unsigned desired_tile_stride, + unsigned hw_tile_stride) +{ + struct drm_psb_private *dev_priv = pg->dev->dev_private; + unsigned rows = 1; + unsigned add; + unsigned row_add; + unsigned i; + unsigned j; + uint32_t *cur_page = NULL; + unsigned pfn_base = page_to_pfn(dev_priv->scratch_page); + uint32_t pte = psb_gtt_mask_pte(pfn_base, 0); + + if (hw_tile_stride) + rows = num_pages / desired_tile_stride; + else + desired_tile_stride = num_pages; + + add = desired_tile_stride; + row_add = hw_tile_stride; + + down_read(&pg->sem); + for (i = 0; i < rows; ++i) { + cur_page = pg->gtt_map + offset_pages; + for (j = 0; j < desired_tile_stride; ++j) + iowrite32(pte, cur_page++); + + offset_pages += add; + } + (void) ioread32(cur_page - 1); + up_read(&pg->sem); + + return 0; +} diff --git a/drivers/gpu/drm/psb/psb_hotplug.c b/drivers/gpu/drm/psb/psb_hotplug.c new file mode 100644 index 0000000..38e1f35 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_hotplug.c @@ -0,0 +1,427 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * James C. 
Gualario + * + */ +#include "psb_umevents.h" +#include "psb_hotplug.h" +/** + * inform the kernel of the work to be performed and related function. + * + */ +DECLARE_WORK(hotplug_dev_create_work, &psb_hotplug_dev_create_wq); +DECLARE_WORK(hotplug_dev_remove_work, &psb_hotplug_dev_remove_wq); +DECLARE_WORK(hotplug_dev_change_work, &psb_hotplug_dev_change_wq); +/** + * psb_hotplug_notify_change_um - notify user mode of hotplug changes + * + * @name: name of event to notify user mode of change to + * @state: hotplug state to search for event object in + * + */ +int psb_hotplug_notify_change_um(const char *name, + struct hotplug_state *state) +{ + strcpy(&(state->hotplug_change_wq_data.dev_name_arry + [state->hotplug_change_wq_data.dev_name_write][0]), name); + state->hotplug_change_wq_data.dev_name_arry_rw_status + [state->hotplug_change_wq_data.dev_name_write] = + DRM_HOTPLUG_READY_TO_READ; + if (state->hotplug_change_wq_data.dev_name_read_write_wrap_ack == 1) + state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0; + state->hotplug_change_wq_data.dev_name_write++; + if (state->hotplug_change_wq_data.dev_name_write == + state->hotplug_change_wq_data.dev_name_read) { + state->hotplug_change_wq_data.dev_name_write--; + return IRQ_NONE; + } + if (state->hotplug_change_wq_data.dev_name_write > + DRM_HOTPLUG_RING_DEPTH_MAX) { + state->hotplug_change_wq_data.dev_name_write = 0; + state->hotplug_change_wq_data.dev_name_write_wrap = 1; + } + state->hotplug_change_wq_data.hotplug_dev_list = state->list; + queue_work(state->hotplug_wq, &(state->hotplug_change_wq_data.work)); + return IRQ_HANDLED; +} +/** + * + * psb_hotplug_create_and_notify_um - create and notify user mode of new dev + * + * @name: name to give for new event / device + * @state: hotplug state to track new event /device in + * + */ +int psb_hotplug_create_and_notify_um(const char *name, + struct hotplug_state *state) +{ + strcpy(&(state->hotplug_create_wq_data.dev_name_arry + [state->hotplug_create_wq_data.dev_name_write][0]), name); + state->hotplug_create_wq_data.dev_name_arry_rw_status + [state->hotplug_create_wq_data.dev_name_write] = + DRM_HOTPLUG_READY_TO_READ; + if (state->hotplug_create_wq_data.dev_name_read_write_wrap_ack == 1) + state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0; + state->hotplug_create_wq_data.dev_name_write++; + if (state->hotplug_create_wq_data.dev_name_write == + state->hotplug_create_wq_data.dev_name_read) { + state->hotplug_create_wq_data.dev_name_write--; + return IRQ_NONE; + } + if (state->hotplug_create_wq_data.dev_name_write > + DRM_HOTPLUG_RING_DEPTH_MAX) { + state->hotplug_create_wq_data.dev_name_write = 0; + state->hotplug_create_wq_data.dev_name_write_wrap = 1; + } + state->hotplug_create_wq_data.hotplug_dev_list = state->list; + queue_work(state->hotplug_wq, &(state->hotplug_create_wq_data.work)); + return IRQ_HANDLED; +} +EXPORT_SYMBOL(psb_hotplug_create_and_notify_um); +/** + * psb_hotplug_remove_and_notify_um - remove device and notify user mode + * + * @name: name of event / device to remove + * @state: hotplug state to remove event / device from + * + */ +int psb_hotplug_remove_and_notify_um(const char *name, + struct hotplug_state *state) +{ + strcpy(&(state->hotplug_remove_wq_data.dev_name_arry + [state->hotplug_remove_wq_data.dev_name_write][0]), name); + state->hotplug_remove_wq_data.dev_name_arry_rw_status + [state->hotplug_remove_wq_data.dev_name_write] = + DRM_HOTPLUG_READY_TO_READ; + if (state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack == 1) + 
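/*
 * Editor's note: all three notify helpers in this file share one
 * single-producer ring protocol: copy the device name into
 * dev_name_arry[write], mark the slot READY_TO_READ, advance the write
 * index (backing off with IRQ_NONE when it would collide with the read
 * index), set a wrap flag when the index passes the end, then queue the
 * workqueue consumer. A stripped-down sketch of the producer side, using
 * a hypothetical struct name_ring and strlcpy() instead of the unbounded
 * strcpy() used here:
 */
#if 0	/* illustrative sketch only, not built */
struct name_ring {
	char slot[DRM_HOTPLUG_RING_DEPTH][24];
	int status[DRM_HOTPLUG_RING_DEPTH];
	int read, write, write_wrap;
};

static int ring_push(struct name_ring *r, const char *name)
{
	strlcpy(r->slot[r->write], name, sizeof(r->slot[0]));
	r->status[r->write] = DRM_HOTPLUG_READY_TO_READ;
	if (++r->write == r->read) {	/* ring full: drop the entry */
		r->write--;
		return -EBUSY;
	}
	if (r->write > DRM_HOTPLUG_RING_DEPTH_MAX) {
		r->write = 0;
		r->write_wrap = 1;
	}
	return 0;
}
#endif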
state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
+	state->hotplug_remove_wq_data.dev_name_write++;
+	if (state->hotplug_remove_wq_data.dev_name_write ==
+	    state->hotplug_remove_wq_data.dev_name_read) {
+		state->hotplug_remove_wq_data.dev_name_write--;
+		return IRQ_NONE;
+	}
+	if (state->hotplug_remove_wq_data.dev_name_write >
+	    DRM_HOTPLUG_RING_DEPTH_MAX) {
+		state->hotplug_remove_wq_data.dev_name_write = 0;
+		state->hotplug_remove_wq_data.dev_name_write_wrap = 1;
+	}
+	state->hotplug_remove_wq_data.hotplug_dev_list = state->list;
+	queue_work(state->hotplug_wq, &(state->hotplug_remove_wq_data.work));
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(psb_hotplug_remove_and_notify_um);
+/**
+ * psb_hotplug_device_pool_create_and_init - make new hotplug device pool
+ *
+ * @parent_kobj: parent kobject to associate hotplug kset with
+ * @state: hotplug state to associate workqueues with
+ *
+ */
+struct umevent_list *psb_hotplug_device_pool_create_and_init(
+					struct kobject *parent_kobj,
+					struct hotplug_state *state)
+{
+	struct umevent_list *new_hotplug_dev_list;
+
+	new_hotplug_dev_list = psb_umevent_create_list();
+	if (!new_hotplug_dev_list)
+		return NULL;
+
+	psb_umevent_init(parent_kobj, new_hotplug_dev_list, "psb_hotplug");
+
+	state->hotplug_wq = create_singlethread_workqueue("hotplug-wq");
+	if (!state->hotplug_wq) {
+		psb_umevent_cleanup(new_hotplug_dev_list);
+		return NULL;
+	}
+
+	INIT_WORK(&state->hotplug_create_wq_data.work,
+		  psb_hotplug_dev_create_wq);
+	INIT_WORK(&state->hotplug_remove_wq_data.work,
+		  psb_hotplug_dev_remove_wq);
+	INIT_WORK(&state->hotplug_change_wq_data.work,
+		  psb_hotplug_dev_change_wq);
+
+	state->hotplug_create_wq_data.dev_name_read = 0;
+	state->hotplug_create_wq_data.dev_name_write = 0;
+	state->hotplug_create_wq_data.dev_name_write_wrap = 0;
+	state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
+	memset(&(state->hotplug_create_wq_data.dev_name_arry_rw_status[0]),
+	       0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
+
+	state->hotplug_remove_wq_data.dev_name_read = 0;
+	state->hotplug_remove_wq_data.dev_name_write = 0;
+	state->hotplug_remove_wq_data.dev_name_write_wrap = 0;
+	state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
+	memset(&(state->hotplug_remove_wq_data.dev_name_arry_rw_status[0]),
+	       0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
+
+	state->hotplug_change_wq_data.dev_name_read = 0;
+	state->hotplug_change_wq_data.dev_name_write = 0;
+	state->hotplug_change_wq_data.dev_name_write_wrap = 0;
+	state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
+	memset(&(state->hotplug_change_wq_data.dev_name_arry_rw_status[0]),
+	       0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
+
+	return new_hotplug_dev_list;
+}
+EXPORT_SYMBOL(psb_hotplug_device_pool_create_and_init);
+/**
+ * psb_hotplug_init - init hotplug subsystem
+ *
+ * @parent_kobj: parent kobject to associate hotplug state with
+ *
+ */
+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj)
+{
+	struct hotplug_state *state;
+
+	state = kzalloc(sizeof(struct hotplug_state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+	state->list = psb_hotplug_device_pool_create_and_init(
+							parent_kobj,
+							state);
+	return state;
+}
+/**
+ * psb_hotplug_device_pool_destroy - destroy all hotplug related resources
+ *
+ * @state: hotplug state to destroy
+ *
+ */
+void psb_hotplug_device_pool_destroy(struct hotplug_state *state)
+{
+	flush_workqueue(state->hotplug_wq);
+	destroy_workqueue(state->hotplug_wq);
+	psb_umevent_cleanup(state->list);
+	kfree(state);
+}
+EXPORT_SYMBOL(psb_hotplug_device_pool_destroy);
+/**
+ * psb_hotplug_dev_create_wq - create
workqueue implementation + * + * @work: work struct to use for kernel scheduling + * + */ +void psb_hotplug_dev_create_wq(struct work_struct *work) +{ + struct hotplug_disp_workqueue_data *wq_data; + struct umevent_obj *wq_working_hotplug_disp_obj; + wq_data = to_hotplug_disp_workqueue_data(work); + if (wq_data->dev_name_write_wrap == 1) { + wq_data->dev_name_read_write_wrap_ack = 1; + wq_data->dev_name_write_wrap = 0; + while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + wq_working_hotplug_disp_obj = + psb_create_umevent_obj( + &wq_data->dev_name_arry + [wq_data->dev_name_read][0], + wq_data->hotplug_dev_list); + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + psb_umevent_notify + (wq_working_hotplug_disp_obj); + } + wq_data->dev_name_read++; + } + wq_data->dev_name_read = 0; + while (wq_data->dev_name_read < wq_data->dev_name_write-1) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + wq_working_hotplug_disp_obj = + psb_create_umevent_obj( + &wq_data->dev_name_arry + [wq_data->dev_name_read][0], + wq_data->hotplug_dev_list); + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + psb_umevent_notify + (wq_working_hotplug_disp_obj); + } + wq_data->dev_name_read++; + } + } else { + while (wq_data->dev_name_read < wq_data->dev_name_write) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + wq_working_hotplug_disp_obj = + psb_create_umevent_obj( + &wq_data->dev_name_arry + [wq_data->dev_name_read][0], + wq_data->hotplug_dev_list); + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + psb_umevent_notify + (wq_working_hotplug_disp_obj); + } + wq_data->dev_name_read++; + } + } + if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX) + wq_data->dev_name_read = 0; +} +EXPORT_SYMBOL(psb_hotplug_dev_create_wq); +/** + * psb_hotplug_dev_remove_wq - remove workqueue implementation + * + * @work: work struct to use for kernel scheduling + * + */ +void psb_hotplug_dev_remove_wq(struct work_struct *work) +{ + struct hotplug_disp_workqueue_data *wq_data; + wq_data = to_hotplug_disp_workqueue_data(work); + if (wq_data->dev_name_write_wrap == 1) { + wq_data->dev_name_read_write_wrap_ack = 1; + wq_data->dev_name_write_wrap = 0; + while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + psb_umevent_remove_from_list( + wq_data->hotplug_dev_list, + &wq_data->dev_name_arry + [wq_data->dev_name_read][0]); + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + } + wq_data->dev_name_read++; + } + wq_data->dev_name_read = 0; + while (wq_data->dev_name_read < wq_data->dev_name_write-1) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + psb_umevent_remove_from_list( + wq_data->hotplug_dev_list, + &wq_data->dev_name_arry + [wq_data->dev_name_read][0]); + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + } + wq_data->dev_name_read++; + } + } else { + while (wq_data->dev_name_read < wq_data->dev_name_write) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + psb_umevent_remove_from_list( + 
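/*
 * Editor's note: the consumer side (this function and its create/change
 * siblings) drains the ring from workqueue context and copes with a
 * wrapped producer in two passes: first from the read index to the end
 * of the ring, then from slot 0 up to write - 1. Only slots marked
 * READY_TO_READ are consumed, and each is flipped to READ_COMPLETE as it
 * is handled. One pass, condensed (reusing the hypothetical name_ring
 * from the earlier sketch):
 *
 *	while (r->read < limit) {
 *		if (r->status[r->read] == DRM_HOTPLUG_READY_TO_READ) {
 *			r->status[r->read] = DRM_HOTPLUG_READ_COMPLETE;
 *			consume(r->slot[r->read]);
 *		}
 *		r->read++;
 *	}
 */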
wq_data->hotplug_dev_list, + &wq_data->dev_name_arry + [wq_data->dev_name_read][0]); + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + } + wq_data->dev_name_read++; + } + } + if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX) + wq_data->dev_name_read = 0; +} +EXPORT_SYMBOL(psb_hotplug_dev_remove_wq); +/** + * psb_hotplug_dev_change_wq - change workqueue implementation + * + * @work: work struct to use for kernel scheduling + * + */ +void psb_hotplug_dev_change_wq(struct work_struct *work) +{ + struct hotplug_disp_workqueue_data *wq_data; + struct umevent_obj *wq_working_hotplug_disp_obj; + wq_data = to_hotplug_disp_workqueue_data(work); + if (wq_data->dev_name_write_wrap == 1) { + wq_data->dev_name_read_write_wrap_ack = 1; + wq_data->dev_name_write_wrap = 0; + while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + + wq_working_hotplug_disp_obj = + psb_umevent_find_obj( + &wq_data->dev_name_arry + [wq_data->dev_name_read][0], + wq_data->hotplug_dev_list); + psb_umevent_notify_change_gfxsock + (wq_working_hotplug_disp_obj); + } + wq_data->dev_name_read++; + } + wq_data->dev_name_read = 0; + while (wq_data->dev_name_read < wq_data->dev_name_write-1) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + + wq_working_hotplug_disp_obj = + psb_umevent_find_obj( + &wq_data->dev_name_arry + [wq_data->dev_name_read][0], + wq_data->hotplug_dev_list); + psb_umevent_notify_change_gfxsock + (wq_working_hotplug_disp_obj); + } + wq_data->dev_name_read++; + } + } else { + while (wq_data->dev_name_read < wq_data->dev_name_write) { + if (wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] == + DRM_HOTPLUG_READY_TO_READ) { + wq_data->dev_name_arry_rw_status + [wq_data->dev_name_read] = + DRM_HOTPLUG_READ_COMPLETE; + + wq_working_hotplug_disp_obj = + psb_umevent_find_obj( + &wq_data->dev_name_arry + [wq_data->dev_name_read][0], + wq_data->hotplug_dev_list); + psb_umevent_notify_change_gfxsock + (wq_working_hotplug_disp_obj); + } + wq_data->dev_name_read++; + } + } + if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX) + wq_data->dev_name_read = 0; +} +EXPORT_SYMBOL(psb_hotplug_dev_change_wq); diff --git a/drivers/gpu/drm/psb/psb_hotplug.h b/drivers/gpu/drm/psb/psb_hotplug.h new file mode 100644 index 0000000..8a63efc --- /dev/null +++ b/drivers/gpu/drm/psb/psb_hotplug.h @@ -0,0 +1,96 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    James C. Gualario
+ *
+ */
+#ifndef _PSB_HOTPLUG_H_
+#define _PSB_HOTPLUG_H_
+/**
+ * required includes
+ *
+ */
+#include "psb_umevents.h"
+/**
+ * hotplug specific defines
+ *
+ */
+#define DRM_HOTPLUG_RING_DEPTH 256
+#define DRM_HOTPLUG_RING_DEPTH_MAX (DRM_HOTPLUG_RING_DEPTH-1)
+#define DRM_HOTPLUG_READY_TO_READ 1
+#define DRM_HOTPLUG_READ_COMPLETE 2
+/**
+ * hotplug workqueue data struct.
+ */
+struct hotplug_disp_workqueue_data {
+	struct work_struct work;
+	const char *dev_name;
+	int dev_name_write;
+	int dev_name_read;
+	int dev_name_write_wrap;
+	int dev_name_read_write_wrap_ack;
+	char dev_name_arry[DRM_HOTPLUG_RING_DEPTH][24];
+	int dev_name_arry_rw_status[DRM_HOTPLUG_RING_DEPTH];
+	struct umevent_list *hotplug_dev_list;
+};
+/**
+ * hotplug state structure
+ *
+ */
+struct hotplug_state {
+	struct workqueue_struct *hotplug_wq;
+	struct hotplug_disp_workqueue_data hotplug_remove_wq_data;
+	struct hotplug_disp_workqueue_data hotplug_create_wq_data;
+	struct hotplug_disp_workqueue_data hotplug_change_wq_data;
+	struct umevent_list *list;
+};
+/**
+ * main interface function prototypes for hotplug support.
+ *
+ */
+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj);
+extern int psb_hotplug_notify_change_um(const char *name,
+					struct hotplug_state *state);
+extern int psb_hotplug_create_and_notify_um(const char *name,
+					    struct hotplug_state *state);
+extern int psb_hotplug_remove_and_notify_um(const char *name,
+					    struct hotplug_state *state);
+extern struct umevent_list *psb_hotplug_device_pool_create_and_init(
+					struct kobject *parent_kobj,
+					struct hotplug_state *state);
+extern void psb_hotplug_device_pool_destroy(struct hotplug_state *state);
+/**
+ * to go back and forth between work struct and workqueue data
+ *
+ */
+#define to_hotplug_disp_workqueue_data(x) \
+	container_of(x, struct hotplug_disp_workqueue_data, work)
+
+/**
+ * function prototypes for workqueue implementation
+ *
+ */
+extern void psb_hotplug_dev_create_wq(struct work_struct *work);
+extern void psb_hotplug_dev_remove_wq(struct work_struct *work);
+extern void psb_hotplug_dev_change_wq(struct work_struct *work);
+#endif
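One note on the to_hotplug_disp_workqueue_data() macro above: it is a plain
container_of() lookup, which is how each work handler recovers its ring
state from the bare work_struct the workqueue core hands it. A minimal
sketch (illustration only; example_handler is not part of the patch):

	static void example_handler(struct work_struct *work)
	{
		struct hotplug_disp_workqueue_data *wq_data =
			to_hotplug_disp_workqueue_data(work);

		/* wq_data now points at whichever hotplug_state member
		 * (create/remove/change) INIT_WORK() registered us on. */
	}
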
diff --git a/drivers/gpu/drm/psb/psb_intel_bios.c b/drivers/gpu/drm/psb/psb_intel_bios.c
new file mode 100644
index 0000000..02e4e27
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_intel_bios.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_bios.h"
+
+
+static void *find_section(struct bdb_header *bdb, int section_id)
+{
+	u8 *base = (u8 *)bdb;
+	int index = 0;
+	u16 total, current_size;
+	u8 current_id;
+
+	/* skip to first section */
+	index += bdb->header_size;
+	total = bdb->bdb_size;
+
+	/* walk the sections looking for section_id */
+	while (index < total) {
+		current_id = *(base + index);
+		index++;
+		current_size = *((u16 *)(base + index));
+		index += 2;
+		if (current_id == section_id)
+			return base + index;
+		index += current_size;
+	}
+
+	return NULL;
+}
+
+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+				    struct lvds_dvo_timing *dvo_timing)
+{
+	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+		dvo_timing->hactive_lo;
+	panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+		((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+	panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+		dvo_timing->hsync_pulse_width;
+	panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+		((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+	panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+		dvo_timing->vactive_lo;
+	panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+		dvo_timing->vsync_off;
+	panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+		dvo_timing->vsync_pulse_width;
+	panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+		((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+	panel_fixed_mode->clock = dvo_timing->clock * 10;
+	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+	/* Some VBTs have bogus h/vtotal values */
+	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+	if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+		panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+	drm_mode_set_name(panel_fixed_mode);
+}
+
+static void parse_backlight_data(struct drm_psb_private *dev_priv,
+				 struct bdb_header *bdb)
+{
+	struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
+	struct bdb_lvds_backlight *lvds_bl;
+	u8 p_type = 0;
+	void *bl_start = NULL;
+	struct bdb_lvds_options *lvds_opts
+		= find_section(bdb, BDB_LVDS_OPTIONS);
+
+	dev_priv->lvds_bl = NULL;
+
+	if (lvds_opts) {
+		DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
+		p_type = lvds_opts->panel_type;
+	} else {
+		DRM_DEBUG("no lvds_options\n");
+		return;
+	}
+
+	bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+	if (!bl_start) {
+		DRM_DEBUG("no lvds_backlight block\n");
+		return;
+	}
+	vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+
+	lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+	if (!lvds_bl) {
+		DRM_DEBUG("No memory\n");
+		return;
+	}
+
+	memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
+
+	dev_priv->lvds_bl = lvds_bl;
+}
+
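A note on the BDB layout that find_section() above walks: each section is a
one-byte id followed by a little-endian two-byte payload size, and the
returned pointer lands just past that three-byte header. A hand-built
illustration (not part of the patch; the ids come from psb_intel_bios.h):

	/* Two back-to-back sections:
	 *   id 40 (BDB_LVDS_OPTIONS),   size 2, payload { 0x01, 0x00 }
	 *   id 43 (BDB_LVDS_BACKLIGHT), size 1, payload { 0xff }
	 */
	u8 blocks[] = {
		40, 0x02, 0x00, 0x01, 0x00,
		43, 0x01, 0x00, 0xff,
	};
	/* A walk starting at blocks[0] skips the 3-byte header plus the
	 * 2-byte payload of the first section, then matches id 43 and
	 * returns &blocks[8], i.e. the 0xff payload byte. */
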
+/* Try to find integrated panel data */
+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+				 struct bdb_header *bdb)
+{
+	struct bdb_lvds_options *lvds_options;
+	struct bdb_lvds_lfp_data *lvds_lfp_data;
+	struct bdb_lvds_lfp_data_entry *entry;
+	struct lvds_dvo_timing *dvo_timing;
+	struct drm_display_mode *panel_fixed_mode;
+
+	/* Defaults if we can't find VBT info */
+	dev_priv->lvds_dither = 0;
+	dev_priv->lvds_vbt = 0;
+
+	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+	if (!lvds_options)
+		return;
+
+	dev_priv->lvds_dither = lvds_options->pixel_dither;
+	if (lvds_options->panel_type == 0xff)
+		return;
+
+	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+	if (!lvds_lfp_data)
+		return;
+
+	dev_priv->lvds_vbt = 1;
+
+	entry = &lvds_lfp_data->data[lvds_options->panel_type];
+	dvo_timing = &entry->dvo_timing;
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+				   GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
+
+	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+	dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+
+	DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+/* Try to find sdvo panel data */
+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+				  struct bdb_header *bdb)
+{
+	struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+	struct lvds_dvo_timing *dvo_timing;
+	struct drm_display_mode *panel_fixed_mode;
+
+	dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+	if (!sdvo_lvds_options)
+		return;
+
+	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+	if (!dvo_timing)
+		return;
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
+
+	fill_detail_timing_data(panel_fixed_mode,
+				dvo_timing + sdvo_lvds_options->panel_type);
+
+	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+}
+
+static void parse_general_features(struct drm_psb_private *dev_priv,
+				   struct bdb_header *bdb)
+{
+	struct bdb_general_features *general;
+
+	/* Set sensible defaults in case we can't find the general block */
+	dev_priv->int_tv_support = 1;
+	dev_priv->int_crt_support = 1;
+
+	general = find_section(bdb, BDB_GENERAL_FEATURES);
+	if (general) {
+		dev_priv->int_tv_support = general->int_tv_support;
+		dev_priv->int_crt_support = general->int_crt_support;
+		dev_priv->lvds_use_ssc = general->enable_ssc;
+
+		if (dev_priv->lvds_use_ssc) {
+			if (IS_I855(dev_priv->dev))
+				dev_priv->lvds_ssc_freq =
+					general->ssc_freq ? 66 : 48;
+			else
+				dev_priv->lvds_ssc_freq =
+					general->ssc_freq ? 100 : 96;
+		}
+	}
+}
+
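One detail of parse_sdvo_panel_data() worth spelling out: BDB_SDVO_PANEL_DTDS
is an array of struct lvds_dvo_timing entries, so the pointer arithmetic in
the fill_detail_timing_data() call is ordinary array indexing (illustration
only, not part of the patch):

	struct lvds_dvo_timing *dtds =
		find_section(bdb, BDB_SDVO_PANEL_DTDS);
	struct lvds_dvo_timing *dtd =
		dtds + sdvo_lvds_options->panel_type;
	/* equivalent to: dtd = &dtds[sdvo_lvds_options->panel_type]; */
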
+/**
+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns false on success, true on failure.
+ */
+bool psb_intel_init_bios(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	struct vbt_header *vbt = NULL;
+	struct bdb_header *bdb;
+	u8 __iomem *bios;
+	size_t size;
+	int i;
+
+	bios = pci_map_rom(pdev, &size);
+	if (!bios)
+		return true;
+
+	/* Scour memory looking for the VBT signature */
+	for (i = 0; i + 4 < size; i++) {
+		if (!memcmp(bios + i, "$VBT", 4)) {
+			vbt = (struct vbt_header *)(bios + i);
+			break;
+		}
+	}
+
+	if (!vbt) {
+		DRM_ERROR("VBT signature missing\n");
+		pci_unmap_rom(pdev, bios);
+		return true;
+	}
+
+	bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+	/* Grab useful general definitions */
+	parse_general_features(dev_priv, bdb);
+	parse_lfp_panel_data(dev_priv, bdb);
+	parse_sdvo_panel_data(dev_priv, bdb);
+	parse_backlight_data(dev_priv, bdb);
+
+	pci_unmap_rom(pdev, bios);
+
+	return false;
+}
+
+/**
+ * Destroy and free VBT data
+ */
+void psb_intel_destory_bios(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *sdvo_lvds_vbt_mode =
+				dev_priv->sdvo_lvds_vbt_mode;
+	struct drm_display_mode *lfp_lvds_vbt_mode =
+				dev_priv->lfp_lvds_vbt_mode;
+	struct bdb_lvds_backlight *lvds_bl =
+				dev_priv->lvds_bl;
+
+	/* free sdvo panel mode */
+	if (sdvo_lvds_vbt_mode) {
+		dev_priv->sdvo_lvds_vbt_mode = NULL;
+		kfree(sdvo_lvds_vbt_mode);
+	}
+
+	if (lfp_lvds_vbt_mode) {
+		dev_priv->lfp_lvds_vbt_mode = NULL;
+		kfree(lfp_lvds_vbt_mode);
+	}
+
+	if (lvds_bl) {
+		dev_priv->lvds_bl = NULL;
+		kfree(lvds_bl);
+	}
+}
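psb_intel_init_bios() and psb_intel_destory_bios() are intended as a pair
bracketing the lifetime of the VBT-derived allocations in dev_priv. A
hypothetical call-site sketch (the actual hookup lives elsewhere in this
patch, outside this hunk):

	if (psb_intel_init_bios(dev))
		DRM_INFO("no VBT found; using driver defaults\n");
	/* ... driver lifetime ... */
	psb_intel_destory_bios(dev);	/* on unload */
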
diff --git a/drivers/gpu/drm/psb/psb_intel_bios.h b/drivers/gpu/drm/psb/psb_intel_bios.h
new file mode 100644
index 0000000..1b0251d
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_intel_bios.h
@@ -0,0 +1,436 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include "drmP.h"
+
+struct vbt_header {
+	u8 signature[20];	/**< Always starts with "$VBT" */
+	u16 version;		/**< decimal */
+	u16 header_size;	/**< in bytes */
+	u16 vbt_size;		/**< in bytes */
+	u8 vbt_checksum;
+	u8 reserved0;
+	u32 bdb_offset;		/**< from beginning of VBT */
+	u32 aim_offset[4];	/**< from beginning of VBT */
+} __attribute__((packed));
+
+
+struct bdb_header {
+	u8 signature[16];	/**< Always 'BIOS_DATA_BLOCK' */
+	u16 version;		/**< decimal */
+	u16 header_size;	/**< in bytes */
+	u16 bdb_size;		/**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+	u8 type;	/* 0 == desktop, 1 == mobile */
+	u8 relstage;
+	u8 chipset;
+	u8 lvds_present:1;
+	u8 tv_present:1;
+	u8 rsvd2:6;	/* finish byte */
+	u8 rsvd3[4];
+	u8 signon[155];
+	u8 copyright[61];
+	u16 code_segment;
+	u8 dos_boot_mode;
+	u8 bandwidth_percent;
+	u8 rsvd4;	/* popup memory size */
+	u8 resize_pci_bios;
+	u8 rsvd5;	/* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES	 1
+#define BDB_GENERAL_DEFINITIONS	 2
+#define BDB_OLD_TOGGLE_LIST	 3
+#define BDB_MODE_SUPPORT_LIST	 4
+#define BDB_GENERIC_MODE_TABLE	 5
+#define BDB_EXT_MMIO_REGS	 6
+#define BDB_SWF_IO		 7
+#define BDB_SWF_MMIO		 8
+#define BDB_DOT_CLOCK_TABLE	 9
+#define BDB_MODE_REMOVAL_TABLE	10
+#define BDB_CHILD_DEVICE_TABLE	11
+#define BDB_DRIVER_FEATURES	12
+#define BDB_DRIVER_PERSISTENCE	13
+#define BDB_EXT_TABLE_PTRS	14
+#define BDB_DOT_CLOCK_OVERRIDE	15
+#define BDB_DISPLAY_SELECT	16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION	18
+#define BDB_DISPLAY_REMOVE	19
+#define BDB_OEM_CUSTOM		20
+#define BDB_EFP_LIST		21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS	22
+#define BDB_SDVO_PANEL_DTDS	23
+#define BDB_SDVO_LVDS_PNP_IDS	24
+#define BDB_SDVO_LVDS_POWER_SEQ	25
+#define BDB_TV_OPTIONS		26
+#define BDB_LVDS_OPTIONS	40
+#define BDB_LVDS_LFP_DATA_PTRS	41
+#define BDB_LVDS_LFP_DATA	42
+#define BDB_LVDS_BACKLIGHT	43
+#define BDB_LVDS_POWER		44
+#define BDB_SKIP		254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+	/* bits 1 */
+	u8 panel_fitting:2;
+	u8 flexaim:1;
+	u8 msg_enable:1;
+	u8 clear_screen:3;
+	u8 color_flip:1;
+
+	/* bits 2 */
+	u8 download_ext_vbt:1;
+	u8 enable_ssc:1;
+	u8 ssc_freq:1;
+	u8 enable_lfp_on_override:1;
+	u8 disable_ssc_ddt:1;
+	u8 rsvd8:3;	/* finish byte */
+
+	/* bits 3 */
+	u8 disable_smooth_vision:1;
+	u8 single_dvi:1;
+	u8 rsvd9:6;	/* finish byte */
+
+	/* bits 4 */
+	u8 legacy_monitor_detect;
+
+	/* bits 5 */
+	u8 int_crt_support:1;
+	u8 int_tv_support:1;
+	u8 rsvd11:6;	/* finish byte */
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+	/* DDC GPIO */
+	u8 crt_ddc_gmbus_pin;
+
+	/* DPMS bits */
+	u8 dpms_acpi:1;
+	u8 skip_boot_crt_detect:1;
+	u8 dpms_aim:1;
+	u8 rsvd1:5;	/* finish byte */
+
+	/* boot device bits */
+	u8 boot_display[2];
+	u8 child_dev_size;
+
+	/* device info */
+	u8 tv_or_lvds_info[33];
+	u8 dev1[33];
+	u8 dev2[33];
+	u8 dev3[33];
+	u8 dev4[33];
+	/* may be another device block here on some platforms */
+};
+
+struct bdb_lvds_options {
+	u8 panel_type;
+	u8 rsvd1;
+	/* LVDS capabilities, stored in a dword */
+	u8 pfit_mode:2;
+	u8 pfit_text_mode_enhanced:1;
+	u8 pfit_gfx_mode_enhanced:1;
+	u8
pfit_ratio_auto:1; + u8 pixel_dither:1; + u8 lvds_edid:1; + u8 rsvd2:1; + u8 rsvd4; +} __attribute__((packed)); + +struct bdb_lvds_backlight { + u8 type:2; + u8 pol:1; + u8 gpio:3; + u8 gmbus:2; + u16 freq; + u8 minbrightness; + u8 i2caddr; + u8 brightnesscmd; + /*FIXME: more...*/ +}__attribute__((packed)); + +/* LFP pointer table contains entries to the struct below */ +struct bdb_lvds_lfp_data_ptr { + u16 fp_timing_offset; /* offsets are from start of bdb */ + u8 fp_table_size; + u16 dvo_timing_offset; + u8 dvo_table_size; + u16 panel_pnp_id_offset; + u8 pnp_table_size; +} __attribute__((packed)); + +struct bdb_lvds_lfp_data_ptrs { + u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ + struct bdb_lvds_lfp_data_ptr ptr[16]; +} __attribute__((packed)); + +/* LFP data has 3 blocks per entry */ +struct lvds_fp_timing { + u16 x_res; + u16 y_res; + u32 lvds_reg; + u32 lvds_reg_val; + u32 pp_on_reg; + u32 pp_on_reg_val; + u32 pp_off_reg; + u32 pp_off_reg_val; + u32 pp_cycle_reg; + u32 pp_cycle_reg_val; + u32 pfit_reg; + u32 pfit_reg_val; + u16 terminator; +} __attribute__((packed)); + +struct lvds_dvo_timing { + u16 clock; /**< In 10khz */ + u8 hactive_lo; + u8 hblank_lo; + u8 hblank_hi:4; + u8 hactive_hi:4; + u8 vactive_lo; + u8 vblank_lo; + u8 vblank_hi:4; + u8 vactive_hi:4; + u8 hsync_off_lo; + u8 hsync_pulse_width; + u8 vsync_pulse_width:4; + u8 vsync_off:4; + u8 rsvd0:6; + u8 hsync_off_hi:2; + u8 h_image; + u8 v_image; + u8 max_hv; + u8 h_border; + u8 v_border; + u8 rsvd1:3; + u8 digital:2; + u8 vsync_positive:1; + u8 hsync_positive:1; + u8 rsvd2:1; +} __attribute__((packed)); + +struct lvds_pnp_id { + u16 mfg_name; + u16 product_code; + u32 serial; + u8 mfg_week; + u8 mfg_year; +} __attribute__((packed)); + +struct bdb_lvds_lfp_data_entry { + struct lvds_fp_timing fp_timing; + struct lvds_dvo_timing dvo_timing; + struct lvds_pnp_id pnp_id; +} __attribute__((packed)); + +struct bdb_lvds_lfp_data { + struct bdb_lvds_lfp_data_entry data[16]; +} __attribute__((packed)); + +struct aimdb_header { + char signature[16]; + char oem_device[20]; + u16 aimdb_version; + u16 aimdb_header_size; + u16 aimdb_size; +} __attribute__((packed)); + +struct aimdb_block { + u8 aimdb_id; + u16 aimdb_size; +} __attribute__((packed)); + +struct vch_panel_data { + u16 fp_timing_offset; + u8 fp_timing_size; + u16 dvo_timing_offset; + u8 dvo_timing_size; + u16 text_fitting_offset; + u8 text_fitting_size; + u16 graphics_fitting_offset; + u8 graphics_fitting_size; +} __attribute__((packed)); + +struct vch_bdb_22 { + struct aimdb_block aimdb_block; + struct vch_panel_data panels[16]; +} __attribute__((packed)); + +struct bdb_sdvo_lvds_options { + u8 panel_backlight; + u8 h40_set_panel_type; + u8 panel_type; + u8 ssc_clk_freq; + u16 als_low_trip; + u16 als_high_trip; + u8 sclalarcoeff_tab_row_num; + u8 sclalarcoeff_tab_row_size; + u8 coefficient[8]; + u8 panel_misc_bits_1; + u8 panel_misc_bits_2; + u8 panel_misc_bits_3; + u8 panel_misc_bits_4; +} __attribute__((packed)); + + +extern bool psb_intel_init_bios(struct drm_device *dev); +extern void psb_intel_destory_bios(struct drm_device * dev); + +/* + * Driver<->VBIOS interaction occurs through scratch bits in + * GR18 & SWF*. 
+ */ + +/* GR18 bits are set on display switch and hotkey events */ +#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */ +#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */ +#define GR18_HK_NONE (0x0<<3) +#define GR18_HK_LFP_STRETCH (0x1<<3) +#define GR18_HK_TOGGLE_DISP (0x2<<3) +#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */ +#define GR18_HK_POPUP_DISABLED (0x6<<3) +#define GR18_HK_POPUP_ENABLED (0x7<<3) +#define GR18_HK_PFIT (0x8<<3) +#define GR18_HK_APM_CHANGE (0xa<<3) +#define GR18_HK_MULTIPLE (0xc<<3) +#define GR18_USER_INT_EN (1<<2) +#define GR18_A0000_FLUSH_EN (1<<1) +#define GR18_SMM_EN (1<<0) + +/* Set by driver, cleared by VBIOS */ +#define SWF00_YRES_SHIFT 16 +#define SWF00_XRES_SHIFT 0 +#define SWF00_RES_MASK 0xffff + +/* Set by VBIOS at boot time and driver at runtime */ +#define SWF01_TV2_FORMAT_SHIFT 8 +#define SWF01_TV1_FORMAT_SHIFT 0 +#define SWF01_TV_FORMAT_MASK 0xffff + +#define SWF10_VBIOS_BLC_I2C_EN (1<<29) +#define SWF10_GTT_OVERRIDE_EN (1<<28) +#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */ +#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24) +#define SWF10_OLD_TOGGLE 0x0 +#define SWF10_TOGGLE_LIST_1 0x1 +#define SWF10_TOGGLE_LIST_2 0x2 +#define SWF10_TOGGLE_LIST_3 0x3 +#define SWF10_TOGGLE_LIST_4 0x4 +#define SWF10_PANNING_EN (1<<23) +#define SWF10_DRIVER_LOADED (1<<22) +#define SWF10_EXTENDED_DESKTOP (1<<21) +#define SWF10_EXCLUSIVE_MODE (1<<20) +#define SWF10_OVERLAY_EN (1<<19) +#define SWF10_PLANEB_HOLDOFF (1<<18) +#define SWF10_PLANEA_HOLDOFF (1<<17) +#define SWF10_VGA_HOLDOFF (1<<16) +#define SWF10_ACTIVE_DISP_MASK 0xffff +#define SWF10_PIPEB_LFP2 (1<<15) +#define SWF10_PIPEB_EFP2 (1<<14) +#define SWF10_PIPEB_TV2 (1<<13) +#define SWF10_PIPEB_CRT2 (1<<12) +#define SWF10_PIPEB_LFP (1<<11) +#define SWF10_PIPEB_EFP (1<<10) +#define SWF10_PIPEB_TV (1<<9) +#define SWF10_PIPEB_CRT (1<<8) +#define SWF10_PIPEA_LFP2 (1<<7) +#define SWF10_PIPEA_EFP2 (1<<6) +#define SWF10_PIPEA_TV2 (1<<5) +#define SWF10_PIPEA_CRT2 (1<<4) +#define SWF10_PIPEA_LFP (1<<3) +#define SWF10_PIPEA_EFP (1<<2) +#define SWF10_PIPEA_TV (1<<1) +#define SWF10_PIPEA_CRT (1<<0) + +#define SWF11_MEMORY_SIZE_SHIFT 16 +#define SWF11_SV_TEST_EN (1<<15) +#define SWF11_IS_AGP (1<<14) +#define SWF11_DISPLAY_HOLDOFF (1<<13) +#define SWF11_DPMS_REDUCED (1<<12) +#define SWF11_IS_VBE_MODE (1<<11) +#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */ +#define SWF11_DPMS_MASK 0x07 +#define SWF11_DPMS_OFF (1<<2) +#define SWF11_DPMS_SUSPEND (1<<1) +#define SWF11_DPMS_STANDBY (1<<0) +#define SWF11_DPMS_ON 0 + +#define SWF14_GFX_PFIT_EN (1<<31) +#define SWF14_TEXT_PFIT_EN (1<<30) +#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */ +#define SWF14_POPUP_EN (1<<28) +#define SWF14_DISPLAY_HOLDOFF (1<<27) +#define SWF14_DISP_DETECT_EN (1<<26) +#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */ +#define SWF14_DRIVER_STATUS (1<<24) +#define SWF14_OS_TYPE_WIN9X (1<<23) +#define SWF14_OS_TYPE_WINNT (1<<22) +/* 21:19 rsvd */ +#define SWF14_PM_TYPE_MASK 0x00070000 +#define SWF14_PM_ACPI_VIDEO (0x4 << 16) +#define SWF14_PM_ACPI (0x3 << 16) +#define SWF14_PM_APM_12 (0x2 << 16) +#define SWF14_PM_APM_11 (0x1 << 16) +#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */ + /* if GR18 indicates a display switch */ +#define SWF14_DS_PIPEB_LFP2_EN (1<<15) +#define SWF14_DS_PIPEB_EFP2_EN (1<<14) +#define SWF14_DS_PIPEB_TV2_EN (1<<13) +#define SWF14_DS_PIPEB_CRT2_EN (1<<12) +#define 
SWF14_DS_PIPEB_LFP_EN (1<<11) +#define SWF14_DS_PIPEB_EFP_EN (1<<10) +#define SWF14_DS_PIPEB_TV_EN (1<<9) +#define SWF14_DS_PIPEB_CRT_EN (1<<8) +#define SWF14_DS_PIPEA_LFP2_EN (1<<7) +#define SWF14_DS_PIPEA_EFP2_EN (1<<6) +#define SWF14_DS_PIPEA_TV2_EN (1<<5) +#define SWF14_DS_PIPEA_CRT2_EN (1<<4) +#define SWF14_DS_PIPEA_LFP_EN (1<<3) +#define SWF14_DS_PIPEA_EFP_EN (1<<2) +#define SWF14_DS_PIPEA_TV_EN (1<<1) +#define SWF14_DS_PIPEA_CRT_EN (1<<0) + /* if GR18 indicates a panel fitting request */ +#define SWF14_PFIT_EN (1<<0) /* 0 means disable */ + /* if GR18 indicates an APM change request */ +#define SWF14_APM_HIBERNATE 0x4 +#define SWF14_APM_SUSPEND 0x3 +#define SWF14_APM_STANDBY 0x1 +#define SWF14_APM_RESTORE 0x0 + +#endif /* _I830_BIOS_H_ */ diff --git a/drivers/gpu/drm/psb/psb_intel_display.c b/drivers/gpu/drm/psb/psb_intel_display.c new file mode 100644 index 0000000..9cc0ec1 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_intel_display.c @@ -0,0 +1,2484 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt + */ + +#include + +#include +#include "psb_fb.h" +#include "psb_intel_display.h" +#include "psb_powermgmt.h" + +struct psb_intel_clock_t { + /* given values */ + int n; + int m1, m2; + int p1, p2; + /* derived values */ + int dot; + int vco; + int m; + int p; +}; + +struct psb_intel_range_t { + int min, max; +}; + +struct psb_intel_p2_t { + int dot_limit; + int p2_slow, p2_fast; +}; + +#define INTEL_P2_NUM 2 + +struct psb_intel_limit_t { + struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1; + struct psb_intel_p2_t p2; +}; + +#define I8XX_DOT_MIN 25000 +#define I8XX_DOT_MAX 350000 +#define I8XX_VCO_MIN 930000 +#define I8XX_VCO_MAX 1400000 +#define I8XX_N_MIN 3 +#define I8XX_N_MAX 16 +#define I8XX_M_MIN 96 +#define I8XX_M_MAX 140 +#define I8XX_M1_MIN 18 +#define I8XX_M1_MAX 26 +#define I8XX_M2_MIN 6 +#define I8XX_M2_MAX 16 +#define I8XX_P_MIN 4 +#define I8XX_P_MAX 128 +#define I8XX_P1_MIN 2 +#define I8XX_P1_MAX 33 +#define I8XX_P1_LVDS_MIN 1 +#define I8XX_P1_LVDS_MAX 6 +#define I8XX_P2_SLOW 4 +#define I8XX_P2_FAST 2 +#define I8XX_P2_LVDS_SLOW 14 +#define I8XX_P2_LVDS_FAST 14 /* No fast option */ +#define I8XX_P2_SLOW_LIMIT 165000 + +#define I9XX_DOT_MIN 20000 +#define I9XX_DOT_MAX 400000 +#define I9XX_VCO_MIN 1400000 +#define I9XX_VCO_MAX 2800000 +#define I9XX_N_MIN 3 +#define I9XX_N_MAX 8 +#define I9XX_M_MIN 70 +#define I9XX_M_MAX 120 +#define I9XX_M1_MIN 10 +#define I9XX_M1_MAX 20 +#define I9XX_M2_MIN 5 +#define I9XX_M2_MAX 9 +#define I9XX_P_SDVO_DAC_MIN 5 +#define I9XX_P_SDVO_DAC_MAX 80 +#define I9XX_P_LVDS_MIN 7 +#define I9XX_P_LVDS_MAX 98 +#define I9XX_P1_MIN 1 +#define I9XX_P1_MAX 8 +#define I9XX_P2_SDVO_DAC_SLOW 10 +#define I9XX_P2_SDVO_DAC_FAST 5 +#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 +#define I9XX_P2_LVDS_SLOW 14 +#define I9XX_P2_LVDS_FAST 7 +#define I9XX_P2_LVDS_SLOW_LIMIT 112000 + +#define INTEL_LIMIT_I8XX_DVO_DAC 0 +#define INTEL_LIMIT_I8XX_LVDS 1 +#define INTEL_LIMIT_I9XX_SDVO_DAC 2 +#define INTEL_LIMIT_I9XX_LVDS 3 + +static const struct psb_intel_limit_t psb_intel_limits[] = { + { /* INTEL_LIMIT_I8XX_DVO_DAC */ + .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, + .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, + .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, + .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, + .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, + .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, + .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, + .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX}, + .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, + .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST}, + }, + { /* INTEL_LIMIT_I8XX_LVDS */ + .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, + .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, + .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, + .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, + .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, + .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, + .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, + .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX}, + .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, + .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST}, + }, + { /* INTEL_LIMIT_I9XX_SDVO_DAC */ + .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, + .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, + .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, + .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, + .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, + .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, + .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX}, + .p1 
= {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, + .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, + .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = + I9XX_P2_SDVO_DAC_FAST}, + }, + { /* INTEL_LIMIT_I9XX_LVDS */ + .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, + .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, + .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, + .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, + .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, + .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, + .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX}, + .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, + /* The single-channel range is 25-112Mhz, and dual-channel + * is 80-224Mhz. Prefer single channel as much as possible. + */ + .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, + .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST}, + }, +}; + +static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + const struct psb_intel_limit_t *limit; + + if (IS_I9XX(dev)) { + if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; + else + limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + } else { + if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS]; + else + limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; + } + return limit; +} + +/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ + +static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock) +{ + clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); + clock->p = clock->p1 * clock->p2; + clock->vco = refclk * clock->m / (clock->n + 2); + clock->dot = clock->vco / clock->p; +} + +/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */ + +static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock) +{ + clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); + clock->p = clock->p1 * clock->p2; + clock->vco = refclk * clock->m / (clock->n + 2); + clock->dot = clock->vco / clock->p; +} + +static void psb_intel_clock(struct drm_device *dev, int refclk, + struct psb_intel_clock_t *clock) +{ + if (IS_I9XX(dev)) + return i9xx_clock(refclk, clock); + else + return i8xx_clock(refclk, clock); +} + +/** + * Returns whether any output on the specified pipe is of the specified type + */ +bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *l_entry; + + list_for_each_entry(l_entry, &mode_config->connector_list, head) { + if (l_entry->encoder && l_entry->encoder->crtc == crtc) { + struct psb_intel_output *psb_intel_output = + to_psb_intel_output(l_entry); + if (psb_intel_output->type == type) + return true; + } + } + return false; +} + +#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } +/** + * Returns whether the given set of divisors are valid for a given refclk with + * the given connectors. 
+ */
+
+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
+				   struct psb_intel_clock_t *clock)
+{
+	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+
+	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+		INTELPllInvalid("p1 out of range\n");
+	if (clock->p < limit->p.min || limit->p.max < clock->p)
+		INTELPllInvalid("p out of range\n");
+	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+		INTELPllInvalid("m2 out of range\n");
+	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+		INTELPllInvalid("m1 out of range\n");
+	if (clock->m1 <= clock->m2)
+		INTELPllInvalid("m1 <= m2\n");
+	if (clock->m < limit->m.min || limit->m.max < clock->m)
+		INTELPllInvalid("m out of range\n");
+	if (clock->n < limit->n.min || limit->n.max < clock->n)
+		INTELPllInvalid("n out of range\n");
+	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+		INTELPllInvalid("vco out of range\n");
+	/* XXX: We may need to be checking "Dot clock"
+	 * depending on the multiplier, connector, etc.,
+	 * rather than just a single range.
+	 */
+	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+		INTELPllInvalid("dot out of range\n");
+
+	return true;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+				    int refclk,
+				    struct psb_intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_clock_t clock;
+	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+	int err = target;
+
+	if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+		/*
+		 * For LVDS, if the panel is on, just rely on its current
+		 * settings for dual-channel.  We haven't figured out how to
+		 * reliably set up different single/dual channel state, if we
+		 * even can.
+		 */
+		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP)
+			clock.p2 = limit->p2.p2_fast;
+		else
+			clock.p2 = limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			clock.p2 = limit->p2.p2_slow;
+		else
+			clock.p2 = limit->p2.p2_fast;
+	}
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
+		     clock.m2++) {
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+				     clock.p1 <= limit->p1.max;
+				     clock.p1++) {
+					int this_err;
+
+					psb_intel_clock(dev, refclk, &clock);
+
+					if (!psb_intel_PLL_is_valid
+					    (crtc, &clock))
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return err != target;
+}
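A quick numeric check of the clock equation above, with a hypothetical but
in-range divisor set (illustration only, not part of the patch):

	/* refclk = 96000 kHz, m1 = 18, m2 = 7, n = 3, p1 = 2, p2 = 10:
	 *   m   = 5 * (18 + 2) + (7 + 2) = 109
	 *   vco = 96000 * 109 / (3 + 2)  = 2092800 kHz
	 *   p   = 2 * 10                 = 20
	 *   dot = 2092800 / 20           = 104640 kHz (~104.6 MHz)
	 * m, vco, p and dot all sit inside the I9XX_* limits defined
	 * earlier, so psb_intel_PLL_is_valid() would accept this set.
	 */

+
+void psb_intel_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20ms, i.e. one cycle at 50 Hz.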
*/ + udelay(20000); +} + +int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + /* struct drm_i915_master_private *master_priv; */ + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); + struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev; + int pipe = psb_intel_crtc->pipe; + unsigned long Start, Offset; + int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE); + int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); + int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; + int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; + u32 dspcntr; + int ret = 0; + + /* no fb bound */ + if (!crtc->fb) { + DRM_DEBUG("No FB bound\n"); + return 0; + } + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + if (IS_MRST(dev) && (pipe == 0)) + dspbase = MRST_DSPABASE; + + Start = mode_dev->bo_offset(dev, psbfb->bo); + Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); + + REG_WRITE(dspstride, crtc->fb->pitch); + + dspcntr = REG_READ(dspcntr_reg); + dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; + + switch (crtc->fb->bits_per_pixel) { + case 8: + dspcntr |= DISPPLANE_8BPP; + break; + case 16: + if (crtc->fb->depth == 15) + dspcntr |= DISPPLANE_15_16BPP; + else + dspcntr |= DISPPLANE_16BPP; + break; + case 24: + case 32: + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; + break; + default: + DRM_ERROR("Unknown color depth\n"); + ret = -EINVAL; + goto psb_intel_pipe_set_base_exit; + } + REG_WRITE(dspcntr_reg, dspcntr); + + DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); + if (IS_I965G(dev) || IS_MRST(dev)) { + REG_WRITE(dspbase, Offset); + REG_READ(dspbase); + REG_WRITE(dspsurf, Start); + REG_READ(dspsurf); + } else { + REG_WRITE(dspbase, Start + Offset); + REG_READ(dspbase); + } + +psb_intel_pipe_set_base_exit: + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + + return ret; +} + +int psb_kms_flip_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); + struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev; + int pipe = psb_intel_crtc->pipe; + + struct psb_scheduler *scheduler = &dev_priv->scheduler; + struct psb_task *task = NULL; + task = kzalloc(sizeof(*task), GFP_KERNEL); + if (!task) + return -ENOMEM; + INIT_LIST_HEAD(&task->head); + INIT_LIST_HEAD(&task->buf.head); + task->task_type = psb_flip_task; + + spin_lock_irq(&scheduler->lock); + list_add_tail(&task->head, &scheduler->ta_queue); + /** + * From this point we may no longer dereference task, + * as the object it points to may be freed by another thread. + */ + + task = NULL; + spin_unlock_irq(&scheduler->lock); + + /* no fb bound */ + if (!crtc->fb) { + DRM_DEBUG("No FB bound\n"); + return 0; + } + + dev_priv->flip_start[pipe] = mode_dev->bo_offset(dev, psbfb->bo); + dev_priv->flip_offset[pipe] = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); + dev_priv->flip_stride[pipe] = crtc->fb->pitch; + dev_priv->pipe_active[pipe] = 1; + dev_priv->pipe_active[1-pipe] = 0; + + return 0; +} + +/** + * Sets the power management mode of the pipe and plane. 
+ * + * This code should probably grow support for turning the cursor off and back + * on appropriately at the same time as we're turning the pipe off/on. + */ +static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + /* struct drm_i915_master_private *master_priv; */ + /* struct drm_i915_private *dev_priv = dev->dev_private; */ + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int pipe = psb_intel_crtc->pipe; + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; + int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; + int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; + u32 temp; + bool enabled; + + /* XXX: When our outputs are all unaware of DPMS modes other than off + * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. + */ + switch (mode) { + case DRM_MODE_DPMS_ON: + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + /* Enable the DPLL */ + temp = REG_READ(dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + REG_WRITE(dpll_reg, temp); + REG_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); + REG_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); + REG_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + } + + /* Enable the pipe */ + temp = REG_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) == 0) + REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); + + /* Enable the plane */ + temp = REG_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) == 0) { + REG_WRITE(dspcntr_reg, + temp | DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); + } + + psb_intel_crtc_load_lut(crtc); + + /* Give the overlay scaler a chance to enable + * if it's on this pipe */ + /* psb_intel_crtc_dpms_video(crtc, true); TODO */ + break; + case DRM_MODE_DPMS_OFF: + /* Give the overlay scaler a chance to disable + * if it's on this pipe */ + /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ + + /* Disable the VGA plane that we never use */ + REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); + + /* Disable display plane */ + temp = REG_READ(dspcntr_reg); + if ((temp & DISPLAY_PLANE_ENABLE) != 0) { + REG_WRITE(dspcntr_reg, + temp & ~DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); + REG_READ(dspbase_reg); + } + + if (!IS_I9XX(dev)) { + /* Wait for vblank for the disable to take effect */ + psb_intel_wait_for_vblank(dev); + } + + /* Next, disable display pipes */ + temp = REG_READ(pipeconf_reg); + if ((temp & PIPEACONF_ENABLE) != 0) { + REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); + REG_READ(pipeconf_reg); + } + + /* Wait for vblank for the disable to take effect. */ + psb_intel_wait_for_vblank(dev); + + temp = REG_READ(dpll_reg); + if ((temp & DPLL_VCO_ENABLE) != 0) { + REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); + REG_READ(dpll_reg); + } + + /* Wait for the clocks to turn off. 
*/ + udelay(150); + break; + } + + enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; + +#if 0 /* JB: Add vblank support later */ + if (enabled) + dev_priv->vblank_pipe |= (1 << pipe); + else + dev_priv->vblank_pipe &= ~(1 << pipe); +#endif + + psb_intel_crtc->dpms_mode = mode; + +#if 0 /* JB: Add sarea support later */ + if (!dev->primary->master) + return 0; + + master_priv = dev->primary->master->driver_priv; + if (!master_priv->sarea_priv) + return 0; + + switch (pipe) { + case 0: + master_priv->sarea_priv->planeA_w = + enabled ? crtc->mode.hdisplay : 0; + master_priv->sarea_priv->planeA_h = + enabled ? crtc->mode.vdisplay : 0; + break; + case 1: + master_priv->sarea_priv->planeB_w = + enabled ? crtc->mode.hdisplay : 0; + master_priv->sarea_priv->planeB_h = + enabled ? crtc->mode.vdisplay : 0; + break; + default: + DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); + break; + } +#endif + + /*Set FIFO Watermarks*/ + REG_WRITE(DSPARB, 0x3F3E); +} + +static void psb_intel_crtc_prepare(struct drm_crtc *crtc) +{ + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void psb_intel_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); +} + +void psb_intel_encoder_prepare(struct drm_encoder *encoder) +{ + struct drm_encoder_helper_funcs *encoder_funcs = + encoder->helper_private; + /* lvds has its own version of prepare see psb_intel_lvds_prepare */ + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); +} + +void psb_intel_encoder_commit(struct drm_encoder *encoder) +{ + struct drm_encoder_helper_funcs *encoder_funcs = + encoder->helper_private; + /* lvds has its own version of commit see psb_intel_lvds_commit */ + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); +} + +static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + + +/** + * Return the pipe currently connected to the panel fitter, + * or -1 if the panel fitter is not present or not in use + */ +static int psb_intel_panel_fitter_pipe(struct drm_device *dev) +{ + u32 pfit_control; + + /* i830 doesn't have a panel fitter */ + if (IS_I830(dev)) + return -1; + + pfit_control = REG_READ(PFIT_CONTROL); + + /* See if the panel fitter is in use */ + if ((pfit_control & PFIT_ENABLE) == 0) + return -1; + + /* 965 can place panel fitter on either pipe */ + if (IS_I965G(dev) || IS_MRST(dev)) + return (pfit_control >> 29) & 0x3; + + /* older chips can only use pipe 1 */ + return 1; +} + +static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int pipe = psb_intel_crtc->pipe; + int fp_reg = (pipe == 0) ? FPA0 : FPB0; + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; + int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; + int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; + int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; + int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; + int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; + int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; + int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; + int vsync_reg = (pipe == 0) ? 
VSYNC_A : VSYNC_B; + int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; + int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; + int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; + int refclk; + struct psb_intel_clock_t clock; + u32 dpll = 0, fp = 0, dspcntr, pipeconf; + bool ok, is_sdvo = false, is_dvo = false; + bool is_crt = false, is_lvds = false, is_tv = false; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct psb_intel_output *psb_intel_output = + to_psb_intel_output(connector); + + if (!connector->encoder + || connector->encoder->crtc != crtc) + continue; + + switch (psb_intel_output->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + is_sdvo = true; + break; + case INTEL_OUTPUT_DVO: + is_dvo = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + case INTEL_OUTPUT_ANALOG: + is_crt = true; + break; + } + } + + if (IS_I9XX(dev)) + refclk = 96000; + else + refclk = 48000; + + ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, + &clock); + if (!ok) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return 0; + } + + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; + + dpll = DPLL_VGA_MODE_DIS; + if (IS_I9XX(dev)) { + if (is_lvds) { + dpll |= DPLLB_MODE_LVDS; + if (IS_POULSBO(dev)) + dpll |= DPLL_DVO_HIGH_SPEED; + } else + dpll |= DPLLB_MODE_DAC_SERIAL; + if (is_sdvo) { + dpll |= DPLL_DVO_HIGH_SPEED; + if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) { + int sdvo_pixel_multiply = + adjusted_mode->clock / mode->clock; + dpll |= + (sdvo_pixel_multiply - + 1) << SDVO_MULTIPLIER_SHIFT_HIRES; + } + } + + /* compute bitmask from p1 value */ + dpll |= (1 << (clock.p1 - 1)) << 16; + switch (clock.p2) { + case 5: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; + break; + case 7: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; + break; + case 10: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; + break; + case 14: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; + } + if (IS_I965G(dev)) + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); + } else { + if (is_lvds) { + dpll |= + (1 << (clock.p1 - 1)) << + DPLL_FPA01_P1_POST_DIV_SHIFT; + } else { + if (clock.p1 == 2) + dpll |= PLL_P1_DIVIDE_BY_TWO; + else + dpll |= + (clock.p1 - + 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; + if (clock.p2 == 4) + dpll |= PLL_P2_DIVIDE_BY_4; + } + } + + if (is_tv) { + /* XXX: just matching BIOS for now */ +/* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + } +#if 0 + else if (is_lvds) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; +#endif + else + dpll |= PLL_REF_INPUT_DREFCLK; + + /* setup pipeconf */ + pipeconf = REG_READ(pipeconf_reg); + + /* Set up the display plane register */ + dspcntr = DISPPLANE_GAMMA_ENABLE; + + if (pipe == 0) + dspcntr |= DISPPLANE_SEL_PIPE_A; + else + dspcntr |= DISPPLANE_SEL_PIPE_B; + + dspcntr |= DISPLAY_PLANE_ENABLE; + pipeconf |= PIPEACONF_ENABLE; + dpll |= DPLL_VCO_ENABLE; + + + /* Disable the panel fitter if it was on our pipe */ + if (psb_intel_panel_fitter_pipe(dev) == pipe) + REG_WRITE(PFIT_CONTROL, 0); + + DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); + drm_mode_debug_printmodeline(mode); + + if (dpll & DPLL_VCO_ENABLE) { + REG_WRITE(fp_reg, fp); + REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); + REG_READ(dpll_reg); + udelay(150); + } + + /* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. 
+ */ + if (is_lvds) { + u32 lvds = REG_READ(LVDS); + + lvds |= + LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | + LVDS_PIPEB_SELECT; + /* Set the B0-B3 data pairs corresponding to + * whether we're going to + * set the DPLLs for dual-channel mode or not. + */ + if (clock.p2 == 7) + lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; + else + lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); + + /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) + * appropriately here, but we need to look more + * thoroughly into how panels behave in the two modes. + */ + + REG_WRITE(LVDS, lvds); + REG_READ(LVDS); + } + + REG_WRITE(fp_reg, fp); + REG_WRITE(dpll_reg, dpll); + REG_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + + if (IS_I965G(dev)) { + int sdvo_pixel_multiply = + adjusted_mode->clock / mode->clock; + REG_WRITE(dpll_md_reg, + (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | + ((sdvo_pixel_multiply - + 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); + } else { + /* write it again -- the BIOS does, after all */ + REG_WRITE(dpll_reg, dpll); + } + REG_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + + REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | + ((adjusted_mode->crtc_hblank_end - 1) << 16)); + REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | + ((adjusted_mode->crtc_hsync_end - 1) << 16)); + REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | + ((adjusted_mode->crtc_vtotal - 1) << 16)); + REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | + ((adjusted_mode->crtc_vblank_end - 1) << 16)); + REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | + ((adjusted_mode->crtc_vsync_end - 1) << 16)); + /* pipesrc and dspsize control the size that is scaled from, + * which should always be the user's requested size. + */ + REG_WRITE(dspsize_reg, + ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); + REG_WRITE(dsppos_reg, 0); + REG_WRITE(pipesrc_reg, + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); + REG_WRITE(pipeconf_reg, pipeconf); + REG_READ(pipeconf_reg); + + psb_intel_wait_for_vblank(dev); + + REG_WRITE(dspcntr_reg, dspcntr); + + /* Flush the plane changes */ + { + struct drm_crtc_helper_funcs *crtc_funcs = + crtc->helper_private; + crtc_funcs->mode_set_base(crtc, x, y, old_fb); + } + + psb_intel_wait_for_vblank(dev); + + return 0; +} + +/** Loads the palette/gamma unit for the CRTC with the prepared values */ +void psb_intel_crtc_load_lut(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; + int i; + + /* The clocks have to be on to load the palette. 
*/
+	if (!crtc->enabled)
+		return;
+
+	if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+		for (i = 0; i < 256; i++) {
+			REG_WRITE(palreg + 4 * i,
+				  (psb_intel_crtc->lut_r[i] << 16) |
+				  (psb_intel_crtc->lut_g[i] << 8) |
+				  psb_intel_crtc->lut_b[i]);
+		}
+		powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+	}
+}
+
+#ifndef CONFIG_MRST
+/**
+ * Save HW states of the given crtc
+ */
+static void psb_intel_crtc_save(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	/* struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private; */
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	int pipeA = (psb_intel_crtc->pipe == 0);
+	uint32_t paletteReg;
+	int i;
+
+	DRM_DEBUG("\n");
+
+	if (!crtc_state) {
+		DRM_DEBUG("No CRTC state found\n");
+		return;
+	}
+
+	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+	/*NOTE: DSPSIZE DSPPOS only for psb*/
+	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+	DRM_DEBUG("(%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
+		  crtc_state->saveDSPCNTR,
+		  crtc_state->savePIPECONF,
+		  crtc_state->savePIPESRC,
+		  crtc_state->saveFP0,
+		  crtc_state->saveFP1,
+		  crtc_state->saveDPLL,
+		  crtc_state->saveHTOTAL,
+		  crtc_state->saveHBLANK,
+		  crtc_state->saveHSYNC,
+		  crtc_state->saveVTOTAL,
+		  crtc_state->saveVBLANK,
+		  crtc_state->saveVSYNC,
+		  crtc_state->saveDSPSTRIDE,
+		  crtc_state->saveDSPSIZE,
+		  crtc_state->saveDSPPOS,
+		  crtc_state->saveDSPBASE
+		);
+
+	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+	for (i = 0; i < 256; ++i)
+		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given crtc
+ */
+static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	/* struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private; */
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	/* struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; */
+	int pipeA = (psb_intel_crtc->pipe == 0);
+	uint32_t paletteReg;
+	int i;
+
+	DRM_DEBUG("\n");
+
+	if (!crtc_state) {
+		DRM_DEBUG("No crtc state\n");
+		return;
+	}
+
+	DRM_DEBUG("current: (%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n",
+		  REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+		  REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+		  REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+		  REG_READ(pipeA ? FPA0 : FPB0),
+		  REG_READ(pipeA ? FPA1 : FPB1),
+		  REG_READ(pipeA ?
DPLL_A : DPLL_B), + REG_READ(pipeA ? HTOTAL_A : HTOTAL_B), + REG_READ(pipeA ? HBLANK_A : HBLANK_B), + REG_READ(pipeA ? HSYNC_A : HSYNC_B), + REG_READ(pipeA ? VTOTAL_A : VTOTAL_B), + REG_READ(pipeA ? VBLANK_A : VBLANK_B), + REG_READ(pipeA ? VSYNC_A : VSYNC_B), + REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE), + REG_READ(pipeA ? DSPASIZE : DSPBSIZE), + REG_READ(pipeA ? DSPAPOS : DSPBPOS), + REG_READ(pipeA ? DSPABASE : DSPBBASE) + ); + + DRM_DEBUG("saved: (%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)\n", + crtc_state->saveDSPCNTR, + crtc_state->savePIPECONF, + crtc_state->savePIPESRC, + crtc_state->saveFP0, + crtc_state->saveFP1, + crtc_state->saveDPLL, + crtc_state->saveHTOTAL, + crtc_state->saveHBLANK, + crtc_state->saveHSYNC, + crtc_state->saveVTOTAL, + crtc_state->saveVBLANK, + crtc_state->saveVSYNC, + crtc_state->saveDSPSTRIDE, + crtc_state->saveDSPSIZE, + crtc_state->saveDSPPOS, + crtc_state->saveDSPBASE + ); + + +#if 0 + if(drm_helper_crtc_in_use(crtc)) + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + + if(psb_intel_panel_fitter_pipe(dev) == psb_intel_crtc->pipe) { + REG_WRITE(PFIT_CONTROL, crtc_state->savePFITCTRL); + DRM_DEBUG("write pfit_controle: %x\n", REG_READ(PFIT_CONTROL)); + } +#endif + + if(crtc_state->saveDPLL & DPLL_VCO_ENABLE) { + REG_WRITE(pipeA ? DPLL_A : DPLL_B, + crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); + REG_READ(pipeA ? DPLL_A : DPLL_B); + DRM_DEBUG("write dpll: %x\n", REG_READ(pipeA ? DPLL_A : DPLL_B)); + udelay(150); + } + + REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0); + REG_READ(pipeA ? FPA0 : FPB0); + + REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1); + REG_READ(pipeA ? FPA1 : FPB1); + + REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL); + REG_READ(pipeA ? DPLL_A : DPLL_B); + udelay(150); + + REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL); + REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK); + REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC); + REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL); + REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK); + REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC); + REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE); + + REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE); + REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS); + + REG_WRITE(pipeA ? PIPEASRC :PIPEBSRC, crtc_state->savePIPESRC); + REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); + REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF); + + psb_intel_wait_for_vblank(dev); + + REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR); + REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); + + psb_intel_wait_for_vblank(dev); + + paletteReg = pipeA ? PALETTE_A : PALETTE_B; + for(i=0; i<256; ++i) { + REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); + } +} +#endif + +static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, uint32_t height) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private; + struct psb_gtt * pg = dev_priv->pg; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev; + int pipe = psb_intel_crtc->pipe; + uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; + uint32_t base = (pipe == 0) ? 
CURABASE : CURBBASE;
+    uint32_t temp;
+    size_t addr = 0;
+    size_t size;
+    void *bo;
+    int ret;
+
+    DRM_DEBUG("\n");
+
+    /* if we want to turn off the cursor ignore width and height */
+    if (!handle) {
+        DRM_DEBUG("cursor off\n");
+        /* turn off the cursor */
+        temp = 0;
+        temp |= CURSOR_MODE_DISABLE;
+
+        if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+            REG_WRITE(control, temp);
+            REG_WRITE(base, 0);
+            powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+        }
+
+        /* unpin the old bo */
+        if (psb_intel_crtc->cursor_bo) {
+            mode_dev->bo_unpin_for_scanout(dev,
+                               psb_intel_crtc->cursor_bo);
+            psb_intel_crtc->cursor_bo = NULL;
+        }
+
+        return 0;
+    }
+
+    /* Currently we only support 64x64 cursors */
+    if (width != 64 || height != 64) {
+        DRM_ERROR("we currently only support 64x64 cursors\n");
+        return -EINVAL;
+    }
+
+    bo = mode_dev->bo_from_handle(dev, file_priv, handle);
+    if (!bo)
+        return -ENOENT;
+    ret = mode_dev->bo_pin_for_scanout(dev, bo);
+    if (ret)
+        return ret;
+    size = mode_dev->bo_size(dev, bo);
+    if (size < width * height * 4) {
+        DRM_ERROR("buffer is too small\n");
+        /* don't leak the pin taken above */
+        mode_dev->bo_unpin_for_scanout(dev, bo);
+        return -ENOMEM;
+    }
+    addr = mode_dev->bo_offset(dev, bo);
+    if (IS_POULSBO(dev)) {
+        addr += pg->stolen_base;
+    }
+
+    psb_intel_crtc->cursor_addr = addr;
+
+    temp = 0;
+    /* set the pipe for the cursor */
+    temp |= (pipe << 28);
+    temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+    if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+        REG_WRITE(control, temp);
+        REG_WRITE(base, addr);
+        powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+    }
+
+    /* unpin the old bo and remember the new one */
+    if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo)
+        mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
+    psb_intel_crtc->cursor_bo = bo;
+
+    return 0;
+}
+
+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+    struct drm_device *dev = crtc->dev;
+    struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+    int pipe = psb_intel_crtc->pipe;
+    uint32_t temp = 0;
+    uint32_t adder;
+
+    if (x < 0) {
+        temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+        x = -x;
+    }
+    if (y < 0) {
+        temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+        y = -y;
+    }
+
+    temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+    temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+    adder = psb_intel_crtc->cursor_addr;
+
+    if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+        REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+        REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+        powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+    }
+    return 0;
+}
+
+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                     u16 *green, u16 *blue, uint32_t size)
+{
+    struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+    int i;
+
+    if (size != 256)
+        return;
+
+    for (i = 0; i < 256; i++) {
+        psb_intel_crtc->lut_r[i] = red[i] >> 8;
+        psb_intel_crtc->lut_g[i] = green[i] >> 8;
+        psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+    }
+
+    psb_intel_crtc_load_lut(crtc);
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int psb_intel_crtc_clock_get(struct drm_device *dev,
+                    struct drm_crtc *crtc)
+{
+    struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+    int pipe = psb_intel_crtc->pipe;
+    u32 dpll;
+    u32 fp;
+    struct psb_intel_clock_t clock;
+    bool is_lvds;
+    struct drm_psb_private *dev_priv = dev->dev_private;
+
+    if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+        dpll = REG_READ((pipe == 0) ?
DPLL_A : DPLL_B); + if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) + fp = REG_READ((pipe == 0) ? FPA0 : FPB0); + else + fp = REG_READ((pipe == 0) ? FPA1 : FPB1); + is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } else { + dpll = (pipe == 0) ? dev_priv->saveDPLL_A : dev_priv->saveDPLL_B; + if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) + fp = (pipe == 0) ? dev_priv->saveFPA0 : dev_priv->saveFPB0; + else + fp = (pipe == 0) ? dev_priv->saveFPA1 : dev_priv->saveFPB1; + is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN); + } + + clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; + clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; + clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; + + if (is_lvds) { + clock.p1 = + ffs((dpll & + DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> + DPLL_FPA01_P1_POST_DIV_SHIFT); + clock.p2 = 14; + + if ((dpll & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + /* XXX: might not be 66MHz */ + i8xx_clock(66000, &clock); + } else + i8xx_clock(48000, &clock); + } else { + if (dpll & PLL_P1_DIVIDE_BY_TWO) + clock.p1 = 2; + else { + clock.p1 = + ((dpll & + DPLL_FPA01_P1_POST_DIV_MASK_I830) >> + DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; + } + if (dpll & PLL_P2_DIVIDE_BY_4) + clock.p2 = 4; + else + clock.p2 = 2; + + i8xx_clock(48000, &clock); + } + + /* XXX: It would be nice to validate the clocks, but we can't reuse + * i830PllIsValid() because it relies on the xf86_config connector + * configuration being accurate, which it isn't necessarily. + */ + + return clock.dot; +} + +/** Returns the currently programmed mode of the given pipe. */ +struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, + struct drm_crtc *crtc) +{ + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + int pipe = psb_intel_crtc->pipe; + struct drm_display_mode *mode; + int htot; + int hsync; + int vtot; + int vsync; + struct drm_psb_private *dev_priv = dev->dev_private; + + if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) { + htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); + hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B); + vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); + vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } else { + htot = (pipe == 0) ? dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B; + hsync = (pipe == 0) ? dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B; + vtot = (pipe == 0) ? dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B; + vsync = (pipe == 0) ? 
dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B; + } + + mode = kzalloc(sizeof(*mode), GFP_KERNEL); + if (!mode) + return NULL; + + mode->clock = psb_intel_crtc_clock_get(dev, crtc); + mode->hdisplay = (htot & 0xffff) + 1; + mode->htotal = ((htot & 0xffff0000) >> 16) + 1; + mode->hsync_start = (hsync & 0xffff) + 1; + mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; + mode->vdisplay = (vtot & 0xffff) + 1; + mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; + mode->vsync_start = (vsync & 0xffff) + 1; + mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; + + drm_mode_set_name(mode); + drm_mode_set_crtcinfo(mode, 0); + + return mode; +} + +static void psb_intel_crtc_destroy(struct drm_crtc *crtc) +{ + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + +#ifndef CONFIG_MRST + if(psb_intel_crtc->crtc_state) + kfree(psb_intel_crtc->crtc_state); +#endif + drm_crtc_cleanup(crtc); + kfree(psb_intel_crtc); +} + +static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { + .dpms = psb_intel_crtc_dpms, + .mode_fixup = psb_intel_crtc_mode_fixup, + .mode_set = psb_intel_crtc_mode_set, + .mode_set_base = psb_intel_pipe_set_base, + .prepare = psb_intel_crtc_prepare, + .commit = psb_intel_crtc_commit, +}; + +static const struct drm_crtc_helper_funcs mrst_helper_funcs; + +const struct drm_crtc_funcs psb_intel_crtc_funcs = { +#ifndef CONFIG_MRST + .save = psb_intel_crtc_save, + .restore = psb_intel_crtc_restore, +#endif + .cursor_set = psb_intel_crtc_cursor_set, + .cursor_move = psb_intel_crtc_cursor_move, + .gamma_set = psb_intel_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = psb_intel_crtc_destroy, + .set_base = psb_kms_flip_set_base, +}; + + +void psb_intel_crtc_init(struct drm_device *dev, int pipe, + struct psb_intel_mode_device *mode_dev) +{ + struct psb_intel_crtc *psb_intel_crtc; + int i; + uint16_t *r_base, *g_base, *b_base; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter psb_intel_crtc_init \n"); +#endif /* PRINT_JLIU7 */ + + /* We allocate a extra array of drm_connector pointers + * for fbdev after the crtc */ + psb_intel_crtc = + kzalloc(sizeof(struct psb_intel_crtc) + + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), + GFP_KERNEL); + if (psb_intel_crtc == NULL) + return; + +#ifndef CONFIG_MRST + psb_intel_crtc->crtc_state = kzalloc(sizeof(struct psb_intel_crtc_state), + GFP_KERNEL); + if(!psb_intel_crtc->crtc_state) { + DRM_INFO("Crtc state error: No memory\n"); + kfree(psb_intel_crtc); + return; + } +#endif + + drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); + psb_intel_crtc->pipe = pipe; + + r_base = psb_intel_crtc->base.gamma_store; + g_base = r_base + 256; + b_base = g_base + 256; + for (i = 0; i < 256; i++) { + psb_intel_crtc->lut_r[i] = i; + psb_intel_crtc->lut_g[i] = i; + psb_intel_crtc->lut_b[i] = i; + r_base[i] = i << 8; + g_base[i] = i << 8; + b_base[i] = i << 8; + } + + psb_intel_crtc->mode_dev = mode_dev; + psb_intel_crtc->cursor_addr = 0; + psb_intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; + + if (IS_MRST(dev)) { + drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs); + } else { + drm_crtc_helper_add(&psb_intel_crtc->base, + &psb_intel_helper_funcs); + } + + /* Setup the array of drm_connector pointer array */ + psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; + psb_intel_crtc->mode_set.connectors = + (struct drm_connector **) (psb_intel_crtc + 1); + psb_intel_crtc->mode_set.num_connectors = 0; + +#if 0 /* JB: not drop, What should go in 
here? */ + if (i915_fbpercrtc) +#endif +} + +struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) +{ + struct drm_crtc *crtc = NULL; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); + if (psb_intel_crtc->pipe == pipe) + break; + } + return crtc; +} + +int psb_intel_connector_clones(struct drm_device *dev, int type_mask) +{ + int index_mask = 0; + struct drm_connector *connector; + int entry = 0; + + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + struct psb_intel_output *psb_intel_output = + to_psb_intel_output(connector); + if (type_mask & (1 << psb_intel_output->type)) + index_mask |= (1 << entry); + entry++; + } + return index_mask; +} + +#if 0 /* JB: Should be per device */ +static void psb_intel_setup_outputs(struct drm_device *dev) +{ + struct drm_connector *connector; + + psb_intel_crt_init(dev); + + /* Set up integrated LVDS */ + if (IS_MOBILE(dev) && !IS_I830(dev)) + psb_intel_lvds_init(dev); + + if (IS_I9XX(dev)) { + psb_intel_sdvo_init(dev, SDVOB); + psb_intel_sdvo_init(dev, SDVOC); + } else + psb_intel_dvo_init(dev); + + if (IS_I9XX(dev) && !IS_I915G(dev)) + psb_intel_tv_init(dev); + + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + struct psb_intel_output *psb_intel_output = + to_psb_intel_output(connector); + struct drm_encoder *encoder = &psb_intel_output->enc; + int crtc_mask = 0, clone_mask = 0; + + /* valid crtcs */ + switch (psb_intel_output->type) { + case INTEL_OUTPUT_DVO: + case INTEL_OUTPUT_SDVO: + crtc_mask = ((1 << 0) | (1 << 1)); + clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | + (1 << INTEL_OUTPUT_DVO) | + (1 << INTEL_OUTPUT_SDVO)); + break; + case INTEL_OUTPUT_ANALOG: + crtc_mask = ((1 << 0) | (1 << 1)); + clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | + (1 << INTEL_OUTPUT_DVO) | + (1 << INTEL_OUTPUT_SDVO)); + break; + case INTEL_OUTPUT_LVDS: + crtc_mask = (1 << 1); + clone_mask = (1 << INTEL_OUTPUT_LVDS); + break; + case INTEL_OUTPUT_TVOUT: + crtc_mask = ((1 << 0) | (1 << 1)); + clone_mask = (1 << INTEL_OUTPUT_TVOUT); + break; + } + encoder->possible_crtcs = crtc_mask; + encoder->possible_clones = + psb_intel_connector_clones(dev, clone_mask); + } +} +#endif + +#if 0 /* JB: Rework framebuffer code into something none device specific */ +static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct psb_intel_framebuffer *psb_intel_fb = to_psb_intel_framebuffer(fb); + struct drm_device *dev = fb->dev; + + if (fb->fbdev) + intelfb_remove(dev, fb); + + drm_framebuffer_cleanup(fb); + drm_gem_object_unreference(fb->mm_private); + + kfree(psb_intel_fb); +} + +static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct drm_gem_object *object = fb->mm_private; + + return drm_gem_handle_create(file_priv, object, handle); +} + +static const struct drm_framebuffer_funcs psb_intel_fb_funcs = { + .destroy = psb_intel_user_framebuffer_destroy, + .create_handle = psb_intel_user_framebuffer_create_handle, +}; + +struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd + *mode_cmd, + void *mm_private) +{ + struct psb_intel_framebuffer *psb_intel_fb; + + psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL); + if (!psb_intel_fb) + return NULL; + + if (!drm_framebuffer_init(dev, &psb_intel_fb->base, &psb_intel_fb_funcs)) + return NULL; + + 
drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd); + + return &psb_intel_fb->base; +} + + +static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct + drm_device + *dev, + struct + drm_file + *filp, + struct + drm_mode_fb_cmd + *mode_cmd) +{ + struct drm_gem_object *obj; + + obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); + if (!obj) + return NULL; + + return psb_intel_framebuffer_create(dev, mode_cmd, obj); +} + +static int psb_intel_insert_new_fb(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_framebuffer *fb, + struct drm_mode_fb_cmd *mode_cmd) +{ + struct psb_intel_framebuffer *psb_intel_fb; + struct drm_gem_object *obj; + struct drm_crtc *crtc; + + psb_intel_fb = to_psb_intel_framebuffer(fb); + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); + + if (!obj) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + drm_gem_object_unreference(psb_intel_fb->base.mm_private); + drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj); + mutex_unlock(&dev->struct_mutex); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->fb == fb) { + struct drm_crtc_helper_funcs *crtc_funcs = + crtc->helper_private; + crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y); + } + } + return 0; +} + +static const struct drm_mode_config_funcs psb_intel_mode_funcs = { + .resize_fb = psb_intel_insert_new_fb, + .fb_create = psb_intel_user_framebuffer_create, + .fb_changed = intelfb_probe, +}; +#endif + +#if 0 /* Should be per device */ +void psb_intel_modeset_init(struct drm_device *dev) +{ + int num_pipe; + int i; + + drm_mode_config_init(dev); + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + + dev->mode_config.funcs = (void *) &psb_intel_mode_funcs; + + if (IS_I965G(dev)) { + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; + } else { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } + + /* set memory base */ + /* MRST and PSB should use BAR 2*/ + dev->mode_config.fb_base = + pci_resource_start(dev->pdev, 2); + + if (IS_MOBILE(dev) || IS_I9XX(dev)) + num_pipe = 2; + else + num_pipe = 1; + DRM_DEBUG("%d display pipe%s available.\n", + num_pipe, num_pipe > 1 ? 
"s" : ""); + + for (i = 0; i < num_pipe; i++) + psb_intel_crtc_init(dev, i); + + psb_intel_setup_outputs(dev); + + /* setup fbs */ + /* drm_initial_config(dev); */ +} +#endif + +void psb_intel_modeset_cleanup(struct drm_device *dev) +{ + drm_mode_config_cleanup(dev); +} + + +/* current intel driver doesn't take advantage of encoders + always give back the encoder for the connector +*/ +struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector) +{ + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + + return &psb_intel_output->enc; +} + +/* MRST_PLATFORM start */ + +#if DUMP_REGISTER +void dump_dc_registers(struct drm_device *dev) +{ + DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private; + unsigned int i = 0; + + DRM_INFO("jliu7 dump_dc_registers\n"); + + + if (0x80000000 & REG_READ(0x70008)) { + for (i = 0x20a0; i < 0x20af; i += 4) { + DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n", i, (unsigned int) REG_READ(i)); + } + + for (i = 0xf014; i < 0xf047; i += 4) { + DRM_INFO + ("jliu7 pipe A dpll register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x60000; i < 0x6005f; i += 4) { + DRM_INFO + ("jliu7 pipe A timing register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x61140; i < 0x61143; i += 4) { + DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x61180; i < 0x6123F; i += 4) { + DRM_INFO + ("jliu7 LVDS PORT register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x61254; i < 0x612AB; i += 4) { + DRM_INFO("jliu7 BLC register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x70000; i < 0x70047; i += 4) { + DRM_INFO + ("jliu7 PIPE A control register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x70180; i < 0x7020b; i += 4) { + DRM_INFO("jliu7 display A control register=0x%x," + "value=%x\n", i, + (unsigned int) REG_READ(i)); + } + + for (i = 0x71400; i < 0x71403; i += 4) { + DRM_INFO + ("jliu7 VGA Display Plane Control register=0x%x," + "value=%x\n", i, (unsigned int) REG_READ(i)); + } + } + + if (0x80000000 & REG_READ(0x71008)) { + for (i = 0x61000; i < 0x6105f; i += 4) { + DRM_INFO + ("jliu7 pipe B timing register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x71000; i < 0x71047; i += 4) { + DRM_INFO + ("jliu7 PIPE B control register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } + + for (i = 0x71180; i < 0x7120b; i += 4) { + DRM_INFO("jliu7 display B control register=0x%x," + "value=%x\n", i, + (unsigned int) REG_READ(i)); + } + } +#if 0 + for (i = 0x70080; i < 0x700df; i += 4) { + DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); + } +#endif + +} + +void dump_dsi_registers(struct drm_device *dev) +{ + DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private; + unsigned int i = 0; + + DRM_INFO("jliu7 dump_dsi_registers\n"); + + for (i = 0xb000; i < 0xb064; i += 4) { + DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i, + (unsigned int) REG_READ(i)); + } + + i = 0xb104; + DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n", + i, (unsigned int) REG_READ(i)); +} +#endif /* DUMP_REGISTER */ + + +struct mrst_limit_t { + struct psb_intel_range_t dot, m, p1; +}; + +struct mrst_clock_t { + /* derived values */ + int dot; + int m; + int p1; +}; + +#define MRST_LIMIT_LVDS_100L 0 +#define MRST_LIMIT_LVDS_83 1 +#define MRST_LIMIT_LVDS_100 2 + +#define MRST_DOT_MIN 19750 +#define MRST_DOT_MAX 120000 +#define 
MRST_M_MIN_100L 20
+#define MRST_M_MIN_100 10
+#define MRST_M_MIN_83 12
+#define MRST_M_MAX_100L 34
+#define MRST_M_MAX_100 17
+#define MRST_M_MAX_83 20
+#define MRST_P1_MIN 2
+#define MRST_P1_MAX_0 7
+#define MRST_P1_MAX_1 8
+
+static const struct mrst_limit_t mrst_limits[] = {
+    { /* MRST_LIMIT_LVDS_100L */
+     .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+     .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+     .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+     },
+    { /* MRST_LIMIT_LVDS_83 */
+     .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+     .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+     .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+     },
+    { /* MRST_LIMIT_LVDS_100 */
+     .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+     .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+     .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+     },
+};
+
+#define MRST_M_MIN 10
+static const u32 mrst_m_converts[] = {
+    0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+    0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+    0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+};
+
+#define COUNT_MAX 0x10000000
+void mrstWaitForPipeDisable(struct drm_device *dev)
+{
+    int count, temp;
+
+    /* FIXME JLIU7_PO */
+    psb_intel_wait_for_vblank(dev);
+    return;
+
+    /* Wait for the pipe disable to take effect. */
+    for (count = 0; count < COUNT_MAX; count++) {
+        temp = REG_READ(PIPEACONF);
+        if ((temp & PIPEACONF_PIPE_STATE) == 0)
+            break;
+    }
+
+    if (count == COUNT_MAX) {
+#if PRINT_JLIU7
+        DRM_INFO("JLIU7 mrstWaitForPipeDisable timed out.\n");
+#endif /* PRINT_JLIU7 */
+    } else {
+#if PRINT_JLIU7
+        DRM_INFO("JLIU7 mrstWaitForPipeDisable count = %d.\n", count);
+#endif /* PRINT_JLIU7 */
+    }
+}
+
+void mrstWaitForPipeEnable(struct drm_device *dev)
+{
+    int count, temp;
+
+    /* FIXME JLIU7_PO */
+    psb_intel_wait_for_vblank(dev);
+    return;
+
+    /* Wait for the pipe enable to take effect. */
+    for (count = 0; count < COUNT_MAX; count++) {
+        temp = REG_READ(PIPEACONF);
+        if ((temp & PIPEACONF_PIPE_STATE) != 0)
+            break;
+    }
+
+    if (count == COUNT_MAX) {
+#if PRINT_JLIU7
+        DRM_INFO("JLIU7 mrstWaitForPipeEnable timed out.\n");
+#endif /* PRINT_JLIU7 */
+    } else {
+#if PRINT_JLIU7
+        DRM_INFO("JLIU7 mrstWaitForPipeEnable count = %d.\n", count);
+#endif /* PRINT_JLIU7 */
+    }
+}
+
+static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
+{
+    const struct mrst_limit_t *limit = NULL;
+    struct drm_device *dev = crtc->dev;
+    DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+    if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+        || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+        if (dev_priv->sku_100L)
+            limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
+        if (dev_priv->sku_83)
+            limit = &mrst_limits[MRST_LIMIT_LVDS_83];
+        if (dev_priv->sku_100)
+            limit = &mrst_limits[MRST_LIMIT_LVDS_100];
+    } else {
+#if PRINT_JLIU7
+        DRM_INFO("JLIU7 mrst_limit: wrong display type.\n");
+#endif /* PRINT_JLIU7 */
+    }
+
+    return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void mrst_clock(int refclk, struct mrst_clock_t *clock)
+{
+    clock->dot = (refclk * clock->m) / (14 * clock->p1);
+}
+
+void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
+{
+#if PRINT_JLIU7
+    DRM_INFO("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d.\n",
+         prefix, clock->dot, clock->m, clock->p1);
+#endif /* PRINT_JLIU7 */
+}
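For reference, the m divisor chosen by the search below is not written to the FP register directly; it is first translated through mrst_m_converts[] (the same lookup mrst_crtc_mode_set performs further down). A minimal sketch of that translation, using the hypothetical helper name mrst_fp_from_m (not part of the patch):

    static u32 mrst_fp_from_m(int m)
    {
            /* index 0 of mrst_m_converts corresponds to m == MRST_M_MIN (10) */
            return mrst_m_converts[m - MRST_M_MIN] << 8;
    }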
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  Divisor values are the actual divisors used in the
+ * dot-clock equation above, not the register-encoded values.
+ */
+static bool
+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+        struct mrst_clock_t *best_clock)
+{
+    struct mrst_clock_t clock;
+    const struct mrst_limit_t *limit = mrst_limit(crtc);
+    int err = target;
+
+    /* mrst_limit() returns NULL for unrecognized output types */
+    if (!limit)
+        return false;
+
+    memset(best_clock, 0, sizeof(*best_clock));
+
+    for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+        for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+             clock.p1++) {
+            int this_err;
+
+            mrst_clock(refclk, &clock);
+
+            this_err = abs(clock.dot - target);
+            if (this_err < err) {
+                *best_clock = clock;
+                err = this_err;
+            }
+        }
+    }
+    DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
+
+    return err != target;
+}
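As a quick sanity check of the divisor equation dot = (refclk * m) / (14 * p1): on the 100 MHz SKU (refclk = 100000 kHz), m = 18 and p1 = 2 give 1800000 / 28 = 64285 kHz, which is the kind of nearest match mrstFindBestPLL() would report for a 65 MHz panel clock. A minimal usage sketch (the numbers are illustrative, not taken from the patch):

    struct mrst_clock_t clk = { .m = 18, .p1 = 2 };

    mrst_clock(100000, &clk);  /* clk.dot == 64285 kHz, err 715 vs. 65000 */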
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+    struct drm_device *dev = crtc->dev;
+    struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+    int pipe = psb_intel_crtc->pipe;
+    int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+    int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+    int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
+    int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+    u32 temp;
+    bool enabled;
+
+#if PRINT_JLIU7
+    DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d\n",
+         mode, pipe);
+#endif /* PRINT_JLIU7 */
+
+    powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
+
+    /* XXX: When our outputs are all unaware of DPMS modes other than off
+     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+     */
+    switch (mode) {
+    case DRM_MODE_DPMS_ON:
+    case DRM_MODE_DPMS_STANDBY:
+    case DRM_MODE_DPMS_SUSPEND:
+        /* Enable the DPLL */
+        temp = REG_READ(dpll_reg);
+        if ((temp & DPLL_VCO_ENABLE) == 0) {
+            REG_WRITE(dpll_reg, temp);
+            REG_READ(dpll_reg);
+            /* Wait for the clocks to stabilize. */
+            udelay(150);
+            REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+            REG_READ(dpll_reg);
+            /* Wait for the clocks to stabilize. */
+            udelay(150);
+            REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+            REG_READ(dpll_reg);
+            /* Wait for the clocks to stabilize. */
+            udelay(150);
+        }
+
+        /* Enable the pipe */
+        temp = REG_READ(pipeconf_reg);
+        if ((temp & PIPEACONF_ENABLE) == 0)
+            REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+        /* Enable the plane */
+        temp = REG_READ(dspcntr_reg);
+        if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+            REG_WRITE(dspcntr_reg,
+                  temp | DISPLAY_PLANE_ENABLE);
+            /* Flush the plane changes */
+            REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+        }
+
+        psb_intel_crtc_load_lut(crtc);
+
+        /* Give the overlay scaler a chance to enable
+           if it's on this pipe */
+        /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+        break;
+    case DRM_MODE_DPMS_OFF:
+        /* Give the overlay scaler a chance to disable
+         * if it's on this pipe */
+        /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+        /* Disable the VGA plane that we never use */
+        REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+        /* Disable display plane */
+        temp = REG_READ(dspcntr_reg);
+        if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+            REG_WRITE(dspcntr_reg,
+                  temp & ~DISPLAY_PLANE_ENABLE);
+            /* Flush the plane changes */
+            REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+            REG_READ(dspbase_reg);
+        }
+
+        if (!IS_I9XX(dev)) {
+            /* Wait for vblank for the disable to take effect */
+            psb_intel_wait_for_vblank(dev);
+        }
+
+        /* Next, disable display pipes */
+        temp = REG_READ(pipeconf_reg);
+        if ((temp & PIPEACONF_ENABLE) != 0) {
+            REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+            REG_READ(pipeconf_reg);
+        }
+
+        /* Wait for the pipe disable to take effect. */
+        mrstWaitForPipeDisable(dev);
+
+        temp = REG_READ(dpll_reg);
+        if ((temp & DPLL_VCO_ENABLE) != 0) {
+            REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+            REG_READ(dpll_reg);
+        }
+
+        /* Wait for the clocks to turn off. */
+        udelay(150);
+        break;
+    }
+
+#if DUMP_REGISTER
+    dump_dc_registers(dev);
+#endif /* DUMP_REGISTER */
+
+    enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+#if 0 /* JB: Add vblank support later */
+    if (enabled)
+        dev_priv->vblank_pipe |= (1 << pipe);
+    else
+        dev_priv->vblank_pipe &= ~(1 << pipe);
+#endif
+
+    psb_intel_crtc->dpms_mode = mode;
+
+#if 0 /* JB: Add sarea support later */
+    if (!dev->primary->master)
+        return;
+
+    master_priv = dev->primary->master->driver_priv;
+    if (!master_priv->sarea_priv)
+        return;
+
+    switch (pipe) {
+    case 0:
+        master_priv->sarea_priv->planeA_w =
+            enabled ? crtc->mode.hdisplay : 0;
+        master_priv->sarea_priv->planeA_h =
+            enabled ? crtc->mode.vdisplay : 0;
+        break;
+    case 1:
+        master_priv->sarea_priv->planeB_w =
+            enabled ? crtc->mode.hdisplay : 0;
+        master_priv->sarea_priv->planeB_h =
+            enabled ? crtc->mode.vdisplay : 0;
+        break;
+    default:
+        DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+        break;
+    }
+#endif
+
+    /* Set FIFO watermarks */
+    REG_WRITE(DSPARB, 0x3FFF);
+    REG_WRITE(DSPFW1, 0x3F88080A);
+    REG_WRITE(DSPFW2, 0x0b060808);
+    REG_WRITE(DSPFW3, 0x0);
+    REG_WRITE(DSPFW4, 0x08030404);
+    REG_WRITE(DSPFW5, 0x04040404);
+    REG_WRITE(DSPFW6, 0x78);
+    /* Must write Bit 14 of the Chicken Bit Register */
+    REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
+
+    powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+}
+
+static int mrst_crtc_mode_set(struct drm_crtc *crtc,
+                  struct drm_display_mode *mode,
+                  struct drm_display_mode *adjusted_mode,
+                  int x, int y,
+                  struct drm_framebuffer *old_fb)
+{
+    struct drm_device *dev = crtc->dev;
+    struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+    DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+    int pipe = psb_intel_crtc->pipe;
+    int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
+    int dpll_reg = (pipe == 0) ?
MRST_DPLL_A : DPLL_B; + int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; + int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; + int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; + int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; + int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; + int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; + int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; + int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; + int refclk = 0; + struct mrst_clock_t clock; + u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport; + bool ok, is_sdvo = false; + bool is_crt = false, is_lvds = false, is_tv = false; + bool is_mipi = false; + struct drm_mode_config *mode_config = &dev->mode_config; + struct psb_intel_output *psb_intel_output = NULL; + uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; + struct drm_encoder *encoder; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n"); +#endif /* PRINT_JLIU7 */ + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode)); + memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode)); + + list_for_each_entry(encoder, &mode_config->encoder_list, head) { + + if (encoder->crtc != crtc) + continue; + + psb_intel_output = enc_to_psb_intel_output(encoder); + switch (psb_intel_output->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + is_sdvo = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + case INTEL_OUTPUT_ANALOG: + is_crt = true; + break; + case INTEL_OUTPUT_MIPI: + is_mipi = true; + break; + } + } + + if (is_lvds | is_mipi) { + /*FIXME JLIU7 Get panel power delay parameters from + config data */ + REG_WRITE(0x61208, 0x25807d0); + REG_WRITE(0x6120c, 0x1f407d0); + REG_WRITE(0x61210, 0x270f04); + } + + /* Disable the VGA plane that we never use */ + REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); + + /* Disable the panel fitter if it was on our pipe */ + if (psb_intel_panel_fitter_pipe(dev) == pipe) + REG_WRITE(PFIT_CONTROL, 0); + + REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); + + if (psb_intel_output) + drm_connector_property_get_value(&psb_intel_output->base, + dev->mode_config.scaling_mode_property, &scalingType); + + if (scalingType == DRM_MODE_SCALE_NO_SCALE) { + /*Moorestown doesn't have register support for centering so we need to + mess with the h/vblank and h/vsync start and ends to get centering*/ + int offsetX = 0, offsetY = 0; + + offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; + offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; + + REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) | + ((adjusted_mode->crtc_vtotal - 1) << 16)); + REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) | + ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); + REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) | + ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); + REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) | + ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); + REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) | + ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16)); + } else { + REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | + 
((adjusted_mode->crtc_htotal - 1) << 16));
+        REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+              ((adjusted_mode->crtc_vtotal - 1) << 16));
+        REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+              ((adjusted_mode->crtc_hblank_end - 1) << 16));
+        REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+              ((adjusted_mode->crtc_hsync_end - 1) << 16));
+        REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+              ((adjusted_mode->crtc_vblank_end - 1) << 16));
+        REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+              ((adjusted_mode->crtc_vsync_end - 1) << 16));
+    }
+
+    /* Flush the plane changes */
+    {
+        struct drm_crtc_helper_funcs *crtc_funcs =
+            crtc->helper_private;
+        crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+    }
+
+    /* setup pipeconf */
+    pipeconf = REG_READ(pipeconf_reg);
+
+    /* Set up the display plane register */
+    dspcntr = REG_READ(dspcntr_reg);
+    dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+    if (pipe == 0)
+        dspcntr |= DISPPLANE_SEL_PIPE_A;
+    else
+        dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+    dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
+    dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
+
+    if (is_mipi)
+        goto mrst_crtc_mode_set_exit;
+
+    if (dev_priv->sku_100L)
+        refclk = 100000;
+    else if (dev_priv->sku_83)
+        refclk = 166000;
+    else if (dev_priv->sku_100)
+        refclk = 200000;
+
+    dpll = 0; /*BIT16 = 0 for 100MHz reference */
+
+    ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+
+    if (!ok) {
+#if PRINT_JLIU7
+        DRM_INFO("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set.\n");
+#endif /* PRINT_JLIU7 */
+    } else {
+#if PRINT_JLIU7
+        DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
+             "m = %x, p1 = %x.\n", clock.dot, clock.m,
+             clock.p1);
+#endif /* PRINT_JLIU7 */
+    }
+
+    fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
+
+    dpll |= DPLL_VGA_MODE_DIS;
+
+    if (is_lvds)
+        dpll |= DPLLA_MODE_LVDS;
+    else
+        dpll |= DPLLB_MODE_DAC_SERIAL;
+
+    if (is_sdvo) {
+        int sdvo_pixel_multiply =
+            adjusted_mode->clock / mode->clock;
+
+        dpll |= DPLL_DVO_HIGH_SPEED;
+        dpll |=
+            (sdvo_pixel_multiply -
+             1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+    }
+
+    /* compute bitmask from p1 value */
+    dpll |= (1 << (clock.p1 - 2)) << 17;
+
+    dpll |= DPLL_VCO_ENABLE;
+
+#if PRINT_JLIU7
+    mrstPrintPll("chosen", &clock);
+#endif /* PRINT_JLIU7 */
+
+#if 0
+    if (!xf86ModesEqual(mode, adjusted_mode)) {
+        xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+               "Adjusted mode for pipe %c:\n",
+               pipe == 0 ? 'A' : 'B');
+        xf86PrintModeline(pScrn->scrnIndex, mode);
+    }
+    i830PrintPll("chosen", &clock);
+#endif
+
+    if (dpll & DPLL_VCO_ENABLE) {
+        REG_WRITE(fp_reg, fp);
+        REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+        REG_READ(dpll_reg);
+        /* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
+        udelay(150);
+    }
+
+    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+     * This is an exception to the general rule that mode_set doesn't turn
+     * things on.
+     */
+    if (is_lvds) {
+        /*lvdsport = 0x803003c0;*/
+        /*lvdsport = 0x813003c0;*/
+        lvdsport = dev_priv->gct_data.Panel_Port_Control;
+
+        REG_WRITE(LVDS, lvdsport);
+    }
+
+    REG_WRITE(fp_reg, fp);
+    REG_WRITE(dpll_reg, dpll);
+    REG_READ(dpll_reg);
+    /* Wait for the clocks to stabilize. */
+    udelay(150);
+
+    /* write it again -- the BIOS does, after all */
+    REG_WRITE(dpll_reg, dpll);
+    REG_READ(dpll_reg);
+    /* Wait for the clocks to stabilize. */
+    udelay(150);
+
+    REG_WRITE(pipeconf_reg, pipeconf);
+    REG_READ(pipeconf_reg);
+
+    /* Wait for the pipe enable to take effect.
*/ + mrstWaitForPipeEnable(dev); + + REG_WRITE(dspcntr_reg, dspcntr); + psb_intel_wait_for_vblank(dev); + +mrst_crtc_mode_set_exit: + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + + return 0; +} + + +static const struct drm_crtc_helper_funcs mrst_helper_funcs = { + .dpms = mrst_crtc_dpms, + .mode_fixup = psb_intel_crtc_mode_fixup, + .mode_set = mrst_crtc_mode_set, + .mode_set_base = psb_intel_pipe_set_base, + .prepare = psb_intel_crtc_prepare, + .commit = psb_intel_crtc_commit, +}; + +/* MRST_PLATFORM end */ diff --git a/drivers/gpu/drm/psb/psb_intel_display.h b/drivers/gpu/drm/psb/psb_intel_display.h new file mode 100644 index 0000000..dcb79d4 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_intel_display.h @@ -0,0 +1,31 @@ + +/* copyright (c) 2008, Intel Corporation + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors:
+ * Eric Anholt
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+
+#endif
diff --git a/drivers/gpu/drm/psb/psb_intel_drv.h b/drivers/gpu/drm/psb/psb_intel_drv.h
new file mode 100644
index 0000000..a64ce59
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_intel_drv.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2006 Dave Airlie
+ * Copyright (c) 2007 Intel Corporation
+ *   Jesse Barnes
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include
+#include
+#include
+#include
+
+#include
+
+/*
+ * MOORESTOWN defines
+ */
+#define MRST_I2C 0
+
+#define DUMP_REGISTER 0
+#define MRST_24BIT_LVDS 1
+#define MRST_24BIT_DOT_1 0
+#define MRST_24BIT_WA 0
+
+#define PRINT_JLIU7 0
+#define DELAY_TIME1 2000 /* 1000 = 1ms */
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* these are outputs from the chip - integrated only
+ * external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_MIPI 6
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+struct opregion_header {
+    u8 signature[16];
+    u32 size;
+    u32 opregion_ver;
+    u8 bios_ver[32];
+    u8 vbios_ver[16];
+    u8 driver_ver[16];
+    u32 mboxes;
+    u8 reserved[164];
+} __attribute__((packed));
+
+struct opregion_apci {
+    /*FIXME: add it later*/
+} __attribute__((packed));
+
+struct opregion_swsci {
+    /*FIXME: add it later*/
+} __attribute__((packed));
+
+struct opregion_acpi {
+    /*FIXME: add it later*/
+} __attribute__((packed));
+
+struct psb_intel_opregion {
+    struct opregion_header *header;
+    struct opregion_acpi *acpi;
+    struct opregion_swsci *swsci;
+    struct opregion_asle *asle;
+    int enabled;
+};
+
+/**
+ * Holds information usually kept in the device driver's private data,
+ * since it needs to be shared across the private data of several
+ * device drivers.
+ */
+struct psb_intel_mode_device {
+
+    /*
+     * Abstracted memory manager operations
+     */
+    void *(*bo_from_handle) (struct drm_device *dev,
+                 struct drm_file *file_priv,
+                 unsigned int handle);
+    size_t (*bo_size) (struct drm_device *dev, void *bo);
+    size_t (*bo_offset) (struct drm_device *dev, void *bo);
+    int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
+    int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
+
+    /*
+     * Cursor
+     */
+    int cursor_needs_physical;
+
+    /*
+     * LVDS info
+     */
+    int backlight_duty_cycle; /* restore backlight to this value */
+    bool panel_wants_dither;
+    struct drm_display_mode *panel_fixed_mode;
+    struct drm_display_mode *vbt_mode; /* if any */
+
+    uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_intel_i2c_chan {
+    /* for getting at dev. private (mmio etc.)
*/ + struct drm_device *drm_dev; + u32 reg; /* GPIO reg */ + struct i2c_adapter adapter; + struct i2c_algo_bit_data algo; + u8 slave_addr; +}; + +struct psb_intel_output { + struct drm_connector base; + + struct drm_encoder enc; + int type; + struct psb_intel_i2c_chan *i2c_bus; /* for control functions */ + struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */ + bool load_detect_temp; + void *dev_priv; + + struct psb_intel_mode_device *mode_dev; + +}; + +struct psb_intel_crtc_state { + uint32_t saveDSPCNTR; + uint32_t savePIPECONF; + uint32_t savePIPESRC; + uint32_t saveDPLL; + uint32_t saveFP0; + uint32_t saveFP1; + uint32_t saveHTOTAL; + uint32_t saveHBLANK; + uint32_t saveHSYNC; + uint32_t saveVTOTAL; + uint32_t saveVBLANK; + uint32_t saveVSYNC; + uint32_t saveDSPSTRIDE; + uint32_t saveDSPSIZE; + uint32_t saveDSPPOS; + uint32_t saveDSPBASE; + uint32_t savePalette[256]; +}; + +struct psb_intel_crtc { + struct drm_crtc base; + int pipe; + int plane; + uint32_t cursor_addr; + u8 lut_r[256], lut_g[256], lut_b[256]; + int dpms_mode; + struct psb_intel_framebuffer *fbdev_fb; + /* a mode_set for fbdev users on this crtc */ + struct drm_mode_set mode_set; + + /* current bo we scanout from */ + void *scanout_bo; + + /* current bo we cursor from */ + void *cursor_bo; + + struct drm_display_mode saved_mode; + struct drm_display_mode saved_adjusted_mode; + + struct psb_intel_mode_device *mode_dev; + +/*FIXME: Workaround to avoid MRST block.*/ +#ifndef CONFIG_MRST + /** + * Saved Crtc HW states + */ + struct psb_intel_crtc_state * crtc_state; +#endif +}; + +#define to_psb_intel_crtc(x) container_of(x, struct psb_intel_crtc, base) +#define to_psb_intel_output(x) container_of(x, struct psb_intel_output, base) +#define enc_to_psb_intel_output(x) container_of(x, struct psb_intel_output, enc) +#define to_psb_intel_framebuffer(x) container_of(x, struct psb_intel_framebuffer, base) + +struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev, + const u32 reg, const char *name); +void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan); +int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output); +extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output); + +extern void psb_intel_crtc_init(struct drm_device *dev, int pipe, + struct psb_intel_mode_device *mode_dev); +extern void psb_intel_crt_init(struct drm_device *dev); +extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device); +extern void psb_intel_dvo_init(struct drm_device *dev); +extern void psb_intel_tv_init(struct drm_device *dev); +extern void psb_intel_lvds_init(struct drm_device *dev, + struct psb_intel_mode_device *mode_dev); +extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level); +extern void mrst_lvds_init(struct drm_device *dev, + struct psb_intel_mode_device *mode_dev); +extern void mrst_dsi_init(struct drm_device *dev, + struct psb_intel_mode_device *mode_dev); + +extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc); +extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); +extern void psb_intel_encoder_commit(struct drm_encoder *encoder); + +extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector + *connector); + +extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, + struct drm_crtc *crtc); +extern void psb_intel_wait_for_vblank(struct drm_device *dev); +extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, + int pipe); +extern struct drm_connector 
*psb_intel_sdvo_find(struct drm_device *dev, + int sdvoB); +extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector); +extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, + int enable); +extern int intelfb_probe(struct drm_device *dev); +extern int intelfb_remove(struct drm_device *dev, + struct drm_framebuffer *fb); +extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device + *dev, struct + drm_mode_fb_cmd + *mode_cmd, + void *mm_private); +#endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/psb/psb_intel_dsi.c b/drivers/gpu/drm/psb/psb_intel_dsi.c new file mode 100644 index 0000000..bcfee62 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_intel_dsi.c @@ -0,0 +1,1798 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * jim liu + */ + +#include +#include +#include + +#define DRM_MODE_ENCODER_MIPI 5 +#define DRM_MODE_CONNECTOR_MIPI 13 + +#if DUMP_REGISTER +extern void dump_dsi_registers(struct drm_device *dev); +#endif /* DUMP_REGISTER */ + +int dsi_backlight; /* restore backlight to this value */ + +/** + * Returns the maximum level of the backlight duty cycle field. + */ +static u32 mrst_dsi_get_max_backlight(struct drm_device *dev) +{ +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n"); +#endif /* PRINT_JLIU7 */ + + return BRIGHTNESS_MAX_LEVEL; + +/* FIXME jliu7 need to revisit */ +} + +/** + * Sets the power state for the panel. + */ +static void mrst_dsi_set_power(struct drm_device *dev, + struct psb_intel_output *output, bool on) +{ + DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private; + u32 pp_status; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_dsi_set_power \n"); +#endif /* PRINT_JLIU7 */ + /* + * The DIS device must be ready before we can change power state. + */ + if (!dev_priv->dsi_device_ready) + { + return; + } + + /* + * We don't support dual DSI yet. May be in POR in the future. 
+     */
+    if (dev_priv->dual_display)
+    {
+        return;
+    }
+
+    powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
+
+    if (on) {
+        if (dev_priv->dpi && !dev_priv->dpi_panel_on)
+        {
+#if PRINT_JLIU7
+            DRM_INFO("JLIU7 mrst_dsi_set_power dpi = on\n");
+#endif /* PRINT_JLIU7 */
+            REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
+#if 0 /*FIXME JLIU7 */
+            REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_ON_DATA);
+            REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_ON);
+#endif /*FIXME JLIU7 */
+
+            dev_priv->dpi_panel_on = true;
+
+            REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                  POWER_TARGET_ON);
+            do {
+                pp_status = REG_READ(PP_STATUS);
+            } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+        }
+        else if (!dev_priv->dpi && !dev_priv->dbi_panel_on)
+        {
+#if PRINT_JLIU7
+            DRM_INFO("JLIU7 mrst_dsi_set_power dbi = on\n");
+#endif /* PRINT_JLIU7 */
+
+            dev_priv->DBI_CB_pointer = 0;
+            /* exit sleep mode */
+            *(dev_priv->p_DBI_commandBuffer +
+              dev_priv->DBI_CB_pointer++) = exit_sleep_mode;
+
+#if 0 /*FIXME JLIU7 */
+            /* Check MIPI Adapter command registers */
+            while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
+#endif /*FIXME JLIU7 */
+
+            /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
+            REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
+            REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
+                  (u32)dev_priv->p_DBI_commandBuffer | BIT0);
+
+            /* The host processor must wait five milliseconds after
+             * sending an exit_sleep_mode command before sending
+             * another command.  This delay allows the supply
+             * voltages and clock circuits to stabilize. */
+            udelay(5000);
+
+            dev_priv->DBI_CB_pointer = 0;
+
+            /* set display on */
+            *(dev_priv->p_DBI_commandBuffer +
+              dev_priv->DBI_CB_pointer++) = set_display_on;
+
+#if 0 /*FIXME JLIU7 */
+            /* Check MIPI Adapter command registers */
+            while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
+#endif /*FIXME JLIU7 */
+
+            /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
+            REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
+            REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
+                  (u32)dev_priv->p_DBI_commandBuffer | BIT0);
+
+            dev_priv->dbi_panel_on = true;
+        }
+/*FIXME JLIU7 */
+/* Need to figure out how to control the MIPI panel power on sequence*/
+
+    }
+    else
+    {
+/*FIXME JLIU7 */
+/* Need to figure out how to control the MIPI panel power down sequence*/
+        /*
+         * Only save the current backlight value if we're going from
+         * on to off.
+         */
+        if (dev_priv->dpi && dev_priv->dpi_panel_on)
+        {
+#if PRINT_JLIU7
+            DRM_INFO("JLIU7 mrst_dsi_set_power dpi = off\n");
+#endif /* PRINT_JLIU7 */
+
+            REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                  ~POWER_TARGET_ON);
+            do {
+                pp_status = REG_READ(PP_STATUS);
+            } while (pp_status & PP_ON);
+
+#if 0 /*FIXME JLIU7 */
+            REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_OFF_DATA);
+            REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_OFF);
+#endif /*FIXME JLIU7 */
+            REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
+            dev_priv->dpi_panel_on = false;
+        }
+        else if (!dev_priv->dpi && dev_priv->dbi_panel_on)
+        {
+#if PRINT_JLIU7
+            DRM_INFO("JLIU7 mrst_dsi_set_power dbi = off\n");
+#endif /* PRINT_JLIU7 */
+            dev_priv->DBI_CB_pointer = 0;
+            /* enter sleep mode */
+            *(dev_priv->p_DBI_commandBuffer +
+              dev_priv->DBI_CB_pointer++) = enter_sleep_mode;
+
+            /* Check MIPI Adapter command registers */
+            while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
+
+            /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
+            REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
+            REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
+                  (u32)dev_priv->p_DBI_commandBuffer | BIT0);
+            dev_priv->dbi_panel_on = false;
+        }
+    }
+
+    powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+}
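The DBI branches above always submit a command the same way: reset the buffer pointer, queue one command byte, program the length register, then write the buffer address with BIT0 set as the "go" bit. A sketch of that recurring sequence, under the assumption that it could be factored into a helper (mrst_dbi_send_byte is our name, not the patch's):

    static void mrst_dbi_send_byte(struct drm_device *dev,
                                   DRM_DRIVER_PRIVATE_T *dev_priv, u8 cmd)
    {
            dev_priv->DBI_CB_pointer = 0;
            *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = cmd;
            REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
            REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
                      (u32)dev_priv->p_DBI_commandBuffer | BIT0);
    }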
+
+static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
+{
+    struct drm_device *dev = encoder->dev;
+    struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+
+#if PRINT_JLIU7
+    DRM_INFO("JLIU7 enter mrst_dsi_dpms\n");
+#endif /* PRINT_JLIU7 */
+
+    if (mode == DRM_MODE_DPMS_ON)
+        mrst_dsi_set_power(dev, output, true);
+    else
+        mrst_dsi_set_power(dev, output, false);
+
+    /* XXX: We never power down the DSI pairs. */
+}
+
+static void mrst_dsi_save(struct drm_connector *connector)
+{
+#if 0 /* JB: Disable for drop */
+    struct drm_device *dev = connector->dev;
+    DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+#if PRINT_JLIU7
+    DRM_INFO("JLIU7 enter mrst_dsi_save\n");
+#endif /* PRINT_JLIU7 */
+
+    dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
+    dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+    dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+    dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+    dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+    dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+                      BACKLIGHT_DUTY_CYCLE_MASK);
+
+    /*
+     * make backlight to full brightness
+     */
+    dsi_backlight = mrst_dsi_get_max_backlight(dev);
+#endif
+}
+
+static void mrst_dsi_restore(struct drm_connector *connector)
+{
+#if 0 /* JB: Disable for drop */
+    struct drm_device *dev = connector->dev;
+    DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+#if PRINT_JLIU7
+    DRM_INFO("JLIU7 enter mrst_dsi_restore\n");
+#endif /* PRINT_JLIU7 */
+
+    REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+    REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
+    REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
+    REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
+    REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+    if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
+        mrst_dsi_set_power(dev, true);
+    else
+        mrst_dsi_set_power(dev, false);
+#endif
+}
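mrst_dsi_set_power() polls PP_STATUS in unbounded do/while loops, so a wedged panel power sequencer would hang the caller. A bounded variant is sketched below for comparison (helper name and retry budget are ours, not the patch's):

    static int mrst_wait_panel_off(struct drm_device *dev)
    {
            int retries = 10000; /* roughly 100 ms at 10 us per poll */

            while ((REG_READ(PP_STATUS) & PP_ON) && --retries)
                    udelay(10);

            return retries ? 0 : -ETIMEDOUT;
    }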
+
+static void mrst_dsi_prepare(struct drm_encoder *encoder)
+{
+    struct drm_device *dev = encoder->dev;
+    struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+    struct psb_intel_mode_device *mode_dev = output->mode_dev;
+
+#if PRINT_JLIU7
+    DRM_INFO("JLIU7 enter mrst_dsi_prepare\n");
+#endif /* PRINT_JLIU7 */
+
+    powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
+
+    mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+    mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+                      BACKLIGHT_DUTY_CYCLE_MASK);
+
+    mrst_dsi_set_power(dev, output, false);
+
+    powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+}
+
+static void mrst_dsi_commit(struct drm_encoder *encoder)
+{
+    struct drm_device *dev = encoder->dev;
+    struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+    struct psb_intel_mode_device *mode_dev = output->mode_dev;
+
+#if PRINT_JLIU7
+    DRM_INFO("JLIU7 enter mrst_dsi_commit\n");
+#endif /* PRINT_JLIU7 */
+
+    if (mode_dev->backlight_duty_cycle == 0)
+        mode_dev->backlight_duty_cycle =
+            mrst_dsi_get_max_backlight(dev);
+
+    mrst_dsi_set_power(dev, output, true);
+
+#if DUMP_REGISTER
+    dump_dsi_registers(dev);
+#endif /* DUMP_REGISTER */
+}
+
+#if 0
+/* ************************************************************************* *\
+FUNCTION: GetHS_TX_timeoutCount
+
+DESCRIPTION: In burst mode, a value greater than one DPI line time in byte
+    clock (txbyteclkhs).  To time out this timer, 1 + the above value is
+    recommended.
+
+    In non-burst mode, a value greater than one DPI frame time in byte
+    clock (txbyteclkhs).  To time out this timer, 1 + the above value is
+    recommended.
+
+\* ************************************************************************* */
+static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+    u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
+
+    /* Total pixels that need to be transferred per line */
+    HTotalPixel = (dev_priv->HsyncWidth + dev_priv->HbackPorch +
+               dev_priv->HfrontPorch) * dev_priv->laneCount +
+              dev_priv->HactiveArea;
+
+    /* byte count = (pixel count * bits per pixel) / 8 */
+    HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
+
+    if (dev_priv->videoModeFormat == BURST_MODE)
+    {
+        timeoutCount = HTOT_count + 1;
+#if 1 /*FIXME remove it after power-on */
+        VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch +
+                 dev_priv->VfrontPorch + dev_priv->VsyncWidth;
+        /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
+        timeoutCount = (HTOT_count * VTOT_count) + 1;
+#endif
+    }
+    else
+    {
+        VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch +
+                 dev_priv->VfrontPorch + dev_priv->VsyncWidth;
+        /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
+        timeoutCount = (HTOT_count * VTOT_count) + 1;
+    }
+
+    return timeoutCount & 0xFFFF;
+}
+
+/* ************************************************************************* *\
+FUNCTION: GetLP_RX_timeoutCount
+
+DESCRIPTION: The timeout value is protocol specific.  The timeout value is
+    calculated from txclkesc (50 ns).
+
+    Minimum value =
+        Time to send one trigger message = 4 x txclkesc
+        [escape mode entry sequence]
+        + 8-bit trigger message (2 x 8 x txclkesc)
+        + 1 txclkesc [stop state]
+        = 21 x txclkesc [0x15]
+
+    Maximum value =
+        Time to send a long packet with maximum payload data
+        = 4 x txclkesc [escape mode entry sequence]
+        + 8-bit low power data transmission command (2 x 8 x txclkesc)
+        + packet header [4 x 8 x 2 x txclkesc]
+        + payload [n x 8 x 2 x txclkesc]
+        + CRC [2 x 8 x 2 x txclkesc]
+        + 1 txclkesc [stop state]
+        = 117 txclkesc + n [payload in terms of bytes] x 16 txclkesc
+
+\* ************************************************************************* */
+static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+    u32 timeoutCount = 0;
+
+    if (dev_priv->config_phase)
+    {
+        /* Assuming 256 byte DDB data.*/
+        timeoutCount = 117 + 256 * 16;
+    }
+    else
+    {
+        /* For DPI video only mode use the minimum value.*/
+        timeoutCount = 0x15;
+#if 1 /*FIXME remove it after power-on */
+        /* Assuming 256 byte DDB data.*/
+        timeoutCount = 117 + 256 * 16;
+#endif
+    }
+
+    return timeoutCount;
+}
+#endif /* #if 0 - to avoid warnings */
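The GetHSA_Count/GetHBP_Count/GetHFP_Count helpers that follow all convert a pixel count into txbyteclkhs the same way: multiply by bits per pixel, then divide by 2 clock edges x 8 bits per byte x 2 lanes. That common step could be written once (hypothetical helper, not in the patch):

    static u32 mipi_pixels_to_txbyteclkhs(u32 pixels, u32 bpp)
    {
            /* bits on the wire, spread over two DDR lanes, 8 bits per byte */
            return (pixels * bpp) / (2 * 8 * 2);
    }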
+
+\* ************************************************************************* */
+static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+
+ u32 timeoutCount = 0;
+
+ if (dev_priv->config_phase)
+ {
+ /* Assuming 256 byte DDB data.*/
+ timeoutCount = 117 + 256 * 16;
+ }
+ else
+ {
+ /* For DPI video only mode use the minimum value.*/
+ timeoutCount = 0x15;
+#if 1 /*FIXME remove it after power-on */
+ /* Assuming 256 byte DDB data.*/
+ timeoutCount = 117 + 256 * 16;
+#endif
+ }
+
+ return timeoutCount;
+}
+#endif /* #if 0 - to avoid warnings */
+
+/* ************************************************************************* *\
+FUNCTION: GetHSA_Count
+
+DESCRIPTION: Shows the horizontal sync value in terms of byte clock
+ (txbyteclkhs)
+ Minimum HSA period should be sufficient to transmit a hsync start short
+ packet (4 bytes)
+ i) For Non-burst Mode with sync pulse, Min value: 4 in decimal [plus
+ an optional 6 bytes for a zero payload blanking packet]. But if
+ the value is less than 10 but more than 4, then this count will
+ be added to the HBP's count for one lane.
+ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA, so you
+ can program this to zero. If you program this register, these
+ byte values will be added to HBP.
+ iii) For Burst mode of operation, normally the values programmed in
+ terms of byte clock are based on the principle - time for transferring
+ HSA in Burst mode is the same as in non-burst mode.
+\* ************************************************************************* */
+static u32 GetHSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 HSA_count;
+ u32 HSA_countX8;
+
+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
+ /*HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
+
+ if (dev_priv->videoModeFormat == BURST_MODE)
+ {
+ HSA_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
+ }
+
+ HSA_count = HSA_countX8 / 8;*/
+
+ /* since mode_set already computed Display Controller timings,
+ * read the register and compute mipi timings.
+ */
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ HSA_countX8 = REG_READ(HSYNC_A);
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else
+ HSA_countX8 = dev_priv->saveHSYNC_A;
+
+ /* Get the hsync pulse width */
+ HSA_count = ((HSA_countX8 & 0xffff0000)>>16) - (HSA_countX8 & 0xffff);
+ /* compute HSA according to equation:
+ (hsync_end - hsync_start) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
+ HSA_count = (HSA_count * dev_priv->bpp)/(2 * 8 * 2);
+ if (HSA_count < 4) /* minimum value of 4 */
+ HSA_count = 4;
+
+ return HSA_count;
+}
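+
+/* Worked example (illustrative): with the hard-coded 800x480 panel mode used
+ * later in this file (hsync_start = 808, hsync_end = 848), the pulse width
+ * read back from HSYNC_A is 40 pixels, so
+ * HSA_count = 40 * 24 / (2 * 8 * 2) = 30 txbyteclkhs, above the minimum of 4.
+ */
+
+/* ************************************************************************* *\
+FUNCTION: GetHBP_Count
+
+DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
+ Minimum HBP period should be sufficient to transmit a "hsync end short
+ packet (4 bytes) + blanking packet overhead (6 bytes) + RGB packet header (4 bytes)"
+ For Burst mode of operation, normally the values programmed in terms of
+ byte clock are based on the principle - time for transferring HBP
+ in Burst mode is the same as in non-burst mode.
+
+ Min value: 14 in decimal [accounted with zero payload for blanking packet] for one lane.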
+ Max value: any value greater than 14 based on DPI resolution
+\* ************************************************************************* */
+static u32 GetHBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 HBP_count;
+ u32 HBE, HSE;
+
+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
+ /*HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
+
+ if (dev_priv->videoModeFormat == BURST_MODE)
+ {
+ HBP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
+ }
+
+ HBP_count = HBP_countX8 / 8;*/
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ HBE = (REG_READ(HBLANK_A) & 0xffff0000) >> 16;
+ HSE = (REG_READ(HSYNC_A) & 0xffff0000) >> 16;
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else {
+ HBE = (dev_priv->saveHBLANK_A & 0xffff0000) >> 16;
+ HSE = (dev_priv->saveHSYNC_A & 0xffff0000) >> 16;
+ }
+
+ /* Back porch = hblank end - hsync end */
+ HBP_count = HBE - HSE;
+ /* compute HBP according to equation:
+ * (hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
+ HBP_count = (HBP_count * dev_priv->bpp)/(2 * 8 * 2);
+ if (HBP_count < 8) /* minimum value of 8 */
+ HBP_count = 8;
+
+ return HBP_count;
+}
+
+/* ************************************************************************* *\
+FUNCTION: GetHFP_Count
+
+DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
+ Minimum HFP period should be sufficient to transmit "RGB data packet
+ footer (2 bytes) + blanking packet overhead (6 bytes)" for non-burst mode.
+
+ For burst mode, minimum HFP period should be sufficient to transmit
+ blanking packet overhead (6 bytes).
+
+ For Burst mode of operation, normally the values programmed in terms of
+ byte clock are based on the principle - time for transferring HFP
+ in Burst mode is the same as in non-burst mode.
+
+ Min value: 8 in decimal for non-burst mode [accounted with zero payload
+ for blanking packet] for one lane.
+ Min value: 6 in decimal for burst mode for one lane.
+
+ Max value: any value greater than the minimum value based on DPI resolution
+\* ************************************************************************* */
+static u32 GetHFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 HFP_count;
+ u32 HBS, HSS;
+
+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
+ /*HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
+
+ if (dev_priv->videoModeFormat == BURST_MODE)
+ {
+ HFP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
+ }
+
+ HFP_count = HFP_countX8 / 8;*/
+
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ HBS = REG_READ(HBLANK_A) & 0xffff;
+ HSS = REG_READ(HSYNC_A) & 0xffff;
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else {
+ HBS = dev_priv->saveHBLANK_A & 0xffff;
+ HSS = dev_priv->saveHSYNC_A & 0xffff;
+ }
+
+ /* Front porch = hsync start - hblank start */
+ HFP_count = HSS - HBS;
+ /* compute HFP according to equation:
+ * (hsync_start - hblank_start) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
+ HFP_count = (HFP_count * dev_priv->bpp)/(2 * 8 * 2);
+ if (HFP_count < 8) /* minimum value of 8 */
+ HFP_count = 8;
+
+ return HFP_count;
+}
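+
+/* Worked example (illustrative): for the same hard-coded 800x480 mode
+ * (hblank 800..880, hsync 808..848), GetHBP_Count() yields
+ * (880 - 848) * 24 / 32 = 24 txbyteclkhs, while GetHFP_Count() yields
+ * (808 - 800) * 24 / 32 = 6, which the clamp raises to the minimum of 8.
+ */
+
+/* ************************************************************************* *\
+FUNCTION: GetHAdr_Count
+
+DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.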
+ In Non-Burst Mode, the count equals the RGB word count value.
+
+ In Burst Mode, RGB pixel packets are time-compressed, leaving more time
+ during a scan line for LP mode (saving power) or for multiplexing
+ other transmissions onto the DSI link. Hence, the count equals the
+ time in txbyteclkhs for sending time compressed RGB pixels plus
+ the time needed for moving to power save mode or the time needed
+ for secondary channel to use the DSI link.
+
+ But if the left-over time for moving to low power mode is less than
+ 8 txbyteclkhs [2 txbyteclkhs for RGB data packet footer and
+ 6 txbyteclkhs for a blanking packet with zero payload], then
+ this count will be added to the HFP's count for one lane.
+
+ Min value: 8 in decimal for non-burst mode [accounted with zero payload
+ for blanking packet] for one lane.
+ Min value: 6 in decimal for burst mode for one lane.
+
+ Max value: any value greater than the minimum value based on DPI resolution
+\* ************************************************************************* */
+static u32 GetHAdr_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 HAdr_count;
+ u32 Hactive;
+
+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
+ /*HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
+
+ if (dev_priv->videoModeFormat == BURST_MODE)
+ {
+ HAdr_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
+ }
+
+ HAdr_count = HAdr_countX8 / 8;*/
+
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ Hactive = (REG_READ(HTOTAL_A) & 0x0fff) + 1;
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else
+ Hactive = (dev_priv->saveHTOTAL_A & 0x0fff) + 1;
+
+ /* compute HAdr according to equation:
+ * (hactive * 24 bpp / 8) / 2 lanes */
+
+ HAdr_count = (Hactive * dev_priv->bpp/8) / 2;
+
+ return HAdr_count;
+}
+
+/* ************************************************************************* *\
+FUNCTION: GetVSA_Count
+
+DESCRIPTION: Shows the vertical sync value in terms of lines
+
+\* ************************************************************************* */
+static u32 GetVSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 VSA_count;
+ u32 VSA_countX8;
+
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ VSA_countX8 = REG_READ(VSYNC_A);
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else
+ VSA_countX8 = dev_priv->saveVSYNC_A;
+
+ /* Get the vsync pulse width */
+ VSA_count = ((VSA_countX8 & 0xffff0000)>>16) - (VSA_countX8 & 0xffff);
+
+ if (VSA_count < 2) /* minimum value of 2 */
+ VSA_count = 2;
+
+ return VSA_count;
+}
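+
+/* Worked example (illustrative): the hard-coded 800x480 mode has
+ * vsync_start = 482 and vsync_end = 483, so the raw pulse width is one line
+ * and GetVSA_Count() returns the clamped minimum of 2 lines.
+ */
+
+/* ************************************************************************* *\
+ * FUNCTION: GetVBP_Count
+ *
+ * DESCRIPTION: Shows the vertical back porch value in lines.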
+ *
+\* ************************************************************************* */
+static u32 GetVBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 VBP_count;
+ u32 VBE, VSE;
+
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ VBE = (REG_READ(VBLANK_A) & 0xffff0000) >> 16;
+ VSE = (REG_READ(VSYNC_A) & 0xffff0000) >> 16;
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else {
+ VBE = (dev_priv->saveVBLANK_A & 0xffff0000) >> 16;
+ VSE = (dev_priv->saveVSYNC_A & 0xffff0000) >> 16;
+ }
+
+ /* Vertical back porch = vblank end - vsync end */
+ VBP_count = VBE - VSE;
+
+ if (VBP_count < 2) /* minimum value of 2 */
+ VBP_count = 2;
+
+ return VBP_count;
+}
+/* ************************************************************************* *\
+ * FUNCTION: GetVFP_Count
+ *
+ * DESCRIPTION: Shows the vertical front porch value in terms of lines.
+ *
+\* ************************************************************************* */
+static u32 GetVFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 VFP_count;
+ u32 VBS, VSS;
+
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ VBS = REG_READ(VBLANK_A) & 0xffff;
+ VSS = REG_READ(VSYNC_A) & 0xffff;
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else {
+ VBS = dev_priv->saveVBLANK_A & 0xffff;
+ VSS = dev_priv->saveVSYNC_A & 0xffff;
+ }
+
+ /* Vertical front porch = vsync start - vblank start */
+ VFP_count = VSS - VBS;
+
+ if (VFP_count < 2) /* minimum value of 2 */
+ VFP_count = 2;
+
+ return VFP_count;
+}
+
+#if 0
+/* ************************************************************************* *\
+FUNCTION: GetHighLowSwitchCount
+
+DESCRIPTION: High speed to low power or low power to high speed switching time
+ in terms of byte clock (txbyteclkhs). This value is based on the
+ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
+
+ Typical value - number of byte clocks required to switch from low power mode
+ to high speed mode after "txrequesths" is asserted.
+
+ The worst count value among the low to high or high to low switching time
+ in terms of txbyteclkhs has to be programmed in this register.
+
+ Useful formulae:
+ DDR clock period = 2 times UI
+ txbyteclkhs clock = 8 times UI
+ Tlpx = 1 / txclkesc
+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
+\* ************************************************************************* */
+static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
+{
+ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
+
+/* ************************************************************************* *\
+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
+
+ Tlpx = 50 ns, Using max txclkesc (20MHz)
+
+ txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
+ UI_period = 500 / dev_priv->DDR_Clock; in ns
+
+ HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
+ = 9000 / dev_priv->DDR_Clock + 200;
+
+ HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
+ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
+ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
+
+\* ************************************************************************* */
+ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
+
+/* ************************************************************************* *\
+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
+
+ LP_to_HS = 10 * UI_period + 5 * Tlpx
+ = 5000 / dev_priv->DDR_Clock + 250;
+
+ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
+ = (5000 / dev_priv->DDR_Clock + 250) / (4000 / dev_priv->DDR_Clock)
+ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
+
+\* ************************************************************************* */
+ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
+
+ if (HighToLowSwitchCount > LowToHighSwitchCount)
+ {
+ HighLowSwitchCount = HighToLowSwitchCount;
+ }
+ else
+ {
+ HighLowSwitchCount = LowToHighSwitchCount;
+ }
+
+ /* FIXME jliu need to fine tune the above formulae and remove the following after power on */
+ if (HighLowSwitchCount < 0x1f)
+ HighLowSwitchCount = 0x1f;
+
+ return HighLowSwitchCount;
+}
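+
+/* Worked example (illustrative; the derivation above expresses DDR_Clock in
+ * MHz): for a 400 MHz DDR clock,
+ * HighToLowSwitchCount = (9000 + 200 * 400) / 4000 + 1 = 23 and
+ * LowToHighSwitchCount = (5000 + 250 * 400) / 4000 + 1 = 27. The larger
+ * value, 27, is still below the 0x1f floor applied above, so 0x1f is
+ * returned until the formulae are tuned after power-on.
+ */
+
+/* ************************************************************************* *\
+FUNCTION: mrst_gen_long_write
+
+DESCRIPTION:
+
+\* ************************************************************************* */
+static void mrst_gen_long_write(struct drm_device *dev, u32 *data, u16 wc, u8 vc)
+{
+ u32 gen_data_reg = HS_GEN_DATA_REG;
+ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
+ u32 date_full_bit = HS_DATA_FIFO_FULL;
+ u32 control_full_bit = HS_CTRL_FIFO_FULL;
+ u16 wc_saved = wc;
+
+#if PRINT_JLIU7
+ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
+#endif /* PRINT_JLIU7 */
+
+ /* sanity check */
+ if (vc > 4)
+ {
+ DRM_ERROR("MIPI virtual channel can't be greater than 4.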
\n"); + return; + } + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + if (0) /* FIXME JLIU7 check if it is in LP*/ + { + gen_data_reg = LP_GEN_DATA_REG; + gen_ctrl_reg = LP_GEN_CTRL_REG; + date_full_bit = LP_DATA_FIFO_FULL; + control_full_bit = LP_CTRL_FIFO_FULL; + } + + while (wc >= 4) + { + /* Check if MIPI IP generic data fifo is not full */ + while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit) == date_full_bit); + + /* write to data buffer */ + REG_WRITE(gen_data_reg, *data); + + wc -= 4; + data ++; + } + + switch (wc) + { + case 1: + REG_WRITE8(gen_data_reg, *((u8 *)data)); + break; + case 2: + REG_WRITE16(gen_data_reg, *((u16 *)data)); + break; + case 3: + REG_WRITE16(gen_data_reg, *((u16 *)data)); + data = (u32*)((u8*) data + 2); + REG_WRITE8(gen_data_reg, *((u8 *)data)); + break; + } + + /* Check if MIPI IP generic control fifo is not full */ + while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit) == control_full_bit); + /* write to control buffer */ + REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6)); + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); +} + +/* ************************************************************************* *\ +FUNCTION: mrst_init_HIMAX_MIPI_bridge + ` +DESCRIPTION: + +\* ************************************************************************* */ +static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev) +{ + u32 gen_data[2]; + u16 wc = 0; + u8 vc =0; + u32 gen_data_intel = 0x200105; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n"); +#endif /* PRINT_JLIU7 */ + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + /* exit sleep mode */ + wc = 0x5; + gen_data[0] = gen_data_intel | (0x11 << 24); + gen_data[1] = 0; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_pixel_format */ + gen_data[0] = gen_data_intel | (0x3A << 24); + gen_data[1] = 0x77; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* Set resolution for (800X480) */ + wc = 0x8; + gen_data[0] = gen_data_intel | (0x2A << 24); + gen_data[1] = 0x1F030000; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[0] = gen_data_intel | (0x2B << 24); + gen_data[1] = 0xDF010000; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* System control */ + wc = 0x6; + gen_data[0] = gen_data_intel | (0xEE << 24); + gen_data[1] = 0x10FA; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* INPUT TIMING FOR TEST PATTERN(800X480) */ + /* H-size */ + gen_data[1] = 0x2000; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0301; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* V-size */ + gen_data[1] = 0xE002; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0103; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* H-total */ + gen_data[1] = 0x2004; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0405; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* V-total */ + gen_data[1] = 0x0d06; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0207; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* H-blank */ + gen_data[1] = 0x0308; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0009; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* H-blank */ + gen_data[1] = 0x030A; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x000B; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* H-start */ + gen_data[1] = 0xD80C; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x000D; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* V-start 
*/ + gen_data[1] = 0x230E; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x000F; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* RGB domain */ + gen_data[1] = 0x0027; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* INP_FORM Setting */ + /* set_1 */ + gen_data[1] = 0x1C10; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_2 */ + gen_data[1] = 0x0711; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_3 */ + gen_data[1] = 0x0012; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_4 */ + gen_data[1] = 0x0013; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_5 */ + gen_data[1] = 0x2314; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_6 */ + gen_data[1] = 0x0015; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_7 */ + gen_data[1] = 0x2316; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_8 */ + gen_data[1] = 0x0017; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_1 */ + gen_data[1] = 0x0330; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* FRC Setting */ + /* FRC_set_2 */ + gen_data[1] = 0x237A; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* FRC_set_3 */ + gen_data[1] = 0x4C7B; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* FRC_set_4 */ + gen_data[1] = 0x037C; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* FRC_set_5 */ + gen_data[1] = 0x3482; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* FRC_set_7 */ + gen_data[1] = 0x1785; + mrst_gen_long_write(dev, gen_data, wc, vc); + +#if 0 + /* FRC_set_8 */ + gen_data[1] = 0xD08F; + mrst_gen_long_write(dev, gen_data, wc, vc); +#endif + + /* OUTPUT TIMING FOR TEST PATTERN (800X480) */ + /* out_htotal */ + gen_data[1] = 0x2090; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0491; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* out_hsync */ + gen_data[1] = 0x0392; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0093; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* out_hstart */ + gen_data[1] = 0xD894; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0095; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* out_hsize */ + gen_data[1] = 0x2096; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0397; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* out_vtotal */ + gen_data[1] = 0x0D98; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x0299; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* out_vsync */ + gen_data[1] = 0x039A; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x009B; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* out_vstart */ + gen_data[1] = 0x239C; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x009D; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* out_vsize */ + gen_data[1] = 0xE09E; + mrst_gen_long_write(dev, gen_data, wc, vc); + gen_data[1] = 0x019F; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* FRC_set_6 */ + gen_data[1] = 0x9084; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* Other setting */ + gen_data[1] = 0x0526; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* RBG domain */ + gen_data[1] = 0x1177; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* rgbw */ + /* set_1 */ + gen_data[1] = 0xD28F; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_2 */ + gen_data[1] = 0x02D0; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_3 */ + gen_data[1] = 0x08D1; + mrst_gen_long_write(dev, gen_data, wc, vc); + + /* set_4 */ + gen_data[1] = 0x05D2; + mrst_gen_long_write(dev, 
gen_data, wc, vc);
+
+ /* set_5 */
+ gen_data[1] = 0x24D4;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+
+ /* set_6 */
+ gen_data[1] = 0x00D5;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+ gen_data[1] = 0x02D7;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+ gen_data[1] = 0x00D8;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+
+ gen_data[1] = 0x48F3;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+ gen_data[1] = 0xD4F2;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+ gen_data[1] = 0x3D8E;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+ gen_data[1] = 0x60FD;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+ gen_data[1] = 0x00B5;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+ gen_data[1] = 0x48F4;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+
+ /* inside pattern */
+ gen_data[1] = 0x0060;
+ mrst_gen_long_write(dev, gen_data, wc, vc);
+
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+}
+#endif
+static void mrst_wait_for_LP_CTRL_FIFO(struct drm_device *dev)
+{
+ while (REG_READ(GEN_FIFO_STAT_REG) & LP_CTRL_FIFO_FULL);
+}
+
+/* ************************************************************************* *\
+FUNCTION: mrst_init_NSC_MIPI_bridge
+
+DESCRIPTION:
+
+\* ************************************************************************* */
+static void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
+{
+
+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+#if PRINT_JLIU7
+ DRM_INFO("JLIU7 enter mrst_init_NSC_MIPI_bridge.\n");
+#endif /* PRINT_JLIU7 */
+ /* Program MIPI IP to 50MHz DSI, Non-Burst mode with sync event,
+ 1 or 2 Data Lanes */
+
+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* enable RGB24*/
+ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* enable all error reporting*/
+ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* enable 2 data lane; video shaping & error reporting */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* HS timeout */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* enable all virtual channels */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* set output strength to low-drive */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ if (dev_priv->sku_83)
+ {
+ /* set escape clock to divide by 8 */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
+ }
+ else if (dev_priv->sku_100L)
+ {
+ /* set escape clock to divide by 16 */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
+ }
+ else if (dev_priv->sku_100)
+ {
+ /* set escape clock to divide by 32 */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);
+ }
+
+ mrst_wait_for_LP_CTRL_FIFO(dev);
+ /* CFG_VALID=1; RGB_CLK_EN=1. */
+ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
+
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+}
+
+static void mrst_dsi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+ u32 dsiFuncPrgValue = 0;
+ u32 SupportedFormat = 0;
+ u32 channelNumber = 0;
+ u32 DBI_dataWidth = 0;
+ u32 resolution = 0;
+ u32 mipiport = 0;
+ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
+
+#if PRINT_JLIU7
+ DRM_INFO("JLIU7 enter mrst_dsi_mode_set \n");
+#endif /* PRINT_JLIU7 */
+
+ powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true);
+
+ switch (dev_priv->bpp)
+ {
+ case 16:
+ SupportedFormat = RGB_565_FMT;
+ break;
+ case 18:
+ SupportedFormat = RGB_666_FMT;
+ break;
+ case 24:
+ SupportedFormat = RGB_888_FMT;
+ break;
+ default:
+ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
+ break;
+ }
+
+ resolution = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
+
+ if (dev_priv->dpi)
+ {
+ drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base, dev->mode_config.scaling_mode_property, &curValue);
+
+ if (curValue == DRM_MODE_SCALE_NO_SCALE)
+ REG_WRITE(PFIT_CONTROL, 0);
+ else if (curValue == DRM_MODE_SCALE_ASPECT) {
+ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX);
+ else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX);
+ } else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+
+ /* Enable MIPI Port */
+ mipiport = MIPI_PORT_EN | MIPI_BORDER_EN;
+ REG_WRITE(MIPI, mipiport);
+
+ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
+ REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
+
+ /* Enable all the error interrupts */
+ REG_WRITE(INTR_EN_REG, 0xffffffff);
+ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
+ REG_WRITE(DEVICE_RESET_REG, 0x000000ff); /* old value = 0x00000015 may depend on the DSI RX device*/
+ REG_WRITE(INIT_COUNT_REG, 0x00000fff); /* Minimum value = 0x000007d0 */
+
+ SupportedFormat <<= FMT_DPI_POS;
+ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
+
+ REG_WRITE(DPI_RESOLUTION_REG, resolution);
+ /*REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);*/
+
+ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, GetVSA_Count(dev, dev_priv));
+ REG_WRITE(VERT_BACK_PORCH_COUNT_REG,
+ GetVBP_Count(dev, dev_priv));
+ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG,
+ GetVFP_Count(dev, dev_priv));
+
+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG,
+ GetHSA_Count(dev, dev_priv));
+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG,
+ GetHBP_Count(dev, dev_priv));
+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG,
+ GetHFP_Count(dev, dev_priv));
+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG,
+ GetHAdr_Count(dev, dev_priv));
+
+ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
+ }
+ else
+ {
+ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or VIRTUAL_CHANNEL_NUMBER_0*/
+ channelNumber = VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
+ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
+ dsiFuncPrgValue = dev_priv->laneCount | channelNumber |
DBI_dataWidth; + /* JLIU7 FIXME */ + SupportedFormat <<= FMT_DBI_POS; + dsiFuncPrgValue |= SupportedFormat; + REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue); + + REG_WRITE(DPI_RESOLUTION_REG, 0x00000000); + REG_WRITE(DBI_RESOLUTION_REG, resolution); + } + +#if 1 /*JLIU7_PO hard code for NSC PO */ + REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000); + REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff); + + REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46); +#else /*JLIU7_PO hard code for NSC PO */ + REG_WRITE(HS_TX_TIMEOUT_REG, GetHS_TX_timeoutCount(dev_priv)); + REG_WRITE(LP_RX_TIMEOUT_REG, GetLP_RX_timeoutCount(dev_priv)); + + REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, GetHighLowSwitchCount(dev_priv)); +#endif /*JLIU7_PO hard code for NSC PO */ + + + REG_WRITE(EOT_DISABLE_REG, 0x00000000); + + /* FIXME JLIU7 for NSC PO */ + REG_WRITE(LP_BYTECLK_REG, 0x00000004); + + REG_WRITE(DEVICE_READY_REG, 0x00000001); + REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */ + + dev_priv->dsi_device_ready = true; + +#if 0 /*JLIU7_PO */ + mrst_init_HIMAX_MIPI_bridge(dev); +#endif /*JLIU7_PO */ + mrst_init_NSC_MIPI_bridge(dev); + + if (dev_priv->sku_100L) + /* Set DSI link to 100MHz; 2:1 clock ratio */ + REG_WRITE(MIPI_CONTROL_REG, 0x00000009); + + REG_WRITE(PIPEACONF, dev_priv->pipeconf); + REG_READ(PIPEACONF); + + /* Wait for 20ms for the pipe enable to take effect. */ + udelay(20000); + + REG_WRITE(DSPACNTR, dev_priv->dspcntr); + + /* Wait for 20ms for the plane enable to take effect. */ + udelay(20000); + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); +} + +/** + * Detect the MIPI connection. + * + * This always returns CONNECTOR_STATUS_CONNECTED. + * This connector should only have + * been set up if the MIPI was actually connected anyway. + */ +static enum drm_connector_status mrst_dsi_detect(struct drm_connector + *connector) +{ +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_dsi_detect \n"); +#endif /* PRINT_JLIU7 */ + + return connector_status_connected; +} + +/** + * Return the list of MIPI DDB modes if available. 
+ */
+static int mrst_dsi_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
+
+/* FIXME get the MIPI DDB modes */
+
+ /* Didn't get a DDB, so
+ * set wide sync ranges so we get all modes
+ * handed to valid_mode for checking
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
+ .dpms = mrst_dsi_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+ .prepare = mrst_dsi_prepare,
+ .mode_set = mrst_dsi_mode_set,
+ .commit = mrst_dsi_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ mrst_dsi_connector_helper_funcs = {
+ .get_modes = mrst_dsi_get_modes,
+ .mode_valid = psb_intel_lvds_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
+ .dpms = psb_intel_lvds_connector_dpms,
+ .save = mrst_dsi_save,
+ .restore = mrst_dsi_restore,
+ .detect = mrst_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_lvds_set_property,
+ .destroy = psb_intel_lvds_destroy,
+};
+
+/** Returns the panel fixed mode from configuration. */
+/** FIXME JLIU7 need to revisit it. */
+struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ u8 panel_index = dev_priv->gct_data.bpi;
+ u8 panel_type = dev_priv->gct_data.pt;
+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
+ if ((1 << panel_index) & panel_type) /*if non-zero, panel is enabled*/
+ use_gct = true;
+
+ if (use_gct) {
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay + \
+ ((ti->hsync_offset_hi << 8) | \
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start + \
+ ((ti->hsync_pulse_width_hi << 8) | \
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+ ti->hblank_lo);
+ mode->vsync_start = \
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+ ti->vsync_offset_lo);
+ mode->vsync_end = \
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay + \
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+#if 1
+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+ printk(KERN_INFO "clock is %d\n", mode->clock);
+#endif
+
+ } else {
+#if 1 /*FIXME jliu7 remove it later */
+ /* copy from SV - hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel
*/ + mode->hdisplay = 800; + mode->vdisplay = 480; + mode->hsync_start = 808; + mode->hsync_end = 848; + mode->htotal = 880; + mode->vsync_start = 482; + mode->vsync_end = 483; + mode->vtotal = 486; + mode->clock = 33264; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */ + mode->hdisplay = 800; + mode->vdisplay = 480; + mode->hsync_start = 836; + mode->hsync_end = 846; + mode->htotal = 1056; + mode->vsync_start = 489; + mode->vsync_end = 491; + mode->vtotal = 525; + mode->clock = 33264; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for LVDS 800x480 */ + mode->hdisplay = 800; + mode->vdisplay = 480; + mode->hsync_start = 801; + mode->hsync_end = 802; + mode->htotal = 1024; + mode->vsync_start = 481; + mode->vsync_end = 482; + mode->vtotal = 525; + mode->clock = 30994; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */ + /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ + mode->hdisplay = 1024; + mode->vdisplay = 600; + mode->hsync_start = 1072; + mode->hsync_end = 1104; + mode->htotal = 1184; + mode->vsync_start = 603; + mode->vsync_end = 604; + mode->vtotal = 608; + mode->clock = 53990; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */ + /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ + mode->hdisplay = 1024; + mode->vdisplay = 600; + mode->hsync_start = 1104; + mode->hsync_end = 1136; + mode->htotal = 1184; + mode->vsync_start = 603; + mode->vsync_end = 604; + mode->vtotal = 608; + mode->clock = 53990; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */ + mode->hdisplay = 1024; + mode->vdisplay = 600; + mode->hsync_start = 1124; + mode->hsync_end = 1204; + mode->htotal = 1312; + mode->vsync_start = 607; + mode->vsync_end = 610; + mode->vtotal = 621; + mode->clock = 48885; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for LVDS 1024x768 */ + mode->hdisplay = 1024; + mode->vdisplay = 768; + mode->hsync_start = 1048; + mode->hsync_end = 1184; + mode->htotal = 1344; + mode->vsync_start = 771; + mode->vsync_end = 777; + mode->vtotal = 806; + mode->clock = 65000; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for LVDS 1366x768 */ + mode->hdisplay = 1366; + mode->vdisplay = 768; + mode->hsync_start = 1430; + mode->hsync_end = 1558; + mode->htotal = 1664; + mode->vsync_start = 769; + mode->vsync_end = 770; + mode->vtotal = 776; + mode->clock = 77500; +#endif /*FIXME jliu7 remove it later */ + } + drm_mode_set_name(mode); + drm_mode_set_crtcinfo(mode, 0); + + return mode; +} + +/* ************************************************************************* *\ +FUNCTION: mrstDSI_clockInit + ` +DESCRIPTION: + +\* ************************************************************************* */ +static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667}; +static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000}; +static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000}; +#define MIPI_2XCLK_COUNT 0x04 + +static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv) +{ + u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0; + u32 i = 0; + u32 
*p_mipi_2xclk = NULL;
+
+#if 0 /* JLIU7_PO old values */
+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
+ dev_priv->pixelClock = 33264; /*KHz*/
+ dev_priv->HsyncWidth = 10;
+ dev_priv->HbackPorch = 210;
+ dev_priv->HfrontPorch = 36;
+ dev_priv->HactiveArea = 800;
+ dev_priv->VsyncWidth = 2;
+ dev_priv->VbackPorch = 34;
+ dev_priv->VfrontPorch = 9;
+ dev_priv->VactiveArea = 480;
+ dev_priv->bpp = 24;
+
+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
+ dev_priv->dbi_HsyncWidth = 10;
+ dev_priv->dbi_HbackPorch = 210;
+ dev_priv->dbi_HfrontPorch = 36;
+ dev_priv->dbi_HactiveArea = 800;
+ dev_priv->dbi_VsyncWidth = 2;
+ dev_priv->dbi_VbackPorch = 34;
+ dev_priv->dbi_VfrontPorch = 9;
+ dev_priv->dbi_VactiveArea = 480;
+ dev_priv->dbi_bpp = 24;
+#else /* JLIU7_PO old values */
+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
+ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
+ dev_priv->pixelClock = 33264; /*KHz*/
+ dev_priv->HsyncWidth = 10;
+ dev_priv->HbackPorch = 8;
+ dev_priv->HfrontPorch = 3;
+ dev_priv->HactiveArea = 800;
+ dev_priv->VsyncWidth = 2;
+ dev_priv->VbackPorch = 3;
+ dev_priv->VfrontPorch = 2;
+ dev_priv->VactiveArea = 480;
+ dev_priv->bpp = 24;
+
+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
+ dev_priv->dbi_HsyncWidth = 10;
+ dev_priv->dbi_HbackPorch = 8;
+ dev_priv->dbi_HfrontPorch = 3;
+ dev_priv->dbi_HactiveArea = 800;
+ dev_priv->dbi_VsyncWidth = 2;
+ dev_priv->dbi_VbackPorch = 3;
+ dev_priv->dbi_VfrontPorch = 2;
+ dev_priv->dbi_VactiveArea = 480;
+ dev_priv->dbi_bpp = 24;
+#endif /* JLIU7_PO old values */
+
+ Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch + dev_priv->HactiveArea;
+ Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch + dev_priv->VfrontPorch + dev_priv->VactiveArea;
+
+ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
+
+ dev_priv->RRate = RRate;
+
+ /* 2x DDR clock frequency = (pixel clock frequency * bits per pixel) / lane count */
+ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) / dev_priv->laneCount; /* KHz */
+ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
+
+ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
+
+ if (dev_priv->sku_100)
+ {
+ p_mipi_2xclk = sku_100_mipi_2xclk;
+ }
+ else if (dev_priv->sku_100L)
+ {
+ p_mipi_2xclk = sku_100L_mipi_2xclk;
+ }
+ else
+ {
+ p_mipi_2xclk = sku_83_mipi_2xclk;
+ }
+
+ for (; i < MIPI_2XCLK_COUNT; i++)
+ {
+ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
+ break;
+ }
+
+ if (i == MIPI_2XCLK_COUNT)
+ {
+ DRM_DEBUG("mrstDSI_clockInit the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
+ return false;
+ }
+
+ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
+ dev_priv->ClockBits = i;
+
+#if 1 /* FIXME remove it after power on*/
+ DRM_DEBUG("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, dev_priv->DDR_Clock_Calculated);
+#endif /* FIXME remove it after power on*/
+
+ return true;
+}
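+
+/* Worked example (illustrative, using the pre-silicon TPO timings above on a
+ * sku_83 part with 2 lanes): Htotal = 10 + 8 + 3 + 800 = 821 and
+ * Vtotal = 2 + 3 + 2 + 480 = 487, so RRate = 33264000 / (821 * 487) + 1 = 84.
+ * mipi_2xclk = 33264 * 24 / 2 = 399168 KHz, giving DDR_Clock_Calculated =
+ * 199584 KHz; the first sku_83 table entry above 399168 is 444444, so
+ * DDR_Clock = 222222 KHz and ClockBits = 2.
+ */
+
+/**
+ * mrst_dsi_init - setup MIPI connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, try to figure out what
+ * modes we can display on the MIPI panel (if present).
+ */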
+void mrst_dsi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+ struct psb_intel_output *psb_intel_output;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+#if PRINT_JLIU7
+ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
+#endif /* PRINT_JLIU7 */
+
+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+ if (!psb_intel_output)
+ return;
+
+ psb_intel_output->mode_dev = mode_dev;
+ connector = &psb_intel_output->base;
+ encoder = &psb_intel_output->enc;
+ drm_connector_init(dev, &psb_intel_output->base,
+ &mrst_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_MIPI);
+
+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_MIPI);
+
+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
+ &psb_intel_output->enc);
+ psb_intel_output->type = INTEL_OUTPUT_MIPI;
+
+ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
+ drm_connector_helper_add(connector,
+ &mrst_dsi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
+
+ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
+ blc_pol = BLC_POLARITY_NORMAL;
+ blc_freq = 0xc8;
+
+ /*
+ * MIPI discovery:
+ * 1) check for DDB data
+ * 2) check for VBT data
+ * 3) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* FIXME jliu7 we only support DPI */
+ dev_priv->dpi = true;
+
+ /* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
+ dev_priv->laneCount = 2;
+
+ /* FIXME hard coded for NSC PO. */
+ /* Only non-burst mode with sync events is supported for now */
+ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
+ /* FIXME change it to true if GET_DDB works */
+ dev_priv->config_phase = false;
+
+ if (!mrstDSI_clockInit(dev_priv))
+ {
+ DRM_DEBUG("Can't initialize MRST DSI clock.\n");
+#if 0 /* FIXME JLIU7 */
+ goto failed_find;
+#endif /* FIXME JLIU7 */
+ }
+
+ /*
+ * If we didn't get DDB data, try getting panel timing
+ * from configuration data
+ */
+ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
+
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ DRM_DEBUG
+ ("Found no modes on the MIPI panel, ignoring it\n");
+ goto failed_find;
+ }
+
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ DRM_DEBUG("No MIPI modes found, disabling.\n");
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
diff --git a/drivers/gpu/drm/psb/psb_intel_i2c.c b/drivers/gpu/drm/psb/psb_intel_i2c.c
new file mode 100644
index 0000000..60165fd
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_intel_i2c.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt
+ */
+/*
+ * Copyright (c) 2006 Dave Airlie
+ * Jesse Barnes
+ */
+
+#include
+#include
+#include
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, clock_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved =
+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+ REG_WRITE(chan->reg, reserved | clock_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, data_bits;
+
+ /* On most chips, these bits must be preserved in software.
*/ + if (!IS_I830(dev) && !IS_845G(dev)) + reserved = + REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | + GPIO_CLOCK_PULLUP_DISABLE); + + if (state_high) + data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; + else + data_bits = + GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | + GPIO_DATA_VAL_MASK; + + REG_WRITE(chan->reg, reserved | data_bits); + udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ +} + +/** + * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg + * @dev: DRM device + * @output: driver specific output device + * @reg: GPIO reg to use + * @name: name for this bus + * + * Creates and registers a new i2c bus with the Linux i2c layer, for use + * in output probing and control (e.g. DDC or SDVO control functions). + * + * Possible values for @reg include: + * %GPIOA + * %GPIOB + * %GPIOC + * %GPIOD + * %GPIOE + * %GPIOF + * %GPIOG + * %GPIOH + * see PRM for details on how these different busses are used. + */ +struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev, + const u32 reg, const char *name) +{ + struct psb_intel_i2c_chan *chan; + + chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL); + if (!chan) + goto out_free; + + chan->drm_dev = dev; + chan->reg = reg; + snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); + chan->adapter.owner = THIS_MODULE; + chan->adapter.algo_data = &chan->algo; + chan->adapter.dev.parent = &dev->pdev->dev; + chan->algo.setsda = set_data; + chan->algo.setscl = set_clock; + chan->algo.getsda = get_data; + chan->algo.getscl = get_clock; + chan->algo.udelay = 20; + chan->algo.timeout = usecs_to_jiffies(2200); + chan->algo.data = chan; + + i2c_set_adapdata(&chan->adapter, chan); + + if (i2c_bit_add_bus(&chan->adapter)) + goto out_free; + + /* JJJ: raise SCL and SDA? */ + set_data(chan, 1); + set_clock(chan, 1); + udelay(20); + + return chan; + +out_free: + kfree(chan); + return NULL; +} + +/** + * psb_intel_i2c_destroy - unregister and free i2c bus resources + * @output: channel to free + * + * Unregister the adapter from the i2c layer, then free the structure. + */ +void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan) +{ + if (!chan) + return; + + i2c_del_adapter(&chan->adapter); + kfree(chan); +} diff --git a/drivers/gpu/drm/psb/psb_intel_lvds.c b/drivers/gpu/drm/psb/psb_intel_lvds.c new file mode 100644 index 0000000..4fa29f8 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_intel_lvds.c @@ -0,0 +1,1343 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * Copyright (c) 2006 Dave Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt
+ * Dave Airlie
+ * Jesse Barnes
+ */
+
+#include
+#include
+#include
+
+#include "psb_intel_bios.h"
+#include "psb_powermgmt.h"
+
+/* MRST defines start */
+uint8_t blc_type;
+uint8_t blc_pol;
+uint8_t blc_freq;
+uint8_t blc_minbrightness;
+uint8_t blc_i2caddr;
+uint8_t blc_brightnesscmd;
+int lvds_backlight; /* restore backlight to this value */
+
+u32 CoreClock;
+u32 PWMControlRegFreq;
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct psb_intel_lvds_priv {
+ /**
+ * Saved LVDS output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+};
+
+/* MRST defines end */
+
+/**
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 retVal;
+
+ if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+ retVal = ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+ } else
+ retVal = ((dev_priv->saveBLC_PWM_CTL & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return retVal;
+}
+
+/**
+ * Set LVDS backlight level by I2C command
+ */
+static int psb_lvds_i2c_set_brightness(struct drm_device *dev, unsigned int level)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) {
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+ }
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+ DRM_DEBUG("I2C set brightness done. (command, value) (%d, %d)\n",
+ blc_brightnesscmd, blc_i2c_brightness);
+ return 0;
+ }
+
+ DRM_ERROR("I2C transfer error\n");
+ return -1;
+}
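+
+/* Worked example (illustrative; the max value 200 is hypothetical):
+ * BLC_PWM_CTL keeps the modulation frequency in bits 31:16 and the duty
+ * cycle in bits 15:0. If psb_intel_lvds_get_max_backlight() reported
+ * max_pwm_blc = 200, then level = 50 would map to a duty cycle of
+ * 50 * 200 / 100 = 100, and the register would be rewritten as
+ * (200 << 16) | 100 with the polarity bit cleared.
+ */
+
+static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+
+ /* BLC_PWM_CTL should have been initialized during backlight device init */
+ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
+ blc_pwm_duty_cycle = level * max_pwm_blc /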
BRIGHTNESS_MAX_LEVEL; + + if(dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE){ + blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle; + } + + blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR; + REG_WRITE(BLC_PWM_CTL, + (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) | + (blc_pwm_duty_cycle)); + + return 0; +} + +/** + * Set LVDS backlight level either by I2C or PWM + */ +void psb_intel_lvds_set_brightness(struct drm_device * dev, int level) +{ + /*u32 blc_pwm_ctl;*/ + struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private; + + DRM_DEBUG("backlight level is %d\n", level); + + if(!dev_priv->lvds_bl) { + DRM_ERROR("NO LVDS Backlight Info\n"); + return; + } + + if(IS_MRST(dev)) { + DRM_ERROR("psb_intel_lvds_set_brightness called from MRST...not expected\n"); + return; + } + + if(dev_priv->lvds_bl->type == BLC_I2C_TYPE) { + psb_lvds_i2c_set_brightness(dev, level); + } else { + psb_lvds_pwm_set_brightness(dev, level); + } +} + +/** + * Sets the backlight level. + * + * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight(). + */ +static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + u32 blc_pwm_ctl; + + if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false) ) { + blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; + REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | + (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } else { + blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL & ~BACKLIGHT_DUTY_CYCLE_MASK; + dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl | + (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); + } +} + +/** + * Sets the power state for the panel. + */ +static void psb_intel_lvds_set_power(struct drm_device *dev, + struct psb_intel_output *output, bool on) +{ + u32 pp_status; + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + if (on) { + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | + POWER_TARGET_ON); + do { + pp_status = REG_READ(PP_STATUS); + } while ((pp_status & PP_ON) == 0); + + psb_intel_lvds_set_backlight(dev, + output-> + mode_dev->backlight_duty_cycle); + } else { + psb_intel_lvds_set_backlight(dev, 0); + + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & + ~POWER_TARGET_ON); + do { + pp_status = REG_READ(PP_STATUS); + } while (pp_status & PP_ON); + } + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); +} + +static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct psb_intel_output *output = enc_to_psb_intel_output(encoder); + + if (mode == DRM_MODE_DPMS_ON) + psb_intel_lvds_set_power(dev, output, true); + else + psb_intel_lvds_set_power(dev, output, false); + + /* XXX: We never power down the LVDS pairs. 
*/ +} + +static void psb_intel_lvds_save(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private; + struct psb_intel_output * psb_intel_output = to_psb_intel_output(connector); + struct psb_intel_lvds_priv * lvds_priv = + (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv; + + if(IS_POULSBO(dev)) { + lvds_priv->savePP_ON = REG_READ(LVDSPP_ON); + lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF); + lvds_priv->saveLVDS = REG_READ(LVDS); + lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL); + lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE); + /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/ + lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); + lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL); + lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); + + /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/ + dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & + BACKLIGHT_DUTY_CYCLE_MASK); + + /* + * If the light is off at server startup, just make it full brightness + */ + if (dev_priv->backlight_duty_cycle == 0) + dev_priv->backlight_duty_cycle = + psb_intel_lvds_get_max_backlight(dev); + + DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON, + lvds_priv->savePP_OFF, + lvds_priv->saveLVDS, + lvds_priv->savePP_CONTROL, + lvds_priv->savePP_CYCLE, + lvds_priv->saveBLC_PWM_CTL); + } +} + +static void psb_intel_lvds_restore(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + u32 pp_status; + + /*struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;*/ + struct psb_intel_output * psb_intel_output = to_psb_intel_output(connector); + struct psb_intel_lvds_priv * lvds_priv = + (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv; + + if(IS_POULSBO(dev)) { + DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON, + lvds_priv->savePP_OFF, + lvds_priv->saveLVDS, + lvds_priv->savePP_CONTROL, + lvds_priv->savePP_CYCLE, + lvds_priv->saveBLC_PWM_CTL); + + REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL); + REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL); + REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS); + REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON); + REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF); + /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/ + REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE); + REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL); + REG_WRITE(LVDS, lvds_priv->saveLVDS); + + if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) { + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | + POWER_TARGET_ON); + do { + pp_status = REG_READ(PP_STATUS); + } while((pp_status & PP_ON) == 0); + } else { + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & + ~POWER_TARGET_ON); + do { + pp_status = REG_READ(PP_STATUS); + }while(pp_status & PP_ON); + } + } +} + +static int psb_intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + struct drm_display_mode *fixed_mode = + psb_intel_output->mode_dev->panel_fixed_mode; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n"); +#endif /* PRINT_JLIU7 */ + + /* just in case */ + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + /* just in case */ + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + if (fixed_mode) { + if (mode->hdisplay > fixed_mode->hdisplay) + 
return MODE_PANEL; + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; + } + return MODE_OK; +} + +static bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct psb_intel_mode_device *mode_dev = + enc_to_psb_intel_output(encoder)->mode_dev; + struct drm_device *dev = encoder->dev; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc); + struct drm_encoder *tmp_encoder; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n"); +#endif /* PRINT_JLIU7 */ + + /* Should never happen!! */ + if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) { + printk(KERN_ERR + "Can't support LVDS/MIPI on pipe B on MRST\n"); + return false; + } else if (!IS_MRST(dev) && !IS_I965G(dev) + && psb_intel_crtc->pipe == 0) { + printk(KERN_ERR "Can't support LVDS on pipe A\n"); + return false; + } + /* Should never happen!! */ + list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, + head) { + if (tmp_encoder != encoder + && tmp_encoder->crtc == encoder->crtc) { + printk(KERN_ERR "Can't enable LVDS and another " + "encoder on the same pipe\n"); + return false; + } + } + + /* + * If we have timings from the BIOS for the panel, put them in + * to the adjusted mode. The CRTC will be set up for this mode, + * with the panel scaling set up to source from the H/VDisplay + * of the original mode. + */ + if (mode_dev->panel_fixed_mode != NULL) { + adjusted_mode->hdisplay = + mode_dev->panel_fixed_mode->hdisplay; + adjusted_mode->hsync_start = + mode_dev->panel_fixed_mode->hsync_start; + adjusted_mode->hsync_end = + mode_dev->panel_fixed_mode->hsync_end; + adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal; + adjusted_mode->vdisplay = + mode_dev->panel_fixed_mode->vdisplay; + adjusted_mode->vsync_start = + mode_dev->panel_fixed_mode->vsync_start; + adjusted_mode->vsync_end = + mode_dev->panel_fixed_mode->vsync_end; + adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal; + adjusted_mode->clock = mode_dev->panel_fixed_mode->clock; + drm_mode_set_crtcinfo(adjusted_mode, + CRTC_INTERLACE_HALVE_V); + } + + /* + * XXX: It would be nice to support lower refresh rates on the + * panels to reduce power consumption, and perhaps match the + * user's requested refresh rate. 
+ */ + + return true; +} + +static void psb_intel_lvds_prepare(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct psb_intel_output *output = enc_to_psb_intel_output(encoder); + struct psb_intel_mode_device *mode_dev = output->mode_dev; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n"); +#endif /* PRINT_JLIU7 */ + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); + mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & + BACKLIGHT_DUTY_CYCLE_MASK); + + psb_intel_lvds_set_power(dev, output, false); + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); +} + +static void psb_intel_lvds_commit(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct psb_intel_output *output = enc_to_psb_intel_output(encoder); + struct psb_intel_mode_device *mode_dev = output->mode_dev; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n"); +#endif /* PRINT_JLIU7 */ + + if (mode_dev->backlight_duty_cycle == 0) + mode_dev->backlight_duty_cycle = + psb_intel_lvds_get_max_backlight(dev); + + psb_intel_lvds_set_power(dev, output, true); +} + +static void psb_intel_lvds_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct psb_intel_mode_device *mode_dev = + enc_to_psb_intel_output(encoder)->mode_dev; + struct drm_device *dev = encoder->dev; + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc); + u32 pfit_control; + + /* + * The LVDS pin pair will already have been turned on in the + * psb_intel_crtc_mode_set since it has a large impact on the DPLL + * settings. + */ + + /* + * Enable automatic panel scaling so that non-native modes fill the + * screen. Should be enabled before the pipe is enabled, according to + * register description and PRM. + */ + if (mode->hdisplay != adjusted_mode->hdisplay || + mode->vdisplay != adjusted_mode->vdisplay) + pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | + HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + else + pfit_control = 0; + + if (!IS_I965G(dev)) { + if (mode_dev->panel_wants_dither) + pfit_control |= PANEL_8TO6_DITHER_ENABLE; + } else + pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT; + + REG_WRITE(PFIT_CONTROL, pfit_control); +} + +/** + * Detect the LVDS connection. + * + * This always returns CONNECTOR_STATUS_CONNECTED. + * This connector should only have + * been set up if the LVDS was actually connected anyway. + */ +static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector + *connector) +{ + return connector_status_connected; +} + +/** + * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. 
+ */ +static int psb_intel_lvds_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev; + int ret = 0; + + if (!IS_MRST(dev)) + ret = psb_intel_ddc_get_modes(psb_intel_output); + + if (ret) + return ret; + + /* Didn't get an EDID, so + * Set wide sync ranges so we get all modes + * handed to valid_mode for checking + */ + connector->display_info.min_vfreq = 0; + connector->display_info.max_vfreq = 200; + connector->display_info.min_hfreq = 0; + connector->display_info.max_hfreq = 200; + + if (mode_dev->panel_fixed_mode != NULL) { + struct drm_display_mode *mode = + drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + drm_mode_probed_add(connector, mode); + return 1; + } + + return 0; +} + +/** + * psb_intel_lvds_destroy - unregister and free LVDS structures + * @connector: connector to free + * + * Unregister the DDC bus for this connector then free the driver private + * structure. + */ +static void psb_intel_lvds_destroy(struct drm_connector *connector) +{ + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + + if (psb_intel_output->ddc_bus) + psb_intel_i2c_destroy(psb_intel_output->ddc_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static void psb_intel_lvds_connector_dpms(struct drm_connector *connector, int mode) +{ + struct drm_encoder *pEncoder = connector->encoder; + struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private; + struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private; + struct drm_device * dev = connector->dev; + pEncHFuncs->dpms(pEncoder, mode); + /*FIXME: crtc dpms will crash kernel on menlow*/ + if (IS_MRST(dev)) + pCrtcHFuncs->dpms(pEncoder->crtc, mode); +} + +static int psb_intel_lvds_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t value) +{ + struct drm_encoder *pEncoder = connector->encoder; + + if (!strcmp(property->name, "scaling mode") && pEncoder) { + struct psb_intel_crtc *pPsbCrtc = to_psb_intel_crtc(pEncoder->crtc); + bool bTransitionFromToCentered; + uint64_t curValue; + + if (!pPsbCrtc) + goto set_prop_error; + + switch (value) { + case DRM_MODE_SCALE_FULLSCREEN: + break; + case DRM_MODE_SCALE_NO_SCALE: + break; + case DRM_MODE_SCALE_ASPECT: + break; + default: + goto set_prop_error; + } + + if (drm_connector_property_get_value(connector, property, &curValue)) + goto set_prop_error; + + if (curValue == value) + goto set_prop_done; + + if (drm_connector_property_set_value(connector, property, value)) + goto set_prop_error; + + bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) || + (value == DRM_MODE_SCALE_NO_SCALE); + + if (pPsbCrtc->saved_mode.hdisplay != 0 && + pPsbCrtc->saved_mode.vdisplay != 0) { + if (bTransitionFromToCentered) { + if (!drm_crtc_helper_set_mode(pEncoder->crtc, &pPsbCrtc->saved_mode, + pEncoder->crtc->x, pEncoder->crtc->y, pEncoder->crtc->fb)) + goto set_prop_error; + } else { + struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private; + pEncHFuncs->mode_set(pEncoder, &pPsbCrtc->saved_mode, + &pPsbCrtc->saved_adjusted_mode); + } + } + } else if (!strcmp(property->name, "backlight") && pEncoder) { + if (drm_connector_property_set_value(connector, property, value)) + goto set_prop_error; + else { +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + struct backlight_device bd; + 
bd.props.brightness = value; + psb_set_brightness(&bd); +#endif + } + } else if (!strcmp(property->name, "DPMS") && pEncoder) { + struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private; + /*struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;*/ + pEncHFuncs->dpms(pEncoder, value); + /*pCrtcHFuncs->dpms(pEncoder->crtc, value);*/ + } + +set_prop_done: + return 0; +set_prop_error: + return -1; +} + +static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = { + .dpms = psb_intel_lvds_encoder_dpms, + .mode_fixup = psb_intel_lvds_mode_fixup, + .prepare = psb_intel_lvds_prepare, + .mode_set = psb_intel_lvds_mode_set, + .commit = psb_intel_lvds_commit, +}; + +static const struct drm_connector_helper_funcs + psb_intel_lvds_connector_helper_funcs = { + .get_modes = psb_intel_lvds_get_modes, + .mode_valid = psb_intel_lvds_mode_valid, + .best_encoder = psb_intel_best_encoder, +}; + +static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { + .dpms = psb_intel_lvds_connector_dpms, + .save = psb_intel_lvds_save, + .restore = psb_intel_lvds_restore, + .detect = psb_intel_lvds_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = psb_intel_lvds_set_property, + .destroy = psb_intel_lvds_destroy, +}; + + +static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = { + .destroy = psb_intel_lvds_enc_destroy, +}; + + + +/** + * psb_intel_lvds_init - setup LVDS connectors on this device + * @dev: drm device + * + * Create the connector, register the LVDS DDC bus, and try to figure out what + * modes we can display on the LVDS panel (if present). + */ +void psb_intel_lvds_init(struct drm_device *dev, + struct psb_intel_mode_device *mode_dev) +{ + struct psb_intel_output *psb_intel_output; + struct psb_intel_lvds_priv * lvds_priv; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_display_mode *scan; /* *modes, *bios_mode; */ + struct drm_crtc *crtc; + struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private; + u32 lvds; + int pipe; + + psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL); + if (!psb_intel_output) + return; + + lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL); + if(!lvds_priv) { + kfree(psb_intel_output); + DRM_DEBUG("LVDS private allocation error\n"); + return; + } + + psb_intel_output->dev_priv = lvds_priv; + + psb_intel_output->mode_dev = mode_dev; + connector = &psb_intel_output->base; + encoder = &psb_intel_output->enc; + drm_connector_init(dev, &psb_intel_output->base, + &psb_intel_lvds_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + + drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs, + DRM_MODE_ENCODER_LVDS); + + drm_mode_connector_attach_encoder(&psb_intel_output->base, + &psb_intel_output->enc); + psb_intel_output->type = INTEL_OUTPUT_LVDS; + + drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); + drm_connector_helper_add(connector, + &psb_intel_lvds_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + /*Attach connector properties*/ + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); 
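Both connector properties attached here ("scaling mode" and "backlight") become reachable from userspace through the generic property ioctls. A sketch of driving the backlight property with libdrm (the fd comes from opening the DRM node, and connector_id is assumed already known; this client code is illustrative, not part of the patch):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static int set_backlight(int fd, uint32_t connector_id, uint64_t level)
	{
		drmModeConnectorPtr conn = drmModeGetConnector(fd, connector_id);
		int i, ret = -1;

		if (!conn)
			return -1;
		for (i = 0; i < conn->count_props; i++) {
			drmModePropertyPtr prop =
				drmModeGetProperty(fd, conn->props[i]);
			if (!prop)
				continue;
			if (!strcmp(prop->name, "backlight"))
				ret = drmModeConnectorSetProperty(fd,
						connector_id,
						prop->prop_id, level);
			drmModeFreeProperty(prop);
		}
		drmModeFreeConnector(conn);
		return ret;
	}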
+
+	/**
+	 * Set up the I2C bus
+	 * FIXME: destroy the i2c_bus on exit.
+	 */
+	psb_intel_output->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+	if (!psb_intel_output->i2c_bus) {
+		dev_printk(KERN_ERR,
+			&dev->pdev->dev, "I2C bus registration failed.\n");
+		goto failed_blc_i2c;
+	}
+	psb_intel_output->i2c_bus->slave_addr = 0x2C;
+	dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
+
+	/*
+	 * LVDS discovery:
+	 * 1) check for EDID on DDC
+	 * 2) check for VBT data
+	 * 3) check to see if LVDS is already on
+	 *    if none of the above, no panel
+	 * 4) make sure lid is open
+	 *    if closed, act like it's not there for now
+	 */
+
+	/* Set up the DDC bus. */
+	psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+	if (!psb_intel_output->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev,
+			   "DDC bus registration failed.\n");
+		goto failed_ddc;
+	}
+
+	/*
+	 * Attempt to get the fixed panel mode from DDC. Assume that the
+	 * preferred mode is the right one.
+	 */
+	psb_intel_ddc_get_modes(psb_intel_output);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			mode_dev->panel_fixed_mode =
+			    drm_mode_duplicate(dev, scan);
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+
+	/* Failed to get EDID; what about VBT? Do we need this? */
+	if (mode_dev->vbt_mode)
+		mode_dev->panel_fixed_mode =
+		    drm_mode_duplicate(dev, mode_dev->vbt_mode);
+
+	if (!mode_dev->panel_fixed_mode)
+		if (dev_priv->lfp_lvds_vbt_mode)
+			mode_dev->panel_fixed_mode =
+			    drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+
+	/*
+	 * If we didn't get EDID, try checking if the panel is already turned
+	 * on. If so, assume that whatever is currently programmed is the
+	 * correct mode.
+	 */
+	lvds = REG_READ(LVDS);
+	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+	if (crtc && (lvds & LVDS_PORT_EN)) {
+		mode_dev->panel_fixed_mode =
+		    psb_intel_crtc_mode_get(dev, crtc);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+			    DRM_MODE_TYPE_PREFERRED;
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+
+	/* If we still don't have a mode after all that, give up. */
+	if (!mode_dev->panel_fixed_mode) {
+		DRM_DEBUG("Found no modes on the LVDS, ignoring the LVDS\n");
+		goto failed_find;
+	}
+
+	/* FIXME: detect AOpen & Mac Mini type stuff automatically? */
+	/*
+	 * Blacklist machines with BIOSes that list an LVDS panel without
+	 * actually having one.
+	 */
+	if (IS_I945GM(dev)) {
+		/* AOpen Mini PC */
+		if (dev->pdev->subsystem_vendor == 0xa0a0) {
+			DRM_DEBUG("Suspected AOpen Mini PC, ignoring the LVDS\n");
+			goto failed_find;
+		}
+
+		if ((dev->pdev->subsystem_vendor == 0x8086) &&
+		    (dev->pdev->subsystem_device == 0x7270)) {
+			/* It's a Mac Mini or Macbook Pro.
+			 */
+
+			if (mode_dev->panel_fixed_mode != NULL &&
+			    mode_dev->panel_fixed_mode->hdisplay == 800 &&
+			    mode_dev->panel_fixed_mode->vdisplay == 600) {
+				DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
+				goto failed_find;
+			}
+		}
+	}
+
+out:
+	drm_sysfs_connector_add(connector);
+
+#if PRINT_JLIU7
+	DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
+		 mode_dev->panel_fixed_mode->hdisplay);
+	DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
+		 mode_dev->panel_fixed_mode->vdisplay);
+	DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
+		 mode_dev->panel_fixed_mode->hsync_start);
+	DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
+		 mode_dev->panel_fixed_mode->hsync_end);
+	DRM_INFO("PRINT_JLIU7 htotal = %d\n",
+		 mode_dev->panel_fixed_mode->htotal);
+	DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
+		 mode_dev->panel_fixed_mode->vsync_start);
+	DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
+		 mode_dev->panel_fixed_mode->vsync_end);
+	DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
+		 mode_dev->panel_fixed_mode->vtotal);
+	DRM_INFO("PRINT_JLIU7 clock = %d\n",
+		 mode_dev->panel_fixed_mode->clock);
+#endif /* PRINT_JLIU7 */
+	return;
+
+failed_find:
+	if (psb_intel_output->ddc_bus)
+		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+failed_ddc:
+	if (psb_intel_output->i2c_bus)
+		psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+failed_blc_i2c:
+	drm_encoder_cleanup(encoder);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+/* MRST platform start */
+
+/*
+ * FIXME: this needs to move to a register define header file.
+ */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT	(16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK	(0xffff << 16)
+
+/*
+ * The max/min PWM frequency in BPCR[31:17]: the smallest value that fits
+ * in the 15-bit field is 1 (not 0); it is then shifted left by one bit to
+ * give the actual 16-bit value that the 15 bits correspond to.
+ */
+#define MRST_BLC_MAX_PWM_REG_FREQ	0xFFFF
+
+#define BRIGHTNESS_MAX_LEVEL		100
+#define BLC_PWM_PRECISION_FACTOR	10	/* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT	32
+#define MHz				1000000
+#define BLC_POLARITY_NORMAL		0
+#define BLC_POLARITY_INVERSE		1
+
+/**
+ * Calculate the PWM control register value.
+ */
+#if 0
+static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
+{
+	unsigned long value = 0;
+
+	if (blc_freq == 0) {
+		/* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
+		 * Frequency Requested is 0.\n"); */
+		return false;
+	}
+
+	value = (CoreClock * MHz);
+	value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
+	value = (value * BLC_PWM_PRECISION_FACTOR);
+	value = (value / blc_freq);
+	value = (value / BLC_PWM_PRECISION_FACTOR);
+
+	if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ)
+		return false;
+
+	PWMControlRegFreq = (u32) value;
+	return true;
+}
+#endif
+/**
+ * Sets the power state for the panel.
+ */ +static void mrst_lvds_set_power(struct drm_device *dev, + struct psb_intel_output *output, bool on) +{ + u32 pp_status; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_lvds_set_power \n"); +#endif /* PRINT_JLIU7 */ + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + if (on) { + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | + POWER_TARGET_ON); + do { + pp_status = REG_READ(PP_STATUS); + } while ((pp_status & (PP_ON | PP_READY)) == PP_READY); + } else { + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & + ~POWER_TARGET_ON); + do { + pp_status = REG_READ(PP_STATUS); + } while (pp_status & PP_ON); + } + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); +} + +static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct psb_intel_output *output = enc_to_psb_intel_output(encoder); + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_lvds_dpms \n"); +#endif /* PRINT_JLIU7 */ + + if (mode == DRM_MODE_DPMS_ON) + mrst_lvds_set_power(dev, output, true); + else + mrst_lvds_set_power(dev, output, false); + + /* XXX: We never power down the LVDS pairs. */ +} + +static void mrst_lvds_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct psb_intel_mode_device *mode_dev = enc_to_psb_intel_output(encoder)->mode_dev; + struct drm_device *dev = encoder->dev; + u32 lvds_port; + uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN; + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n"); +#endif /* PRINT_JLIU7 */ + + powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, true); + + /* + * The LVDS pin pair will already have been turned on in the + * psb_intel_crtc_mode_set since it has a large impact on the DPLL + * settings. + */ + /*FIXME JLIU7 Get panel power delay parameters from config data */ + REG_WRITE(0x61208, 0x25807d0); + REG_WRITE(0x6120c, 0x1f407d0); + REG_WRITE(0x61210, 0x270f04); + + lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN | LVDS_BORDER_EN; + + if (mode_dev->panel_wants_dither) + lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE; + + REG_WRITE(LVDS, lvds_port); + + drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base, + dev->mode_config.scaling_mode_property, &curValue); + + if (curValue == DRM_MODE_SCALE_NO_SCALE) + REG_WRITE(PFIT_CONTROL, 0); + else if (curValue == DRM_MODE_SCALE_ASPECT) { + if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) { + if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay)) + REG_WRITE(PFIT_CONTROL, PFIT_ENABLE); + else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay)) + REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX); + else + REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX); + } else + REG_WRITE(PFIT_CONTROL, PFIT_ENABLE); + } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/ + REG_WRITE(PFIT_CONTROL, PFIT_ENABLE); + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); +} + + +static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = { + .dpms = mrst_lvds_dpms, + .mode_fixup = psb_intel_lvds_mode_fixup, + .prepare = psb_intel_lvds_prepare, + .mode_set = mrst_lvds_mode_set, + .commit = psb_intel_lvds_commit, +}; + +/** Returns the panel fixed mode from configuration. */ +/** FIXME JLIU7 need to revist it. 
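The DRM_MODE_SCALE_ASPECT branch of mrst_lvds_mode_set above picks between pillarboxing and letterboxing by cross-multiplying the two aspect ratios, which keeps the comparison in integer arithmetic. The same decision isolated as a sketch (the helper and enum names are invented for illustration, not part of the patch):

	enum fit_mode { FIT_FULL, FIT_PILLARBOX, FIT_LETTERBOX };

	static enum fit_mode classify_aspect(int mode_w, int mode_h,
					     int panel_w, int panel_h)
	{
		if (panel_w * mode_h == mode_w * panel_h)
			return FIT_FULL;	/* aspect ratios match */
		if (panel_w * mode_h > mode_w * panel_h)
			return FIT_PILLARBOX;	/* panel wider: bars at sides */
		return FIT_LETTERBOX;		/* panel taller: bars top/bottom */
	}

For an 800x600 request on a 1024x600 panel, 1024 * 600 > 800 * 600, so the fitter pillarboxes; the opposite relation letterboxes.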
*/ +struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device + *dev) +{ + struct drm_display_mode *mode; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct mrst_timing_info *ti = &dev_priv->gct_data.DTD; + + mode = kzalloc(sizeof(*mode), GFP_KERNEL); + if (!mode) + return NULL; + + if (dev_priv->vbt_data.Size != 0x00) { /*if non-zero, then use vbt*/ + + mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; + mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; + mode->hsync_start = mode->hdisplay + \ + ((ti->hsync_offset_hi << 8) | \ + ti->hsync_offset_lo); + mode->hsync_end = mode->hsync_start + \ + ((ti->hsync_pulse_width_hi << 8) | \ + ti->hsync_pulse_width_lo); + mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \ + ti->hblank_lo); + mode->vsync_start = \ + mode->vdisplay + ((ti->vsync_offset_hi << 8) | \ + ti->vsync_offset_lo); + mode->vsync_end = \ + mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \ + ti->vsync_pulse_width_lo); + mode->vtotal = mode->vdisplay + \ + ((ti->vblank_hi << 8) | ti->vblank_lo); + mode->clock = ti->pixel_clock * 10; +#if 0 + printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay); + printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay); + printk(KERN_INFO "HSS is %d\n", mode->hsync_start); + printk(KERN_INFO "HSE is %d\n", mode->hsync_end); + printk(KERN_INFO "htotal is %d\n", mode->htotal); + printk(KERN_INFO "VSS is %d\n", mode->vsync_start); + printk(KERN_INFO "VSE is %d\n", mode->vsync_end); + printk(KERN_INFO "vtotal is %d\n", mode->vtotal); + printk(KERN_INFO "clock is %d\n", mode->clock); +#endif + } + else { + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for TPO LTPS LPJ040K001A */ + mode->hdisplay = 800; + mode->vdisplay = 480; + mode->hsync_start = 836; + mode->hsync_end = 846; + mode->htotal = 1056; + mode->vsync_start = 489; + mode->vsync_end = 491; + mode->vtotal = 525; + mode->clock = 33264; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for LVDS 800x480 */ + mode->hdisplay = 800; + mode->vdisplay = 480; + mode->hsync_start = 801; + mode->hsync_end = 802; + mode->htotal = 1024; + mode->vsync_start = 481; + mode->vsync_end = 482; + mode->vtotal = 525; + mode->clock = 30994; +#endif /*FIXME jliu7 remove it later */ + +#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */ + /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ + mode->hdisplay = 1024; + mode->vdisplay = 600; + mode->hsync_start = 1072; + mode->hsync_end = 1104; + mode->htotal = 1184; + mode->vsync_start = 603; + mode->vsync_end = 604; + mode->vtotal = 608; + mode->clock = 53990; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */ + /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ + mode->hdisplay = 1024; + mode->vdisplay = 600; + mode->hsync_start = 1104; + mode->hsync_end = 1136; + mode->htotal = 1184; + mode->vsync_start = 603; + mode->vsync_end = 604; + mode->vtotal = 608; + mode->clock = 53990; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */ + mode->hdisplay = 1024; + mode->vdisplay = 600; + mode->hsync_start = 1124; + mode->hsync_end = 1204; + mode->htotal = 1312; + mode->vsync_start = 607; + mode->vsync_end = 610; + mode->vtotal = 621; + mode->clock = 48885; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 
remove it later */ + /* hard coded fixed mode for LVDS 1024x768 */ + mode->hdisplay = 1024; + mode->vdisplay = 768; + mode->hsync_start = 1048; + mode->hsync_end = 1184; + mode->htotal = 1344; + mode->vsync_start = 771; + mode->vsync_end = 777; + mode->vtotal = 806; + mode->clock = 65000; +#endif /*FIXME jliu7 remove it later */ + +#if 0 /*FIXME jliu7 remove it later */ + /* hard coded fixed mode for LVDS 1366x768 */ + mode->hdisplay = 1366; + mode->vdisplay = 768; + mode->hsync_start = 1430; + mode->hsync_end = 1558; + mode->htotal = 1664; + mode->vsync_start = 769; + mode->vsync_end = 770; + mode->vtotal = 776; + mode->clock = 77500; +#endif /*FIXME jliu7 remove it later */ + } + drm_mode_set_name(mode); + drm_mode_set_crtcinfo(mode, 0); + + return mode; +} + +/** + * mrst_lvds_init - setup LVDS connectors on this device + * @dev: drm device + * + * Create the connector, register the LVDS DDC bus, and try to figure out what + * modes we can display on the LVDS panel (if present). + */ +void mrst_lvds_init(struct drm_device *dev, + struct psb_intel_mode_device *mode_dev) +{ + struct psb_intel_output *psb_intel_output; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private; + struct edid *edid; + int ret = 0; + struct i2c_adapter *i2c_adap; + struct drm_display_mode *scan; /* *modes, *bios_mode; */ + +#if PRINT_JLIU7 + DRM_INFO("JLIU7 enter mrst_lvds_init \n"); +#endif /* PRINT_JLIU7 */ + + psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL); + if (!psb_intel_output) + return; + + psb_intel_output->mode_dev = mode_dev; + connector = &psb_intel_output->base; + encoder = &psb_intel_output->enc; + drm_connector_init(dev, &psb_intel_output->base, + &psb_intel_lvds_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + + drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs, + DRM_MODE_ENCODER_LVDS); + + drm_mode_connector_attach_encoder(&psb_intel_output->base, + &psb_intel_output->enc); + psb_intel_output->type = INTEL_OUTPUT_LVDS; + + drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs); + drm_connector_helper_add(connector, + &psb_intel_lvds_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); + + lvds_backlight = BRIGHTNESS_MAX_LEVEL; + + /* + * LVDS discovery: + * 1) check for EDID on DDC + * 2) check for VBT data + * 3) check to see if LVDS is already on + * if none of the above, no panel + * 4) make sure lid is open + * if closed, act like it's not there for now + */ + i2c_adap = i2c_get_adapter(2); + if (i2c_adap == NULL) + printk(KERN_ALERT "No ddc adapter available!\n"); + /* Set up the DDC bus. */ +/* psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); + if (!psb_intel_output->ddc_bus) { + dev_printk(KERN_ERR, &dev->pdev->dev, + "DDC bus registration " "failed.\n"); + goto failed_ddc; + }*/ + + /* + * Attempt to get the fixed panel mode from DDC. Assume that the + * preferred mode is the right one. 
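Every timing field in the GCT DTD consumed by mrst_lvds_get_configuration_mode above arrives split into low and high bytes, reassembled as (hi << 8) | lo. A one-line worked example with hypothetical byte values:

	/* Sketch, not in the patch: reassemble a split DTD timing field. */
	static inline int dtd_field(u8 hi, u8 lo)
	{
		return (hi << 8) | lo;
	}

	/* hactive_hi = 0x04, hactive_lo = 0x00 -> (0x04 << 8) | 0x00 = 1024 */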
+	 */
+	edid = drm_get_edid(connector, i2c_adap);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			mode_dev->panel_fixed_mode =
+			    drm_mode_duplicate(dev, scan);
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+
+	/*
+	 * If we didn't get EDID, try getting the panel timings
+	 * from the configuration data.
+	 */
+	mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
+
+	if (mode_dev->panel_fixed_mode) {
+		mode_dev->panel_fixed_mode->type |=
+		    DRM_MODE_TYPE_PREFERRED;
+		goto out;	/* FIXME: check for quirks */
+	}
+
+	/* If we still don't have a mode after all that, give up. */
+	if (!mode_dev->panel_fixed_mode) {
+		DRM_DEBUG("Found no modes on the LVDS, ignoring the LVDS\n");
+		goto failed_find;
+	}
+
+out:
+	drm_sysfs_connector_add(connector);
+	return;
+
+failed_find:
+	DRM_DEBUG("No LVDS modes found, disabling.\n");
+	if (psb_intel_output->ddc_bus)
+		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+
+failed_ddc:
+
+	drm_encoder_cleanup(encoder);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+/* MRST platform end */
diff --git a/drivers/gpu/drm/psb/psb_intel_modes.c b/drivers/gpu/drm/psb/psb_intel_modes.c
new file mode 100644
index 0000000..54abe86
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_intel_modes.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2007 Dave Airlie
+ * Copyright (c) 2007 Intel Corporation
+ *   Jesse Barnes
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+
+/**
+ * psb_intel_ddc_probe
+ */
+bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
+{
+	u8 out_buf[] = { 0x0, 0x0 };
+	u8 buf[2];
+	int ret;
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = 0x50,	/* standard DDC/EDID slave address */
+		 .flags = 0,
+		 .len = 1,
+		 .buf = out_buf,
+		 },
+		{
+		 .addr = 0x50,
+		 .flags = I2C_M_RD,
+		 .len = 1,
+		 .buf = buf,
+		 }
+	};
+
+	ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
+	if (ret == 2)
+		return true;
+
+	return false;
+}
+
+/**
+ * psb_intel_ddc_get_modes - get modelist from monitor
+ * @psb_intel_output: PSB output to fetch EDID modes for
+ *
+ * Fetch the EDID information for @psb_intel_output using its DDC bus.
+ */
+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
+{
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_get_edid(&psb_intel_output->base,
+			    &psb_intel_output->ddc_bus->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(&psb_intel_output->base,
+							edid);
+		ret = drm_add_edid_modes(&psb_intel_output->base, edid);
+		kfree(edid);
+	}
+	return ret;
+}
diff --git a/drivers/gpu/drm/psb/psb_intel_reg.h b/drivers/gpu/drm/psb/psb_intel_reg.h
new file mode 100644
index 0000000..7e22463
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_intel_reg.h
@@ -0,0 +1,1015 @@
+#define BLC_PWM_CTL		0x61254
+#define BLC_PWM_CTL2		0x61250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT	(17)
+/**
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17)
+#define BLM_LEGACY_MODE			(1 << 16)
+/**
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
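Taken together, the two fields documented above compose as follows (a sketch with made-up cycle counts, not driver code; BACKLIGHT_DUTY_CYCLE_SHIFT is defined just below):

	/* Pack a modulation-cycle length and an on-time into BLC_PWM_CTL:
	 * bits [31:17] store cycle/2, bits [15:0] store the on-time. */
	static u32 blc_pwm_pack(u32 cycle, u32 on_time)
	{
		return ((cycle / 2) << BACKLIGHT_MODULATION_FREQ_SHIFT) |
		       (on_time << BACKLIGHT_DUTY_CYCLE_SHIFT);
	}

	/* e.g. blc_pwm_pack(0x5000, 0x2800) programs a 50% duty cycle. */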
+ */ +#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) +#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) + +#define I915_GCFGC 0xf0 +#define I915_LOW_FREQUENCY_ENABLE (1 << 7) +#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4) +#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4) +#define I915_DISPLAY_CLOCK_MASK (7 << 4) + +#define I855_HPLLCC 0xc0 +#define I855_CLOCK_CONTROL_MASK (3 << 0) +#define I855_CLOCK_133_200 (0 << 0) +#define I855_CLOCK_100_200 (1 << 0) +#define I855_CLOCK_100_133 (2 << 0) +#define I855_CLOCK_166_250 (3 << 0) + +/* I830 CRTC registers */ +#define HTOTAL_A 0x60000 +#define HBLANK_A 0x60004 +#define HSYNC_A 0x60008 +#define VTOTAL_A 0x6000c +#define VBLANK_A 0x60010 +#define VSYNC_A 0x60014 +#define PIPEASRC 0x6001c +#define BCLRPAT_A 0x60020 +#define VSYNCSHIFT_A 0x60028 + +#define HTOTAL_B 0x61000 +#define HBLANK_B 0x61004 +#define HSYNC_B 0x61008 +#define VTOTAL_B 0x6100c +#define VBLANK_B 0x61010 +#define VSYNC_B 0x61014 +#define PIPEBSRC 0x6101c +#define BCLRPAT_B 0x61020 +#define VSYNCSHIFT_B 0x61028 + +#define PP_STATUS 0x61200 +# define PP_ON (1 << 31) +/** + * Indicates that all dependencies of the panel are on: + * + * - PLL enabled + * - pipe enabled + * - LVDS/DVOB/DVOC on + */ +# define PP_READY (1 << 30) +# define PP_SEQUENCE_NONE (0 << 28) +# define PP_SEQUENCE_ON (1 << 28) +# define PP_SEQUENCE_OFF (2 << 28) +# define PP_SEQUENCE_MASK 0x30000000 +#define PP_CONTROL 0x61204 +# define POWER_TARGET_ON (1 << 0) + +#define LVDSPP_ON 0x61208 +#define LVDSPP_OFF 0x6120c +#define PP_CYCLE 0x61210 + +#define PFIT_CONTROL 0x61230 +# define PFIT_ENABLE (1 << 31) +# define PFIT_PIPE_MASK (3 << 29) +# define PFIT_PIPE_SHIFT 29 +# define PFIT_SCALING_MODE_PILLARBOX (1 << 27) +# define PFIT_SCALING_MODE_LETTERBOX (3 << 26) +# define VERT_INTERP_DISABLE (0 << 10) +# define VERT_INTERP_BILINEAR (1 << 10) +# define VERT_INTERP_MASK (3 << 10) +# define VERT_AUTO_SCALE (1 << 9) +# define HORIZ_INTERP_DISABLE (0 << 6) +# define HORIZ_INTERP_BILINEAR (1 << 6) +# define HORIZ_INTERP_MASK (3 << 6) +# define HORIZ_AUTO_SCALE (1 << 5) +# define PANEL_8TO6_DITHER_ENABLE (1 << 3) + +#define PFIT_PGM_RATIOS 0x61234 +# define PFIT_VERT_SCALE_MASK 0xfff00000 +# define PFIT_HORIZ_SCALE_MASK 0x0000fff0 + +#define PFIT_AUTO_RATIOS 0x61238 + + +#define DPLL_A 0x06014 +#define DPLL_B 0x06018 +# define DPLL_VCO_ENABLE (1 << 31) +# define DPLL_DVO_HIGH_SPEED (1 << 30) +# define DPLL_SYNCLOCK_ENABLE (1 << 29) +# define DPLL_VGA_MODE_DIS (1 << 28) +# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ +# define DPLLB_MODE_LVDS (2 << 26) /* i915 */ +# define DPLL_MODE_MASK (3 << 26) +# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ +# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ +# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ +# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ +# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ +# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ +/** + * The i830 generation, in DAC/serial mode, defines p1 as two plus this + * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set. + */ +# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 +/** + * The i830 generation, in LVDS mode, defines P1 as the bit number set within + * this field (only one bit may be set). 
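The two P1 encodings described above differ enough to be worth spelling out. Decoded side by side as a sketch (helper names invented; semantics taken from the comments; PLL_P1_DIVIDE_BY_TWO and the LVDS mask/shift names are defined just below):

	#include <linux/bitops.h>	/* ffs() */

	/* DAC/serial mode: p1 = field + 2, or 2 if PLL_P1_DIVIDE_BY_TWO. */
	static int i830_p1_dac(u32 dpll)
	{
		if (dpll & PLL_P1_DIVIDE_BY_TWO)
			return 2;
		return ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
			DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
	}

	/* LVDS mode: P1 is the index of the single bit set in the field. */
	static int i830_p1_lvds(u32 dpll)
	{
		return ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			   DPLL_FPA01_P1_POST_DIV_SHIFT);
	}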
+ */ +# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 +# define DPLL_FPA01_P1_POST_DIV_SHIFT 16 +# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required + * in DVO non-gang */ +# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ +# define PLL_REF_INPUT_DREFCLK (0 << 13) +# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ +# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO + * TVCLKIN */ +# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) +# define PLL_REF_INPUT_MASK (3 << 13) +# define PLL_LOAD_PULSE_PHASE_SHIFT 9 +/* + * Parallel to Serial Load Pulse phase selection. + * Selects the phase for the 10X DPLL clock for the PCIe + * digital display port. The range is 4 to 13; 10 or more + * is just a flip delay. The default is 6 + */ +# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) +# define DISPLAY_RATE_SELECT_FPA1 (1 << 8) + +/** + * SDVO multiplier for 945G/GM. Not used on 965. + * + * \sa DPLL_MD_UDI_MULTIPLIER_MASK + */ +# define SDVO_MULTIPLIER_MASK 0x000000ff +# define SDVO_MULTIPLIER_SHIFT_HIRES 4 +# define SDVO_MULTIPLIER_SHIFT_VGA 0 + +/** @defgroup DPLL_MD + * @{ + */ +/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */ +#define DPLL_A_MD 0x0601c +/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */ +#define DPLL_B_MD 0x06020 +/** + * UDI pixel divider, controlling how many pixels are stuffed into a packet. + * + * Value is pixels minus 1. Must be set to 1 pixel for SDVO. + */ +# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 +# define DPLL_MD_UDI_DIVIDER_SHIFT 24 +/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ +# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 +# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 +/** + * SDVO/UDI pixel multiplier. + * + * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus + * clock rate is 10 times the DPLL clock. At low resolution/refresh rate + * modes, the bus rate would be below the limits, so SDVO allows for stuffing + * dummy bytes in the datastream at an increased clock rate, with both sides of + * the link knowing how many bytes are fill. + * + * So, for a mode with a dotclock of 65Mhz, we would want to double the clock + * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be + * set to 130Mhz, and the SDVO multiplier set to 2x in this register and + * through an SDVO command. + * + * This register field has values of multiplication factor minus 1, with + * a maximum multiplier of 5 for SDVO. + */ +# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 +# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 +/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. + * This best be set to the default value (3) or the CRT won't work. No, + * I don't entirely understand what this does... 
+ */ +# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f +# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 +/** @} */ + +#define DPLL_TEST 0x606c +# define DPLLB_TEST_SDVO_DIV_1 (0 << 22) +# define DPLLB_TEST_SDVO_DIV_2 (1 << 22) +# define DPLLB_TEST_SDVO_DIV_4 (2 << 22) +# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) +# define DPLLB_TEST_N_BYPASS (1 << 19) +# define DPLLB_TEST_M_BYPASS (1 << 18) +# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) +# define DPLLA_TEST_N_BYPASS (1 << 3) +# define DPLLA_TEST_M_BYPASS (1 << 2) +# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) + +#define ADPA 0x61100 +#define ADPA_DAC_ENABLE (1<<31) +#define ADPA_DAC_DISABLE 0 +#define ADPA_PIPE_SELECT_MASK (1<<30) +#define ADPA_PIPE_A_SELECT 0 +#define ADPA_PIPE_B_SELECT (1<<30) +#define ADPA_USE_VGA_HVPOLARITY (1<<15) +#define ADPA_SETS_HVPOLARITY 0 +#define ADPA_VSYNC_CNTL_DISABLE (1<<11) +#define ADPA_VSYNC_CNTL_ENABLE 0 +#define ADPA_HSYNC_CNTL_DISABLE (1<<10) +#define ADPA_HSYNC_CNTL_ENABLE 0 +#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) +#define ADPA_VSYNC_ACTIVE_LOW 0 +#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) +#define ADPA_HSYNC_ACTIVE_LOW 0 + +#define FPA0 0x06040 +#define FPA1 0x06044 +#define FPB0 0x06048 +#define FPB1 0x0604c +# define FP_N_DIV_MASK 0x003f0000 +# define FP_N_DIV_SHIFT 16 +# define FP_M1_DIV_MASK 0x00003f00 +# define FP_M1_DIV_SHIFT 8 +# define FP_M2_DIV_MASK 0x0000003f +# define FP_M2_DIV_SHIFT 0 + + +#define PORT_HOTPLUG_EN 0x61110 +# define SDVOB_HOTPLUG_INT_EN (1 << 26) +# define SDVOC_HOTPLUG_INT_EN (1 << 25) +# define TV_HOTPLUG_INT_EN (1 << 18) +# define CRT_HOTPLUG_INT_EN (1 << 9) +# define CRT_HOTPLUG_FORCE_DETECT (1 << 3) + +#define PORT_HOTPLUG_STAT 0x61114 +# define CRT_HOTPLUG_INT_STATUS (1 << 11) +# define TV_HOTPLUG_INT_STATUS (1 << 10) +# define CRT_HOTPLUG_MONITOR_MASK (3 << 8) +# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) +# define CRT_HOTPLUG_MONITOR_MONO (2 << 8) +# define CRT_HOTPLUG_MONITOR_NONE (0 << 8) +# define SDVOC_HOTPLUG_INT_STATUS (1 << 7) +# define SDVOB_HOTPLUG_INT_STATUS (1 << 6) + +#define SDVOB 0x61140 +#define SDVOC 0x61160 +#define SDVO_ENABLE (1 << 31) +#define SDVO_PIPE_B_SELECT (1 << 30) +#define SDVO_STALL_SELECT (1 << 29) +#define SDVO_INTERRUPT_ENABLE (1 << 26) +/** + * 915G/GM SDVO pixel multiplier. + * + * Programmed value is multiplier - 1, up to 5x. + * + * \sa DPLL_MD_UDI_MULTIPLIER_MASK + */ +#define SDVO_PORT_MULTIPLY_MASK (7 << 23) +#define SDVO_PORT_MULTIPLY_SHIFT 23 +#define SDVO_PHASE_SELECT_MASK (15 << 19) +#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) +#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) +#define SDVOC_GANG_MODE (1 << 16) +#define SDVO_BORDER_ENABLE (1 << 7) +#define SDVOB_PCIE_CONCURRENCY (1 << 3) +#define SDVO_DETECTED (1 << 2) +/* Bits to be preserved when writing */ +#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14)) +#define SDVOC_PRESERVE_MASK (1 << 17) + +/** @defgroup LVDS + * @{ + */ +/** + * This register controls the LVDS output enable, pipe selection, and data + * format selection. + * + * All of the clock/data pairs are force powered down by power sequencing. + */ +#define LVDS 0x61180 +/** + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as + * the DPLL semantics change when the LVDS is assigned to that pipe. + */ +# define LVDS_PORT_EN (1 << 31) +/** Selects pipe B for LVDS data. Must be set on pre-965. */ +# define LVDS_PIPEB_SELECT (1 << 30) + +/** Turns on border drawing to allow centered display. 
*/ +# define LVDS_BORDER_EN (1 << 15) + +/** + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per + * pixel. + */ +# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) +# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) +# define LVDS_A0A2_CLKA_POWER_UP (3 << 8) +/** + * Controls the A3 data pair, which contains the additional LSBs for 24 bit + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be + * on. + */ +# define LVDS_A3_POWER_MASK (3 << 6) +# define LVDS_A3_POWER_DOWN (0 << 6) +# define LVDS_A3_POWER_UP (3 << 6) +/** + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP + * is set. + */ +# define LVDS_CLKB_POWER_MASK (3 << 4) +# define LVDS_CLKB_POWER_DOWN (0 << 4) +# define LVDS_CLKB_POWER_UP (3 << 4) + +/** + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 + * setting for whether we are in dual-channel mode. The B3 pair will + * additionally only be powered up when LVDS_A3_POWER_UP is set. + */ +# define LVDS_B0B3_POWER_MASK (3 << 2) +# define LVDS_B0B3_POWER_DOWN (0 << 2) +# define LVDS_B0B3_POWER_UP (3 << 2) + +#define PIPEACONF 0x70008 +#define PIPEACONF_ENABLE (1<<31) +#define PIPEACONF_DISABLE 0 +#define PIPEACONF_DOUBLE_WIDE (1<<30) +#define I965_PIPECONF_ACTIVE (1<<30) +#define PIPEACONF_SINGLE_WIDE 0 +#define PIPEACONF_PIPE_UNLOCKED 0 +#define PIPEACONF_PIPE_LOCKED (1<<25) +#define PIPEACONF_PALETTE 0 +#define PIPEACONF_GAMMA (1<<24) +#define PIPECONF_FORCE_BORDER (1<<25) +#define PIPECONF_PROGRESSIVE (0 << 21) +#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) +#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) + +#define PIPEBCONF 0x71008 +#define PIPEBCONF_ENABLE (1<<31) +#define PIPEBCONF_DISABLE 0 +#define PIPEBCONF_DOUBLE_WIDE (1<<30) +#define PIPEBCONF_DISABLE 0 +#define PIPEBCONF_GAMMA (1<<24) +#define PIPEBCONF_PALETTE 0 + +#define PIPEBGCMAXRED 0x71010 +#define PIPEBGCMAXGREEN 0x71014 +#define PIPEBGCMAXBLUE 0x71018 + +#define PIPEASTAT 0x70024 +#define PIPEBSTAT 0x71024 +#define PIPE_VBLANK_CLEAR (1 << 1) +#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) +#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) + +#define PIPEAFRAMEHIGH 0x70040 +#define PIPEAFRAMEPIXEL 0x70044 +#define PIPEBFRAMEHIGH 0x71040 +#define PIPEBFRAMEPIXEL 0x71044 +#define PIPE_FRAME_HIGH_MASK 0x0000ffff +#define PIPE_FRAME_HIGH_SHIFT 0 +#define PIPE_FRAME_LOW_MASK 0xff000000 +#define PIPE_FRAME_LOW_SHIFT 24 +#define PIPE_PIXEL_MASK 0x00ffffff +#define PIPE_PIXEL_SHIFT 0 + +#define DSPARB 0x70030 +#define DSPFW1 0x70034 +#define DSPFW2 0x70038 +#define DSPFW3 0x7003c +#define DSPFW4 0x70050 +#define DSPFW5 0x70054 +#define DSPFW6 0x70058 +#define DSPCHICKENBIT 0x70400 +#define DSPACNTR 0x70180 +#define DSPBCNTR 0x71180 +#define DISPLAY_PLANE_ENABLE (1<<31) +#define DISPLAY_PLANE_DISABLE 0 +#define DISPPLANE_GAMMA_ENABLE (1<<30) +#define DISPPLANE_GAMMA_DISABLE 0 +#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) +#define DISPPLANE_8BPP (0x2<<26) +#define DISPPLANE_15_16BPP (0x4<<26) +#define DISPPLANE_16BPP (0x5<<26) +#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) +#define DISPPLANE_32BPP (0x7<<26) +#define DISPPLANE_STEREO_ENABLE (1<<25) +#define DISPPLANE_STEREO_DISABLE 0 +#define DISPPLANE_SEL_PIPE_MASK (1<<24) +#define DISPPLANE_SEL_PIPE_A 0 +#define DISPPLANE_SEL_PIPE_B (1<<24) +#define DISPPLANE_SRC_KEY_ENABLE (1<<22) +#define DISPPLANE_SRC_KEY_DISABLE 0 +#define DISPPLANE_LINE_DOUBLE (1<<20) +#define DISPPLANE_NO_LINE_DOUBLE 0 +#define DISPPLANE_STEREO_POLARITY_FIRST 0 +#define DISPPLANE_STEREO_POLARITY_SECOND 
(1<<18) +/* plane B only */ +#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) +#define DISPPLANE_ALPHA_TRANS_DISABLE 0 +#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0 +#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) + +#define DSPABASE 0x70184 +#define DSPALINOFF 0x70184 +#define DSPASTRIDE 0x70188 + +#define DSPBBASE 0x71184 +#define DSPBLINOFF 0X71184 +#define DSPBADDR DSPBBASE +#define DSPBSTRIDE 0x71188 + +#define DSPAKEYVAL 0x70194 +#define DSPAKEYMASK 0x70198 + +#define DSPAPOS 0x7018C /* reserved */ +#define DSPASIZE 0x70190 +#define DSPBPOS 0x7118C +#define DSPBSIZE 0x71190 + +#define DSPASURF 0x7019C +#define DSPATILEOFF 0x701A4 + +#define DSPBSURF 0x7119C +#define DSPBTILEOFF 0x711A4 + +#define VGACNTRL 0x71400 +# define VGA_DISP_DISABLE (1 << 31) +# define VGA_2X_MODE (1 << 30) +# define VGA_PIPE_B_SELECT (1 << 29) + +/* + * Overlay registers + */ +#define OV_OVADD 0x30000 +#define OV_OGAMC5 0x30010 +#define OV_OGAMC4 0x30014 +#define OV_OGAMC3 0x30018 +#define OV_OGAMC2 0x3001C +#define OV_OGAMC1 0x30020 +#define OV_OGAMC0 0x30024 + +/* + * Some BIOS scratch area registers. The 845 (and 830?) store the amount + * of video memory available to the BIOS in SWF1. + */ + +#define SWF0 0x71410 +#define SWF1 0x71414 +#define SWF2 0x71418 +#define SWF3 0x7141c +#define SWF4 0x71420 +#define SWF5 0x71424 +#define SWF6 0x71428 + +/* + * 855 scratch registers. + */ +#define SWF00 0x70410 +#define SWF01 0x70414 +#define SWF02 0x70418 +#define SWF03 0x7041c +#define SWF04 0x70420 +#define SWF05 0x70424 +#define SWF06 0x70428 + +#define SWF10 SWF0 +#define SWF11 SWF1 +#define SWF12 SWF2 +#define SWF13 SWF3 +#define SWF14 SWF4 +#define SWF15 SWF5 +#define SWF16 SWF6 + +#define SWF30 0x72414 +#define SWF31 0x72418 +#define SWF32 0x7241c + + +/* + * Palette registers + */ +#define PALETTE_A 0x0a000 +#define PALETTE_B 0x0a800 + +#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC) +#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG) +#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) +#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) +#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG) + + +/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */ +#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG) +#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG) +#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG) +#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG) + +#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ + (dev)->pci_device == 0x2982 || \ + (dev)->pci_device == 0x2992 || \ + (dev)->pci_device == 0x29A2 || \ + (dev)->pci_device == 0x2A02 || \ + (dev)->pci_device == 0x2A12) + +#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) + +#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ + (dev)->pci_device == 0x29B2 || \ + (dev)->pci_device == 0x29D2) + +#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ + IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \ + IS_MRST(dev)) + +#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ + IS_I945GM(dev) || IS_I965GM(dev) || \ + IS_POULSBO(dev) || IS_MRST(dev)) + +/* Cursor A & B regs */ +#define CURACNTR 0x70080 +#define CURSOR_MODE_DISABLE 0x00 +#define CURSOR_MODE_64_32B_AX 0x07 +#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) +#define MCURSOR_GAMMA_ENABLE (1 << 26) +#define 
CURABASE 0x70084 +#define CURAPOS 0x70088 +#define CURSOR_POS_MASK 0x007FF +#define CURSOR_POS_SIGN 0x8000 +#define CURSOR_X_SHIFT 0 +#define CURSOR_Y_SHIFT 16 +#define CURBCNTR 0x700c0 +#define CURBBASE 0x700c4 +#define CURBPOS 0x700c8 + +/* + * Interrupt Registers + */ +#define IER 0x020a0 +#define IIR 0x020a4 +#define IMR 0x020a8 +#define ISR 0x020ac + +/* + * MOORESTOWN delta registers + */ +#define MRST_DPLL_A 0x0f014 +#define DPLLA_MODE_LVDS (2 << 26) /* mrst */ +#define MRST_FPA0 0x0f040 +#define MRST_FPA1 0x0f044 +#define MRST_PERF_MODE 0x020f4 + +/* #define LVDS 0x61180 */ +# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25) +# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24) +# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6) + +#define MIPI 0x61190 +# define MIPI_PORT_EN (1 << 31) +/** Turns on border drawing to allow centered display. */ +# define MIPI_BORDER_EN (1 << 15) + +/* #define PP_CONTROL 0x61204 */ +# define POWER_DOWN_ON_RESET (1 << 1) + +/* #define PFIT_CONTROL 0x61230 */ +# define PFIT_PIPE_SELECT (3 << 29) +# define PFIT_PIPE_SELECT_SHIFT (29) + +/* #define BLC_PWM_CTL 0x61254 */ +#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16) +#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16) + +/* #define PIPEACONF 0x70008 */ +#define PIPEACONF_PIPE_STATE (1<<30) +/* #define DSPACNTR 0x70180 */ +#if 0 /*FIXME JLIU7 need to define the following */ +1000 = 32 - bit RGBX(10 : 10 : 10 : 2) +pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX +(16 : 16 : 16 : 16) 16 bit floating point pixel format. +Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format. + Ignore + alpha. +#endif /*FIXME JLIU7 need to define the following */ + +#define MRST_DSPABASE 0x7019c + +/* + * MOORESTOWN reserved registers + */ +#if 0 +#define DSPAPOS 0x7018C /* reserved */ +#define DSPASIZE 0x70190 +#endif +/* + * Moorestown registers. 
+ */ +/*=========================================================================== +; General Constants +;--------------------------------------------------------------------------*/ +#define BIT0 0x00000001 +#define BIT1 0x00000002 +#define BIT2 0x00000004 +#define BIT3 0x00000008 +#define BIT4 0x00000010 +#define BIT5 0x00000020 +#define BIT6 0x00000040 +#define BIT7 0x00000080 +#define BIT8 0x00000100 +#define BIT9 0x00000200 +#define BIT10 0x00000400 +#define BIT11 0x00000800 +#define BIT12 0x00001000 +#define BIT13 0x00002000 +#define BIT14 0x00004000 +#define BIT15 0x00008000 +#define BIT16 0x00010000 +#define BIT17 0x00020000 +#define BIT18 0x00040000 +#define BIT19 0x00080000 +#define BIT20 0x00100000 +#define BIT21 0x00200000 +#define BIT22 0x00400000 +#define BIT23 0x00800000 +#define BIT24 0x01000000 +#define BIT25 0x02000000 +#define BIT26 0x04000000 +#define BIT27 0x08000000 +#define BIT28 0x10000000 +#define BIT29 0x20000000 +#define BIT30 0x40000000 +#define BIT31 0x80000000 +/*=========================================================================== +; MIPI IP registers +;--------------------------------------------------------------------------*/ +#define DEVICE_READY_REG 0xb000 +#define INTR_STAT_REG 0xb004 +#define RX_SOT_ERROR BIT0 +#define RX_SOT_SYNC_ERROR BIT1 +#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3 +#define RX_LP_TX_SYNC_ERROR BIT4 +#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5 +#define RX_FALSE_CONTROL_ERROR BIT6 +#define RX_ECC_SINGLE_BIT_ERROR BIT7 +#define RX_ECC_MULTI_BIT_ERROR BIT8 +#define RX_CHECKSUM_ERROR BIT9 +#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10 +#define RX_DSI_VC_ID_INVALID BIT11 +#define TX_FALSE_CONTROL_ERROR BIT12 +#define TX_ECC_SINGLE_BIT_ERROR BIT13 +#define TX_ECC_MULTI_BIT_ERROR BIT14 +#define TX_CHECKSUM_ERROR BIT15 +#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16 +#define TX_DSI_VC_ID_INVALID BIT17 +#define HIGH_CONTENTION BIT18 +#define LOW_CONTENTION BIT19 +#define DPI_FIFO_UNDER_RUN BIT20 +#define HS_TX_TIMEOUT BIT21 +#define LP_RX_TIMEOUT BIT22 +#define TURN_AROUND_ACK_TIMEOUT BIT23 +#define ACK_WITH_NO_ERROR BIT24 +#define INTR_EN_REG 0xb008 +#define DSI_FUNC_PRG_REG 0xb00c +#define DPI_CHANNEL_NUMBER_POS 0x03 +#define DBI_CHANNEL_NUMBER_POS 0x05 +#define FMT_DPI_POS 0x07 +#define FMT_DBI_POS 0x0A +#define DBI_DATA_WIDTH_POS 0x0D +#define HS_TX_TIMEOUT_REG 0xb010 +#define LP_RX_TIMEOUT_REG 0xb014 +#define TURN_AROUND_TIMEOUT_REG 0xb018 +#define DEVICE_RESET_REG 0xb01C +#define DPI_RESOLUTION_REG 0xb020 +#define RES_V_POS 0x10 +#define DBI_RESOLUTION_REG 0xb024 +#define HORIZ_SYNC_PAD_COUNT_REG 0xb028 +#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C +#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030 +#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034 +#define VERT_SYNC_PAD_COUNT_REG 0xb038 +#define VERT_BACK_PORCH_COUNT_REG 0xb03c +#define VERT_FRONT_PORCH_COUNT_REG 0xb040 +#define HIGH_LOW_SWITCH_COUNT_REG 0xb044 +#define DPI_CONTROL_REG 0xb048 +#define DPI_SHUT_DOWN BIT0 +#define DPI_TURN_ON BIT1 +#define DPI_COLOR_MODE_ON BIT2 +#define DPI_COLOR_MODE_OFF BIT3 +#define DPI_BACK_LIGHT_ON BIT4 +#define DPI_BACK_LIGHT_OFF BIT5 +#define DPI_LP BIT6 +#define DPI_DATA_REG 0xb04c +#define DPI_BACK_LIGHT_ON_DATA 0x07 +#define DPI_BACK_LIGHT_OFF_DATA 0x17 +#define INIT_COUNT_REG 0xb050 +#define MAX_RET_PAK_REG 0xb054 +#define VIDEO_FMT_REG 0xb058 +#define EOT_DISABLE_REG 0xb05c +#define LP_BYTECLK_REG 0xb060 +#define LP_GEN_DATA_REG 0xb064 +#define HS_GEN_DATA_REG 0xb068 +#define LP_GEN_CTRL_REG 0xb06C +#define HS_GEN_CTRL_REG 0xb070 +#define GEN_FIFO_STAT_REG 
0xb074 +#define HS_DATA_FIFO_FULL BIT0 +#define HS_DATA_FIFO_HALF_EMPTY BIT1 +#define HS_DATA_FIFO_EMPTY BIT2 +#define LP_DATA_FIFO_FULL BIT8 +#define LP_DATA_FIFO_HALF_EMPTY BIT9 +#define LP_DATA_FIFO_EMPTY BIT10 +#define HS_CTRL_FIFO_FULL BIT16 +#define HS_CTRL_FIFO_HALF_EMPTY BIT17 +#define HS_CTRL_FIFO_EMPTY BIT18 +#define LP_CTRL_FIFO_FULL BIT24 +#define LP_CTRL_FIFO_HALF_EMPTY BIT25 +#define LP_CTRL_FIFO_EMPTY BIT26 +/*=========================================================================== +; MIPI Adapter registers +;--------------------------------------------------------------------------*/ +#define MIPI_CONTROL_REG 0xb104 +#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1) +#define MIPI_DATA_ADDRESS_REG 0xb108 +#define MIPI_DATA_LENGTH_REG 0xb10C +#define MIPI_COMMAND_ADDRESS_REG 0xb110 +#define MIPI_COMMAND_LENGTH_REG 0xb114 +#define MIPI_READ_DATA_RETURN_REG0 0xb118 +#define MIPI_READ_DATA_RETURN_REG1 0xb11C +#define MIPI_READ_DATA_RETURN_REG2 0xb120 +#define MIPI_READ_DATA_RETURN_REG3 0xb124 +#define MIPI_READ_DATA_RETURN_REG4 0xb128 +#define MIPI_READ_DATA_RETURN_REG5 0xb12C +#define MIPI_READ_DATA_RETURN_REG6 0xb130 +#define MIPI_READ_DATA_RETURN_REG7 0xb134 +#define MIPI_READ_DATA_VALID_REG 0xb138 +/* DBI COMMANDS */ +#define soft_reset 0x01 +/* ************************************************************************* *\ +The display module performs a software reset. +Registers are written with their SW Reset default values. +\* ************************************************************************* */ +#define get_power_mode 0x0a +/* ************************************************************************* *\ +The display module returns the current power mode +\* ************************************************************************* */ +#define get_address_mode 0x0b +/* ************************************************************************* *\ +The display module returns the current status. +\* ************************************************************************* */ +#define get_pixel_format 0x0c +/* ************************************************************************* *\ +This command gets the pixel format for the RGB image data +used by the interface. +\* ************************************************************************* */ +#define get_display_mode 0x0d +/* ************************************************************************* *\ +The display module returns the Display Image Mode status. +\* ************************************************************************* */ +#define get_signal_mode 0x0e +/* ************************************************************************* *\ +The display module returns the Display Signal Mode. +\* ************************************************************************* */ +#define get_diagnostic_result 0x0f +/* ************************************************************************* *\ +The display module returns the self-diagnostic results following +a Sleep Out command. +\* ************************************************************************* */ +#define enter_sleep_mode 0x10 +/* ************************************************************************* *\ +This command causes the display module to enter the Sleep mode. +In this mode, all unnecessary blocks inside the display module are disabled +except interface communication. This is the lowest power mode +the display module supports. 
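One usage note on the generic FIFOs defined above: GEN_FIFO_STAT_REG exposes full/half-empty/empty bits precisely so a sender can throttle itself before queuing traffic. A heavily hedged sketch of that pattern (the accessors follow the rest of this driver; the polling protocol itself is an assumption for illustration, not taken from this patch):

	/* Wait for room in the LP generic-control FIFO, then queue a word. */
	static void lp_gen_ctrl_write(struct drm_device *dev, u32 word)
	{
		while (REG_READ(GEN_FIFO_STAT_REG) & LP_CTRL_FIFO_FULL)
			cpu_relax();
		REG_WRITE(LP_GEN_CTRL_REG, word);
	}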
+\* ************************************************************************* */
+#define exit_sleep_mode 0x11
+/* ************************************************************************* *\
+This command causes the display module to exit Sleep mode.
+All blocks inside the display module are enabled.
+\* ************************************************************************* */
+#define enter_partial_mode 0x12
+/* ************************************************************************* *\
+This command causes the display module to enter the Partial Display Mode.
+The Partial Display Mode window is described by the set_partial_area command.
+\* ************************************************************************* */
+#define enter_normal_mode 0x13
+/* ************************************************************************* *\
+This command causes the display module to enter the Normal mode.
+Normal mode is defined as having both Partial Display mode and Scroll mode off.
+\* ************************************************************************* */
+#define exit_invert_mode 0x20
+/* ************************************************************************* *\
+This command causes the display module to stop inverting the image data on
+the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define enter_invert_mode 0x21
+/* ************************************************************************* *\
+This command causes the display module to invert the image data only on
+the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_gamma_curve 0x26
+/* ************************************************************************* *\
+This command selects the desired gamma curve for the display device.
+Four fixed gamma curves are defined in the DCS specification.
+\* ************************************************************************* */
+#define set_display_off 0x28
+/* ************************************************************************* *\
+This command causes the display module to stop displaying the image data
+on the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_display_on 0x29
+/* ************************************************************************* *\
+This command causes the display module to start displaying the image data
+on the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_column_address 0x2a
+/* ************************************************************************* *\
+This command defines the column extent of the frame memory accessed by the
+host processor with the read_memory_continue and write_memory_continue
+commands. No status bits are changed.
+\* ************************************************************************* */
+#define set_page_address 0x2b
+/* ************************************************************************* *\
+This command defines the page extent of the frame memory accessed by the host
+processor with the write_memory_continue and read_memory_continue commands.
+No status bits are changed.
+\* ************************************************************************* */
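
As an illustration of the two commands above (not part of this patch): the SC/EC and
SP/EP parameters are 16-bit values and, per the DCS parameter convention, are assumed
here to be sent MSB first. A hypothetical host-side pack step, using kernel u8/u16
types:

    /* Hypothetical sketch, not driver code: pack a DCS set_column_address
     * (0x2a) payload before a write_mem_start transfer. */
    static inline void dcs_pack_set_column_address(u8 buf[5], u16 sc, u16 ec)
    {
    	buf[0] = set_column_address;	/* 0x2a */
    	buf[1] = sc >> 8;		/* Start Column, high byte */
    	buf[2] = sc & 0xff;		/* Start Column, low byte */
    	buf[3] = ec >> 8;		/* End Column, high byte */
    	buf[4] = ec & 0xff;		/* End Column, low byte */
    }
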
+#define write_mem_start 0x2c
+/* ************************************************************************* *\
+This command transfers image data from the host processor to the display
+module's frame memory starting at the pixel location specified by
+preceding set_column_address and set_page_address commands.
+\* ************************************************************************* */
+#define set_partial_area 0x30
+/* ************************************************************************* *\
+This command defines the Partial Display mode's display area.
+There are two parameters associated with
+this command, the first defines the Start Row (SR) and the second the End Row
+(ER). SR and ER refer to the Frame Memory Line Pointer.
+\* ************************************************************************* */
+#define set_scroll_area 0x33
+/* ************************************************************************* *\
+This command defines the display module's Vertical Scrolling Area.
+\* ************************************************************************* */
+#define set_tear_off 0x34
+/* ************************************************************************* *\
+This command turns off the display module's Tearing Effect output signal on
+the TE signal line.
+\* ************************************************************************* */
+#define set_tear_on 0x35
+/* ************************************************************************* *\
+This command turns on the display module's Tearing Effect output signal
+on the TE signal line.
+\* ************************************************************************* */
+#define set_address_mode 0x36
+/* ************************************************************************* *\
+This command sets the data order for transfers from the host processor to the
+display module's frame memory, bits B[7:5] and B3, and from the display
+module's frame memory to the display device, bits B[2:0] and B4.
+\* ************************************************************************* */
+#define set_scroll_start 0x37
+/* ************************************************************************* *\
+This command sets the start of the vertical scrolling area in the frame memory.
+The vertical scrolling area is fully defined when this command is used with
+the set_scroll_area command. The set_scroll_start command has one parameter,
+the Vertical Scroll Pointer. The VSP defines the line in the frame memory
+that is written to the display device as the first line of the vertical
+scroll area.
+\* ************************************************************************* */
+#define exit_idle_mode 0x38
+/* ************************************************************************* *\
+This command causes the display module to exit Idle mode.
+\* ************************************************************************* */
+#define enter_idle_mode 0x39
+/* ************************************************************************* *\
+This command causes the display module to enter Idle Mode.
+In Idle Mode, color expression is reduced. Colors are shown on the display
+device using the MSB of each of the R, G and B color components in the frame
+memory.
+\* ************************************************************************* */
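
To make the reduced color expression concrete, a sketch of the MSB-per-component
rule for an RGB888 pixel (illustrative only; the reduction happens inside the
display module, not in driver code):

    /* Illustrative only: in Idle Mode each component collapses to its MSB,
     * so the panel can show at most 8 distinct colors. */
    static inline u32 idle_mode_color(u32 rgb888)
    {
    	u32 r = (rgb888 & 0x800000) ? 0xff0000 : 0;	/* MSB of R */
    	u32 g = (rgb888 & 0x008000) ? 0x00ff00 : 0;	/* MSB of G */
    	u32 b = (rgb888 & 0x000080) ? 0x0000ff : 0;	/* MSB of B */
    	return r | g | b;
    }
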
+#define set_pixel_format 0x3a
+/* ************************************************************************* *\
+This command sets the pixel format for the RGB image data used by the interface.
+Bits D[6:4] DPI Pixel Format Definition
+Bits D[2:0] DBI Pixel Format Definition
+Bits D7 and D3 are not used.
+\* ************************************************************************* */
+#define write_mem_cont 0x3c
+/* ************************************************************************* *\
+This command transfers image data from the host processor to the display
+module's frame memory continuing from the pixel location following the
+previous write_memory_continue or write_memory_start command.
+\* ************************************************************************* */
+#define set_tear_scanline 0x44
+/* ************************************************************************* *\
+This command turns on the display module's Tearing Effect output signal on the
+TE signal line when the display module reaches line N.
+\* ************************************************************************* */
+#define get_scanline 0x45
+/* ************************************************************************* *\
+The display module returns the current scanline, N, used to update the
+display device. The total number of scanlines on a display device is
+defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
+the first line of V Sync and is denoted as Line 0.
+When in Sleep Mode, the value returned by get_scanline is undefined.
+\* ************************************************************************* */
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP 0x1
+#define DCS_PIXEL_FORMAT_8BPP 0x2
+#define DCS_PIXEL_FORMAT_12BPP 0x3
+#define DCS_PIXEL_FORMAT_16BPP 0x5
+#define DCS_PIXEL_FORMAT_18BPP 0x6
+#define DCS_PIXEL_FORMAT_24BPP 0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data 0xfc
+#define diag_res_data 0x00
+#define disp_mode_data 0x23
+#define pxl_fmt_data 0x77
+#define pwr_mode_data 0x74
+#define sig_mode_data 0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1 0xff
+#define scanline_data2 0xff
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT 0x01	/* RGB 565 FORMAT */
+#define RGB_666_FMT 0x02	/* RGB 666 FORMAT */
+#define LRGB_666_FMT 0x03	/* RGB LOOSELY PACKED
+				 * 666 FORMAT
+				 */
+#define RGB_888_FMT 0x04	/* RGB 888 FORMAT */
+#define NON_BURST_MODE_SYNC_PULSE 0x01	/* Non Burst Mode
+					 * with Sync Pulse
+					 */
+#define NON_BURST_MODE_SYNC_EVENTS 0x02	/* Non Burst Mode
+					 * with Sync events
+					 */
+#define BURST_MODE 0x03			/* Burst Mode */
+#define VIRTUAL_CHANNEL_NUMBER_0 0x00	/* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1 0x01	/* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2 0x02	/* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3 0x03	/* Virtual channel 3 */
+#define DBI_NOT_SUPPORTED 0x00		/* command mode
+					 * is not supported
+					 */
+#define DBI_DATA_WIDTH_16BIT 0x01	/* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT 0x02	/* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT 0x03	/* 8 bit data */
+#define DBI_COMMAND_BUFFER_SIZE 0x120	/* Allocate at least
+					 * 0x100 Byte with 32
+					 * byte alignment
+					 */
+#define DBI_DATA_BUFFER_SIZE 0x120	/* Allocate at least
+					 * 0x100 Byte with 32
+					 * byte alignment
+					 */
+#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
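
The mask above clears the five low address bits. A sketch of the intended use
(helper name hypothetical): allocating 0x120 bytes for a 0x100-byte DBI buffer
leaves 0x20 bytes of slack, so the base can be rounded up to a 32-byte boundary
without losing usable space:

    /* Hypothetical helper: round an address up to the next 32-byte
     * boundary.  With a 0x120-byte allocation, the aligned base still
     * has the full 0x100 bytes available for DBI commands/data. */
    static inline unsigned long dbi_align_32(unsigned long addr)
    {
    	return (addr + 31) & ALIGNMENT_32BYTE_MASK;
    }
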
+#define SKU_83 0x01
+#define SKU_100 0x02
+#define SKU_100L 0x04
+#define SKU_BYPASS 0x08
+#if 0
+/* ************************************************************************* *\
+DSI command data structure
+\* ************************************************************************* */
+union DSI_LONG_PACKET_HEADER {
+	u32 DSI_longPacketHeader;
+	struct {
+		u8 dataID;
+		u16 wordCount;
+		u8 ECC;
+	};
+#if 0 /*FIXME JLIU7 */
+	struct {
+		u8 DT:6;
+		u8 VC:2;
+	};
+#endif /*FIXME JLIU7 */
+};
+
+union MIPI_ADPT_CMD_LNG_REG {
+	u32 commandLengthReg;
+	struct {
+		u8 command0;
+		u8 command1;
+		u8 command2;
+		u8 command3;
+	};
+};
+
+struct SET_COLUMN_ADDRESS_DATA {
+	u8 command;
+	u16 SC;	/* Start Column */
+	u16 EC;	/* End Column */
+};
+
+struct SET_PAGE_ADDRESS_DATA {
+	u8 command;
+	u16 SP;	/* Start Page */
+	u16 EP;	/* End Page */
+};
+#endif
diff --git a/drivers/gpu/drm/psb/psb_intel_sdvo.c b/drivers/gpu/drm/psb/psb_intel_sdvo.c
new file mode 100644
index 0000000..9f68d8d
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_intel_sdvo.c
@@ -0,0 +1,1350 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <drm/drmP.h>
+#include "psb_intel_sdvo_regs.h"
+
+struct psb_intel_sdvo_priv {
+	struct psb_intel_i2c_chan *i2c_bus;
+	int slaveaddr;
+	int output_device;
+
+	u16 active_outputs;
+
+	struct psb_intel_sdvo_caps caps;
+	int pixel_clock_min, pixel_clock_max;
+
+	int save_sdvo_mult;
+	u16 save_active_outputs;
+	struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
+	struct psb_intel_sdvo_dtd save_output_dtd[16];
+	u32 save_SDVOX;
+	u8 in_out_map[4];
+
+	u8 by_input_wiring;
+	u32 active_device;
+};
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
+				u32 val)
+{
+	struct drm_device *dev = psb_intel_output->base.dev;
+	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+	u32 bval = val, cval = val;
+	int i;
+
+	if (sdvo_priv->output_device == SDVOB)
+		cval = REG_READ(SDVOC);
+	else
+		bval = REG_READ(SDVOB);
+	/*
+	 * Write the registers twice for luck. Sometimes,
+	 * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic + */ + for (i = 0; i < 2; i++) { + REG_WRITE(SDVOB, bval); + REG_READ(SDVOB); + REG_WRITE(SDVOC, cval); + REG_READ(SDVOC); + } +} + +static bool psb_intel_sdvo_read_byte(struct psb_intel_output *psb_intel_output, + u8 addr, u8 *ch) +{ + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; + u8 out_buf[2]; + u8 buf[2]; + int ret; + + struct i2c_msg msgs[] = { + { + .addr = sdvo_priv->i2c_bus->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = sdvo_priv->i2c_bus->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2); + if (ret == 2) { + /* DRM_DEBUG("got back from addr %02X = %02x\n", + * out_buf[0], buf[0]); + */ + *ch = buf[0]; + return true; + } + + DRM_DEBUG("i2c transfer returned %d\n", ret); + return false; +} + +static bool psb_intel_sdvo_write_byte(struct psb_intel_output *psb_intel_output, + int addr, u8 ch) +{ + u8 out_buf[2]; + struct i2c_msg msgs[] = { + { + .addr = psb_intel_output->i2c_bus->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1) + return true; + return false; +} + +#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} +/** Mapping of command numbers to names, for debug output */ +const static struct _sdvo_cmd_name { + u8 cmd; + char *name; +} sdvo_cmd_names[] = { +SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY + (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), + 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),}; + +#define SDVO_NAME(dev_priv) \ + ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") +#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv) + +static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd, + void *args, int args_len) +{ + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; + int i; + + if (1) { + DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); + for (i = 0; i < args_len; i++) + printk(KERN_INFO"%02X ", ((u8 *) args)[i]); + for (; i < 8; i++) + printk(" "); + for (i = 0; + i < + sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); + i++) { + if (cmd == sdvo_cmd_names[i].cmd) { + printk("(%s)", sdvo_cmd_names[i].name); + break; + } + } + if (i == + sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0])) + printk("(%02X)", cmd); + printk("\n"); + } + + for (i = 0; i < args_len; i++) { + psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i, + ((u8 *) args)[i]); + } + + psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd); +} + +static const char *cmd_status_names[] = { + "Power on", + "Success", + "Not supported", + "Invalid arg", + "Pending", + "Target not specified", + "Scaling not supported" +}; + +static u8 psb_intel_sdvo_read_response(struct psb_intel_output *psb_intel_output, + void *response, int response_len) +{ + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; + int i; + u8 status; + u8 retry = 50; + + while (retry--) { + /* Read the command response */ + for (i = 0; i < response_len; i++) { + psb_intel_sdvo_read_byte(psb_intel_output, + SDVO_I2C_RETURN_0 + i, + &((u8 *) response)[i]); + } + + /* read the return status */ + psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS, + &status); + + if (1) { + DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv)); + for (i = 0; i < response_len; i++) + printk(KERN_INFO"%02X ", ((u8 *) response)[i]); + for (; i < 8; i++) + printk(" "); + if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) + printk(KERN_INFO"(%s)", + cmd_status_names[status]); + else + printk(KERN_INFO"(??? %d)", status); + printk("\n"); + } + + if (status != SDVO_CMD_STATUS_PENDING) + return status; + + mdelay(50); + } + + return status; +} + +int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) +{ + if (mode->clock >= 100000) + return 1; + else if (mode->clock >= 50000) + return 2; + else + return 4; +} + +/** + * Don't check status code from this as it switches the bus back to the + * SDVO chips which defeats the purpose of doing a bus switch in the first + * place. + */ +void psb_intel_sdvo_set_control_bus_switch(struct psb_intel_output *psb_intel_output, + u8 target) +{ + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, + &target, 1); +} + +static bool psb_intel_sdvo_set_target_input(struct psb_intel_output *psb_intel_output, + bool target_0, bool target_1) +{ + struct psb_intel_sdvo_set_target_input_args targets = { 0 }; + u8 status; + + if (target_0 && target_1) + return SDVO_CMD_STATUS_NOTSUPP; + + if (target_1) + targets.target_1 = 1; + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT, + &targets, sizeof(targets)); + + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); + + return status == SDVO_CMD_STATUS_SUCCESS; +} + +/** + * Return whether each input is trained. + * + * This function is making an assumption about the layout of the response, + * which should be checked against the docs. 
+ */ +static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output + *psb_intel_output, bool *input_1, + bool *input_2) +{ + struct psb_intel_sdvo_get_trained_inputs_response response; + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS, + NULL, 0); + status = + psb_intel_sdvo_read_response(psb_intel_output, &response, + sizeof(response)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + *input_1 = response.input0_trained; + *input_2 = response.input1_trained; + return true; +} + +static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output + *psb_intel_output, u16 *outputs) +{ + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, + NULL, 0); + status = + psb_intel_sdvo_read_response(psb_intel_output, outputs, + sizeof(*outputs)); + + return status == SDVO_CMD_STATUS_SUCCESS; +} + +static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output + *psb_intel_output, u16 outputs) +{ + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, + &outputs, sizeof(outputs)); + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); + return status == SDVO_CMD_STATUS_SUCCESS; +} + +static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output + *psb_intel_output, int mode) +{ + u8 status, state = SDVO_ENCODER_STATE_ON; + + switch (mode) { + case DRM_MODE_DPMS_ON: + state = SDVO_ENCODER_STATE_ON; + break; + case DRM_MODE_DPMS_STANDBY: + state = SDVO_ENCODER_STATE_STANDBY; + break; + case DRM_MODE_DPMS_SUSPEND: + state = SDVO_ENCODER_STATE_SUSPEND; + break; + case DRM_MODE_DPMS_OFF: + state = SDVO_ENCODER_STATE_OFF; + break; + } + + psb_intel_sdvo_write_cmd(psb_intel_output, + SDVO_CMD_SET_ENCODER_POWER_STATE, &state, + sizeof(state)); + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); + + return status == SDVO_CMD_STATUS_SUCCESS; +} + +static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output + *psb_intel_output, + int *clock_min, + int *clock_max) +{ + struct psb_intel_sdvo_pixel_clock_range clocks; + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, + SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL, + 0); + + status = + psb_intel_sdvo_read_response(psb_intel_output, &clocks, + sizeof(clocks)); + + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + /* Convert the values from units of 10 kHz to kHz. 
*/ + *clock_min = clocks.min * 10; + *clock_max = clocks.max * 10; + + return true; +} + +static bool psb_intel_sdvo_set_target_output(struct psb_intel_output *psb_intel_output, + u16 outputs) +{ + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT, + &outputs, sizeof(outputs)); + + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); + return status == SDVO_CMD_STATUS_SUCCESS; +} + +static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output, + u8 cmd, struct psb_intel_sdvo_dtd *dtd) +{ + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0); + status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1, + sizeof(dtd->part1)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0); + status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2, + sizeof(dtd->part2)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + return true; +} + +static bool psb_intel_sdvo_get_input_timing(struct psb_intel_output *psb_intel_output, + struct psb_intel_sdvo_dtd *dtd) +{ + return psb_intel_sdvo_get_timing(psb_intel_output, + SDVO_CMD_GET_INPUT_TIMINGS_PART1, + dtd); +} +#if 0 +static bool psb_intel_sdvo_get_output_timing(struct psb_intel_output *psb_intel_output, + struct psb_intel_sdvo_dtd *dtd) +{ + return psb_intel_sdvo_get_timing(psb_intel_output, + SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, + dtd); +} +#endif +static bool psb_intel_sdvo_set_timing(struct psb_intel_output *psb_intel_output, + u8 cmd, struct psb_intel_sdvo_dtd *dtd) +{ + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1, + sizeof(dtd->part1)); + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2, + sizeof(dtd->part2)); + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + return true; +} + +static bool psb_intel_sdvo_set_input_timing(struct psb_intel_output *psb_intel_output, + struct psb_intel_sdvo_dtd *dtd) +{ + return psb_intel_sdvo_set_timing(psb_intel_output, + SDVO_CMD_SET_INPUT_TIMINGS_PART1, + dtd); +} + +static bool psb_intel_sdvo_set_output_timing(struct psb_intel_output *psb_intel_output, + struct psb_intel_sdvo_dtd *dtd) +{ + return psb_intel_sdvo_set_timing(psb_intel_output, + SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, + dtd); +} + +#if 0 +static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output + *psb_intel_output, + struct psb_intel_sdvo_dtd + *dtd) +{ + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, + SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, + NULL, 0); + + status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1, + sizeof(dtd->part1)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + psb_intel_sdvo_write_cmd(psb_intel_output, + SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, + NULL, 0); + status = + psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2, + sizeof(dtd->part2)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + return true; +} +#endif + +static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output + *psb_intel_output) +{ + u8 response, status; + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, + NULL, 0); + status = 
psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
+
+	if (status != SDVO_CMD_STATUS_SUCCESS) {
+		DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
+		return SDVO_CLOCK_RATE_MULT_1X;
+	} else {
+		DRM_DEBUG("Current clock rate multiplier: %d\n", response);
+	}
+
+	return response;
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
+					       *psb_intel_output, u8 val)
+{
+	u8 status;
+
+	psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT,
+				 &val, 1);
+	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+	if (status != SDVO_CMD_STATUS_SUCCESS)
+		return false;
+
+	return true;
+}
+
+static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
+					  u32 in0outputmask,
+					  u32 in1outputmask)
+{
+	u8 byArgs[4];
+	u8 status;
+	int i;
+	struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
+
+	/* Zero all fields of the argument buffer */
+	memset(byArgs, 0, sizeof(byArgs));
+
+	/* Fill in the argument values */
+	byArgs[0] = (u8) (in0outputmask & 0xFF);
+	byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
+	byArgs[2] = (u8) (in1outputmask & 0xFF);
+	byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
+
+	/* save the inoutmap args here so they can be restored later */
+	for (i = 0; i < 4; i++)
+		sdvo_priv->in_out_map[i] = byArgs[i];
+
+	psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
+	status = psb_intel_sdvo_read_response(output, NULL, 0);
+
+	if (status != SDVO_CMD_STATUS_SUCCESS)
+		return false;
+	return true;
+}
+
+static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
+{
+	u32 dwCurrentSDVOIn0 = 0;
+	u32 dwCurrentSDVOIn1 = 0;
+	u32 dwDevMask = 0;
+
+	struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
+
+	/* Please DO NOT change the following code. */
+	/* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
+	/* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
+	if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
+		switch (sdvo_priv->active_device) {
+		case SDVO_DEVICE_LVDS:
+			dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
+			break;
+		case SDVO_DEVICE_TMDS:
+			dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
+			break;
+		case SDVO_DEVICE_TV:
+			dwDevMask =
+			    SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
+			    SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
+			    SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
+			    SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
+			break;
+		case SDVO_DEVICE_CRT:
+			dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
+			break;
+		}
+		dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
+	} else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
+		switch (sdvo_priv->active_device) {
+		case SDVO_DEVICE_LVDS:
+			dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
+			break;
+		case SDVO_DEVICE_TMDS:
+			dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
+			break;
+		case SDVO_DEVICE_TV:
+			dwDevMask =
+			    SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
+			    SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
+			    SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
+			    SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
+			break;
+		case SDVO_DEVICE_CRT:
+			dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
+			break;
+		}
+		dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
+	}
+
+	psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
+				      dwCurrentSDVOIn1);
+}
+
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+				      struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	/* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
+	 * device will be told of the multiplier during mode_set.
+	 */
+	adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
+	return true;
+}
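
A worked example of the fixup (illustrative numbers only): a 640x480@60 mode has a
25.175 MHz dot clock, which is below the 50 MHz threshold in
psb_intel_sdvo_get_pixel_multiplier(), so the multiplier is 4 and the adjusted clock
handed to the CRTC code is 100.7 MHz:

    /* Illustrative only -- mode->clock is in kHz, per DRM convention. */
    struct drm_display_mode m = { .clock = 25175 };	/* 640x480@60 */
    int mult = psb_intel_sdvo_get_pixel_multiplier(&m);	/* 25175 < 50000 -> 4 */
    /* mode_fixup then yields 25175 * 4 = 100700 kHz, keeping the SDVO
     * bus clock at or above 100 MHz. */
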
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_output *psb_intel_output =
+	    enc_to_psb_intel_output(encoder);
+	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+	u16 width, height;
+	u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+	u16 h_sync_offset, v_sync_offset;
+	u32 sdvox;
+	struct psb_intel_sdvo_dtd output_dtd;
+	int sdvo_pixel_multiply;
+
+	if (!mode)
+		return;
+
+	psb_intel_sdvo_set_target_output(psb_intel_output, 0);
+
+	width = mode->crtc_hdisplay;
+	height = mode->crtc_vdisplay;
+
+	/* do some mode translations */
+	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+	/* pack the DTD: each 12-bit timing field is a low byte plus a
+	 * shared high nibble, as in an EDID detailed timing descriptor */
+	output_dtd.part1.clock = mode->clock / 10;
+	output_dtd.part1.h_active = width & 0xff;
+	output_dtd.part1.h_blank = h_blank_len & 0xff;
+	output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
+	    ((h_blank_len >> 8) & 0xf);
+	output_dtd.part1.v_active = height & 0xff;
+	output_dtd.part1.v_blank = v_blank_len & 0xff;
+	output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
+	    ((v_blank_len >> 8) & 0xf);
+
+	output_dtd.part2.h_sync_off = h_sync_offset;
+	output_dtd.part2.h_sync_width = h_sync_len & 0xff;
+	output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+	    (v_sync_len & 0xf);
+	output_dtd.part2.sync_off_width_high =
+	    ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
+	    ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
+
+	output_dtd.part2.dtd_flags = 0x18;
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		output_dtd.part2.dtd_flags |= 0x2;
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		output_dtd.part2.dtd_flags |= 0x4;
+
+	output_dtd.part2.sdvo_flags = 0;
+	output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
+	output_dtd.part2.reserved = 0;
+
+	/* Set the output timing to the screen */
+	psb_intel_sdvo_set_target_output(psb_intel_output,
+					 sdvo_priv->active_outputs);
+
+	/* Set the input timing to the screen. Assume always input 0. */
+	psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
+
+	psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
+
+	/* We would like to use i830_sdvo_create_preferred_input_timing() to
+	 * provide the device with a timing it can support, if it supports that
+	 * feature. However, presumably we would need to adjust the CRTC to
+	 * output the preferred timing, and we don't support that currently.
+	 */
+#if 0
+	success =
+	    psb_intel_sdvo_create_preferred_input_timing(psb_intel_output,
+							 clock, width, height);
+	if (success) {
+		struct psb_intel_sdvo_dtd *input_dtd;
+
+		psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
+							  &input_dtd);
+		psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
+	}
+#else
+	psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
+#endif
+
+	switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
+	case 1:
+		psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
+						   SDVO_CLOCK_RATE_MULT_1X);
+		break;
+	case 2:
+		psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
+						   SDVO_CLOCK_RATE_MULT_2X);
+		break;
+	case 4:
+		psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
+						   SDVO_CLOCK_RATE_MULT_4X);
+		break;
+	}
+
+	/* Set the SDVO control regs. */
+	if (0 /*IS_I965GM(dev) */) {
+		sdvox = SDVO_BORDER_ENABLE;
+	} else {
+		sdvox = REG_READ(sdvo_priv->output_device);
+		switch (sdvo_priv->output_device) {
+		case SDVOB:
+			sdvox &= SDVOB_PRESERVE_MASK;
+			break;
+		case SDVOC:
+			sdvox &= SDVOC_PRESERVE_MASK;
+			break;
+		}
+		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+	}
+	if (psb_intel_crtc->pipe == 1)
+		sdvox |= SDVO_PIPE_B_SELECT;
+
+	sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
+
+#if 0
+	if (IS_I965G(dev)) {
+		/* done in crtc_mode_set as the dpll_md reg must be written
+		 * early */
+	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+		/* done in crtc_mode_set as it lives inside the
+		 * dpll register */
+	} else {
+		sdvox |=
+		    (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+	}
+#endif
+
+	psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
+
+	psb_intel_sdvo_set_iomap(psb_intel_output);
+}
+
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct psb_intel_output *psb_intel_output =
+	    enc_to_psb_intel_output(encoder);
+	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+	u32 temp;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
+		if (0)
+			psb_intel_sdvo_set_encoder_power_state(
+						       psb_intel_output, mode);
+
+		if (mode == DRM_MODE_DPMS_OFF) {
+			temp = REG_READ(sdvo_priv->output_device);
+			if ((temp & SDVO_ENABLE) != 0) {
+				psb_intel_sdvo_write_sdvox(psb_intel_output,
+							   temp &
+							   ~SDVO_ENABLE);
+			}
+		}
+	} else {
+		bool input1, input2;
+		int i;
+		u8 status;
+
+		temp = REG_READ(sdvo_priv->output_device);
+		if ((temp & SDVO_ENABLE) == 0)
+			psb_intel_sdvo_write_sdvox(psb_intel_output,
+						   temp | SDVO_ENABLE);
+		for (i = 0; i < 2; i++)
+			psb_intel_wait_for_vblank(dev);
+
+		status =
+		    psb_intel_sdvo_get_trained_inputs(psb_intel_output,
+						      &input1, &input2);
+
+		/* Warn if the device reported failure to sync.
+		 * A lot of SDVO devices fail to notify of sync, but it's
+		 * a given that if the status is a success, we succeeded.
+ */ + if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { + DRM_DEBUG + ("First %s output reported failure to sync\n", + SDVO_NAME(sdvo_priv)); + } + + if (0) + psb_intel_sdvo_set_encoder_power_state(psb_intel_output, + mode); + psb_intel_sdvo_set_active_outputs(psb_intel_output, + sdvo_priv->active_outputs); + } + return; +} + +static void psb_intel_sdvo_save(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; + /*int o;*/ + + sdvo_priv->save_sdvo_mult = + psb_intel_sdvo_get_clock_rate_mult(psb_intel_output); + psb_intel_sdvo_get_active_outputs(psb_intel_output, + &sdvo_priv->save_active_outputs); + + if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { + psb_intel_sdvo_set_target_input(psb_intel_output, true, false); + psb_intel_sdvo_get_input_timing(psb_intel_output, + &sdvo_priv->save_input_dtd_1); + } + + if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { + psb_intel_sdvo_set_target_input(psb_intel_output, false, true); + psb_intel_sdvo_get_input_timing(psb_intel_output, + &sdvo_priv->save_input_dtd_2); + } + +#if 0 + for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) { + u16 this_output = (1 << o); + if (sdvo_priv->caps.output_flags & this_output) { + psb_intel_sdvo_set_target_output(psb_intel_output, + this_output); + psb_intel_sdvo_get_output_timing(psb_intel_output, + &sdvo_priv-> + save_output_dtd[o]); + } + } +#endif + + sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device); + + /*TODO: save the in_out_map state*/ +} + +static void psb_intel_sdvo_restore(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; + /*int o;*/ + int i; + bool input1, input2; + u8 status; + + psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); + +#if 0 + for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) { + u16 this_output = (1 << o); + if (sdvo_priv->caps.output_flags & this_output) { + psb_intel_sdvo_set_target_output(psb_intel_output, + this_output); + psb_intel_sdvo_set_output_timing(psb_intel_output, + &sdvo_priv-> + save_output_dtd[o]); + } + } +#endif + + if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { + psb_intel_sdvo_set_target_input(psb_intel_output, true, false); + psb_intel_sdvo_set_input_timing(psb_intel_output, + &sdvo_priv->save_input_dtd_1); + } + + if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { + psb_intel_sdvo_set_target_input(psb_intel_output, false, true); + psb_intel_sdvo_set_input_timing(psb_intel_output, + &sdvo_priv->save_input_dtd_2); + } + + psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, + sdvo_priv->save_sdvo_mult); + + REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); + + if (sdvo_priv->save_SDVOX & SDVO_ENABLE) { + for (i = 0; i < 2; i++) + psb_intel_wait_for_vblank(dev); + status = + psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1, + &input2); + if (status == SDVO_CMD_STATUS_SUCCESS && !input1) + DRM_DEBUG + ("First %s output reported failure to sync\n", + SDVO_NAME(sdvo_priv)); + } + + psb_intel_sdvo_set_active_outputs(psb_intel_output, + sdvo_priv->save_active_outputs); + + /*TODO: restore in_out_map*/ + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_IN_OUT_MAP, sdvo_priv->in_out_map, 4); + psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); +} + +static int 
psb_intel_sdvo_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + if (sdvo_priv->pixel_clock_min > mode->clock) + return MODE_CLOCK_LOW; + + if (sdvo_priv->pixel_clock_max < mode->clock) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static bool psb_intel_sdvo_get_capabilities(struct psb_intel_output *psb_intel_output, + struct psb_intel_sdvo_caps *caps) +{ + u8 status; + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, + 0); + status = + psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + return true; +} + +struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB) +{ + struct drm_connector *connector = NULL; + struct psb_intel_output *iout = NULL; + struct psb_intel_sdvo_priv *sdvo; + + /* find the sdvo connector */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + iout = to_psb_intel_output(connector); + + if (iout->type != INTEL_OUTPUT_SDVO) + continue; + + sdvo = iout->dev_priv; + + if (sdvo->output_device == SDVOB && sdvoB) + return connector; + + if (sdvo->output_device == SDVOC && !sdvoB) + return connector; + + } + + return NULL; +} + +int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector) +{ + u8 response[2]; + u8 status; + struct psb_intel_output *psb_intel_output; + DRM_DEBUG("\n"); + + if (!connector) + return 0; + + psb_intel_output = to_psb_intel_output(connector); + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, + NULL, 0); + status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); + + if (response[0] != 0) + return 1; + + return 0; +} + +void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on) +{ + u8 response[2]; + u8 status; + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, + NULL, 0); + psb_intel_sdvo_read_response(psb_intel_output, &response, 2); + + if (on) { + psb_intel_sdvo_write_cmd(psb_intel_output, + SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, + 0); + status = + psb_intel_sdvo_read_response(psb_intel_output, &response, 2); + + psb_intel_sdvo_write_cmd(psb_intel_output, + SDVO_CMD_SET_ACTIVE_HOT_PLUG, + &response, 2); + } else { + response[0] = 0; + response[1] = 0; + psb_intel_sdvo_write_cmd(psb_intel_output, + SDVO_CMD_SET_ACTIVE_HOT_PLUG, + &response, 2); + } + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, + NULL, 0); + psb_intel_sdvo_read_response(psb_intel_output, &response, 2); +} + +static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector + *connector) +{ + u8 response[2]; + u8 status; + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, + NULL, 0); + status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); + + DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); + if ((response[0] != 0) || (response[1] != 0)) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static int psb_intel_sdvo_get_modes(struct drm_connector *connector) +{ + struct psb_intel_output *psb_intel_output = 
to_psb_intel_output(connector); + + /* set the bus switch and get the modes */ + psb_intel_sdvo_set_control_bus_switch(psb_intel_output, + SDVO_CONTROL_BUS_DDC2); + psb_intel_ddc_get_modes(psb_intel_output); + + if (list_empty(&connector->probed_modes)) + return 0; + return 1; +#if 0 + /* Mac mini hack. On this device, I get DDC through the analog, which + * load-detects as disconnected. I fail to DDC through the SDVO DDC, + * but it does load-detect as connected. So, just steal the DDC bits + * from analog when we fail at finding it the right way. + */ + /* TODO */ + return NULL; + + return NULL; +#endif +} + +static void psb_intel_sdvo_destroy(struct drm_connector *connector) +{ + struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); + + if (psb_intel_output->i2c_bus) + psb_intel_i2c_destroy(psb_intel_output->i2c_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(psb_intel_output); +} + +static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { + .dpms = psb_intel_sdvo_dpms, + .mode_fixup = psb_intel_sdvo_mode_fixup, + .prepare = psb_intel_encoder_prepare, + .mode_set = psb_intel_sdvo_mode_set, + .commit = psb_intel_encoder_commit, +}; + +static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .save = psb_intel_sdvo_save, + .restore = psb_intel_sdvo_restore, + .detect = psb_intel_sdvo_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = psb_intel_sdvo_destroy, +}; + +static const struct drm_connector_helper_funcs + psb_intel_sdvo_connector_helper_funcs = { + .get_modes = psb_intel_sdvo_get_modes, + .mode_valid = psb_intel_sdvo_mode_valid, + .best_encoder = psb_intel_best_encoder, +}; + +void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { + .destroy = psb_intel_sdvo_enc_destroy, +}; + + +void psb_intel_sdvo_init(struct drm_device *dev, int output_device) +{ + struct drm_connector *connector; + struct psb_intel_output *psb_intel_output; + struct psb_intel_sdvo_priv *sdvo_priv; + struct psb_intel_i2c_chan *i2cbus = NULL; + int connector_type; + u8 ch[0x40]; + int i; + int encoder_type, output_id; + + psb_intel_output = + kcalloc(sizeof(struct psb_intel_output) + + sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL); + if (!psb_intel_output) + return; + + connector = &psb_intel_output->base; + + drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs, + DRM_MODE_CONNECTOR_Unknown); + drm_connector_helper_add(connector, + &psb_intel_sdvo_connector_helper_funcs); + sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1); + psb_intel_output->type = INTEL_OUTPUT_SDVO; + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + /* setup the DDC bus. 
*/
+	if (output_device == SDVOB)
+		i2cbus =
+		    psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+	else
+		i2cbus =
+		    psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+
+	if (!i2cbus)
+		goto err_connector;
+
+	sdvo_priv->i2c_bus = i2cbus;
+
+	if (output_device == SDVOB) {
+		output_id = 1;
+		sdvo_priv->by_input_wiring = SDVOB_IN0;
+		sdvo_priv->i2c_bus->slave_addr = 0x38;
+	} else {
+		output_id = 2;
+		sdvo_priv->i2c_bus->slave_addr = 0x39;
+	}
+
+	sdvo_priv->output_device = output_device;
+	psb_intel_output->i2c_bus = i2cbus;
+	psb_intel_output->dev_priv = sdvo_priv;
+
+	/* Read the regs to test if we can talk to the device */
+	for (i = 0; i < 0x40; i++) {
+		if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
+			DRM_DEBUG("No SDVO device found on SDVO%c\n",
+				  output_device == SDVOB ? 'B' : 'C');
+			goto err_i2c;
+		}
+	}
+
+	psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
+
+	memset(&sdvo_priv->active_outputs, 0,
+	       sizeof(sdvo_priv->active_outputs));
+
+	/* TODO, CVBS, SVID, YPRPB & SCART outputs. */
+	if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
+		sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
+		sdvo_priv->active_device = SDVO_DEVICE_CRT;
+		connector->display_info.subpixel_order =
+		    SubPixelHorizontalRGB;
+		encoder_type = DRM_MODE_ENCODER_DAC;
+		connector_type = DRM_MODE_CONNECTOR_VGA;
+	} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
+		sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
+		sdvo_priv->active_device = SDVO_DEVICE_CRT;
+		connector->display_info.subpixel_order =
+		    SubPixelHorizontalRGB;
+		encoder_type = DRM_MODE_ENCODER_DAC;
+		connector_type = DRM_MODE_CONNECTOR_VGA;
+	} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
+		sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
+		sdvo_priv->active_device = SDVO_DEVICE_TMDS;
+		connector->display_info.subpixel_order =
+		    SubPixelHorizontalRGB;
+		encoder_type = DRM_MODE_ENCODER_TMDS;
+		connector_type = DRM_MODE_CONNECTOR_DVID;
+	} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
+		sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
+		sdvo_priv->active_device = SDVO_DEVICE_TMDS;
+		connector->display_info.subpixel_order =
+		    SubPixelHorizontalRGB;
+		encoder_type = DRM_MODE_ENCODER_TMDS;
+		connector_type = DRM_MODE_CONNECTOR_DVID;
+	} else {
+		unsigned char bytes[2];
+
+		memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+		DRM_DEBUG
+		    ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
+		     SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
+		goto err_i2c;
+	}
+
+	drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
+			 encoder_type);
+	drm_encoder_helper_add(&psb_intel_output->enc,
+			       &psb_intel_sdvo_helper_funcs);
+	connector->connector_type = connector_type;
+
+	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+					  &psb_intel_output->enc);
+	drm_sysfs_connector_add(connector);
+
+	/* Set the input timing to the screen. Assume always input 0. */
+	psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
+
+	psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
+						   &sdvo_priv->pixel_clock_min,
+						   &sdvo_priv->
+						   pixel_clock_max);
+
+	DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
+		  "clock range %dMHz - %dMHz, "
+		  "input 1: %c, input 2: %c, "
+		  "output 1: %c, output 2: %c\n",
+		  SDVO_NAME(sdvo_priv),
+		  sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+		  sdvo_priv->caps.device_rev_id,
+		  sdvo_priv->pixel_clock_min / 1000,
+		  sdvo_priv->pixel_clock_max / 1000,
+		  (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ?
'Y' : 'N', + (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', + /* check currently supported outputs */ + sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', + sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); + + psb_intel_output->ddc_bus = i2cbus; + + return; + +err_i2c: + psb_intel_i2c_destroy(psb_intel_output->i2c_bus); +err_connector: + drm_connector_cleanup(connector); + kfree(psb_intel_output); + + return; +} diff --git a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h new file mode 100644 index 0000000..bf3d72e --- /dev/null +++ b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h @@ -0,0 +1,345 @@ +/* + * Copyright (c) 2008, Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + */ + +/** + * @file SDVO command definitions and structures. 
+ */ + +#define SDVO_OUTPUT_FIRST (0) +#define SDVO_OUTPUT_TMDS0 (1 << 0) +#define SDVO_OUTPUT_RGB0 (1 << 1) +#define SDVO_OUTPUT_CVBS0 (1 << 2) +#define SDVO_OUTPUT_SVID0 (1 << 3) +#define SDVO_OUTPUT_YPRPB0 (1 << 4) +#define SDVO_OUTPUT_SCART0 (1 << 5) +#define SDVO_OUTPUT_LVDS0 (1 << 6) +#define SDVO_OUTPUT_TMDS1 (1 << 8) +#define SDVO_OUTPUT_RGB1 (1 << 9) +#define SDVO_OUTPUT_CVBS1 (1 << 10) +#define SDVO_OUTPUT_SVID1 (1 << 11) +#define SDVO_OUTPUT_YPRPB1 (1 << 12) +#define SDVO_OUTPUT_SCART1 (1 << 13) +#define SDVO_OUTPUT_LVDS1 (1 << 14) +#define SDVO_OUTPUT_LAST (14) + +struct psb_intel_sdvo_caps { + u8 vendor_id; + u8 device_id; + u8 device_rev_id; + u8 sdvo_version_major; + u8 sdvo_version_minor; + unsigned int sdvo_inputs_mask:2; + unsigned int smooth_scaling:1; + unsigned int sharp_scaling:1; + unsigned int up_scaling:1; + unsigned int down_scaling:1; + unsigned int stall_support:1; + unsigned int pad:1; + u16 output_flags; +} __attribute__ ((packed)); + +/** This matches the EDID DTD structure, more or less */ +struct psb_intel_sdvo_dtd { + struct { + u16 clock; /**< pixel clock, in 10kHz units */ + u8 h_active; /**< lower 8 bits (pixels) */ + u8 h_blank; /**< lower 8 bits (pixels) */ + u8 h_high; /**< upper 4 bits each h_active, h_blank */ + u8 v_active; /**< lower 8 bits (lines) */ + u8 v_blank; /**< lower 8 bits (lines) */ + u8 v_high; /**< upper 4 bits each v_active, v_blank */ + } part1; + + struct { + u8 h_sync_off; + /**< lower 8 bits, from hblank start */ + u8 h_sync_width;/**< lower 8 bits (pixels) */ + /** lower 4 bits each vsync offset, vsync width */ + u8 v_sync_off_width; + /** + * 2 high bits of hsync offset, 2 high bits of hsync width, + * bits 4-5 of vsync offset, and 2 high bits of vsync width. + */ + u8 sync_off_width_high; + u8 dtd_flags; + u8 sdvo_flags; + /** bits 6-7 of vsync offset at bits 6-7 */ + u8 v_sync_off_high; + u8 reserved; + } part2; +} __attribute__ ((packed)); + +struct psb_intel_sdvo_pixel_clock_range { + u16 min; /**< pixel clock, in 10kHz units */ + u16 max; /**< pixel clock, in 10kHz units */ +} __attribute__ ((packed)); + +struct psb_intel_sdvo_preferred_input_timing_args { + u16 clock; + u16 width; + u16 height; +} __attribute__ ((packed)); + +/* I2C registers for SDVO */ +#define SDVO_I2C_ARG_0 0x07 +#define SDVO_I2C_ARG_1 0x06 +#define SDVO_I2C_ARG_2 0x05 +#define SDVO_I2C_ARG_3 0x04 +#define SDVO_I2C_ARG_4 0x03 +#define SDVO_I2C_ARG_5 0x02 +#define SDVO_I2C_ARG_6 0x01 +#define SDVO_I2C_ARG_7 0x00 +#define SDVO_I2C_OPCODE 0x08 +#define SDVO_I2C_CMD_STATUS 0x09 +#define SDVO_I2C_RETURN_0 0x0a +#define SDVO_I2C_RETURN_1 0x0b +#define SDVO_I2C_RETURN_2 0x0c +#define SDVO_I2C_RETURN_3 0x0d +#define SDVO_I2C_RETURN_4 0x0e +#define SDVO_I2C_RETURN_5 0x0f +#define SDVO_I2C_RETURN_6 0x10 +#define SDVO_I2C_RETURN_7 0x11 +#define SDVO_I2C_VENDOR_BEGIN 0x20 + +/* Status results */ +#define SDVO_CMD_STATUS_POWER_ON 0x0 +#define SDVO_CMD_STATUS_SUCCESS 0x1 +#define SDVO_CMD_STATUS_NOTSUPP 0x2 +#define SDVO_CMD_STATUS_INVALID_ARG 0x3 +#define SDVO_CMD_STATUS_PENDING 0x4 +#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5 +#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6 + +/* SDVO commands, argument/result registers */ + +#define SDVO_CMD_RESET 0x01 + +/** Returns a struct psb_intel_sdvo_caps */ +#define SDVO_CMD_GET_DEVICE_CAPS 0x02 + +#define SDVO_CMD_GET_FIRMWARE_REV 0x86 +# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0 +# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 +# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 + +/** + * Reports 
which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+	unsigned int input0_trained:1;
+	unsigned int input1_trained:1;
+	unsigned int pad:6;
+} __attribute__ ((packed));
+
+/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct psb_intel_sdvo_output_flags. Must be preceded by a
+ * SET_IN_OUT_MAP on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct psb_intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct psb_intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot
+ * plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct psb_intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct psb_intel_sdvo_get_interrupt_event_source_response {
+	u16 interrupt_status;
+	unsigned int ambient_light_interrupt:1;
+	unsigned int pad:7;
+} __attribute__ ((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct psb_intel_sdvo_set_target_input_args {
+	unsigned int target_1:1;
+	unsigned int pad:7;
+} __attribute__ ((packed));
+
+/**
+ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
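
Tying the opcode to the I2C register map above: programming a target output follows
the same write/read handshake that psb_intel_sdvo_write_cmd() and
psb_intel_sdvo_read_response() implement in psb_intel_sdvo.c. A condensed sketch,
error handling omitted:

    /* Sketch: argument bytes go to SDVO_I2C_ARG_0 - i (descending), then
     * the opcode to SDVO_I2C_OPCODE; SDVO_I2C_CMD_STATUS is polled until
     * it leaves SDVO_CMD_STATUS_PENDING, and any results are read back
     * from SDVO_I2C_RETURN_0 upward (none expected here). */
    u16 outputs = SDVO_OUTPUT_TMDS0;
    u8 status;

    psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_OUTPUT,
    			 &outputs, sizeof(outputs));
    status = psb_intel_sdvo_read_response(output, NULL, 0);
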
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAGS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
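+ *
+ * Typical use: issue CREATE_PREFERRED_INPUT_TIMING with the desired clock,
+ * width, height and flags, then read the generated DTD back with
+ * GET_PREFERRED_INPUT_TIMING_PART1/2.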
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM 0x0
+# define SDVO_CONTROL_BUS_DDC1 0x1
+# define SDVO_CONTROL_BUS_DDC2 0x2
+# define SDVO_CONTROL_BUS_DDC3 0x3
+
+/* SDVO Bus & SDVO Inputs wiring details */
+/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no) */
+/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no) */
+/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no) */
+/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no) */
+#define SDVOB_IN0 0x01
+#define SDVOB_IN1 0x02
+#define SDVOC_IN0 0x04
+#define SDVOC_IN1 0x08
+
+#define SDVO_DEVICE_NONE 0x00
+#define SDVO_DEVICE_CRT 0x01
+#define SDVO_DEVICE_TV 0x02
+#define SDVO_DEVICE_LVDS 0x04
+#define SDVO_DEVICE_TMDS 0x08
+
diff --git a/drivers/gpu/drm/psb/psb_irq.c b/drivers/gpu/drm/psb/psb_irq.c
new file mode 100644
index 0000000..983e2ad
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_irq.c
@@ -0,0 +1,621 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_msvdx.h"
+#include "lnc_topaz.h"
+#include "psb_intel_reg.h"
+#include "psb_powermgmt.h"
+
+/*
+ * Video display controller interrupt.
+ */
+
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+
+	if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
+#ifdef PSB_FIXME
+		atomic_inc(&dev->vbl_received);
+#endif
+		PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
+			   PIPE_VBLANK_CLEAR, PIPEASTAT);
+		drm_handle_vblank(dev, 0);
+	}
+
+	if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
+#ifdef PSB_FIXME
+		atomic_inc(&dev->vbl_received2);
+#endif
+		PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
+			   PIPE_VBLANK_CLEAR, PIPEBSTAT);
+		drm_handle_vblank(dev, 1);
+	}
+}
+
+/*
+ * SGX interrupt source 1.
+ */
+
+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
+			      uint32_t sgx_stat2)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+
+	if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
+		DRM_WAKEUP(&dev_priv->event_2d_queue);
+		psb_fence_handler(dev, PSB_ENGINE_2D);
+	}
+
+	if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
+		psb_print_pagefault(dev_priv);
+
+	psb_scheduler_handler(dev_priv, sgx_stat);
+}
+
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+
+	uint32_t vdc_stat, msvdx_int = 0, topaz_int = 0;
+	uint32_t sgx_stat = 0;
+	uint32_t sgx_stat2 = 0;
+	uint32_t sgx_int = 0;
+	int handled = 0;
+
+	spin_lock(&dev_priv->irqmask_lock);
+
+	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+	if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
+		PSB_DEBUG_IRQ("Got SGX interrupt\n");
+		sgx_int = 1;
+	}
+	if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
+		PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
+		msvdx_int = 1;
+	}
+
+	if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
+		PSB_DEBUG_IRQ("Got TOPAZ interrupt\n");
+		topaz_int = 1;
+	}
+	if (sgx_int && powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) {
+		sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+		sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+
+		sgx_stat2 &= dev_priv->sgx2_irq_mask;
+		sgx_stat &= dev_priv->sgx_irq_mask;
+		PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
+		PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
+		(void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
+	} else if (unlikely(PSB_D_PM & drm_psb_debug)) {
+		if (sgx_int)
+			PSB_DEBUG_PM("sgx int in down mode\n");
+	}
+	vdc_stat &= dev_priv->vdc_irq_mask;
+	spin_unlock(&dev_priv->irqmask_lock);
+
+	if (msvdx_int &&
+	    powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)) {
+		uint32_t msvdx_stat = 0;
+
+		msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
+		psb_msvdx_interrupt(dev, msvdx_stat);
+		handled = 1;
+	}
+
+	if (IS_MRST(dev) && topaz_int &&
+	    powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND)) {
+		/* sometimes, even when topaz is powered down, the IIR
+		 * may still have the topaz bit set
+		 */
+		uint32_t topaz_stat = 0;
+
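+		/* Read and dispatch the encoder status; the register is
+		 * only readable while the encode island is powered, hence
+		 * the powermgmt_is_hw_on() check above.
+		 */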
TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT,&topaz_stat); + lnc_topaz_interrupt (dev, topaz_stat); + handled = 1; + } + + if (vdc_stat && powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) { + psb_vdc_interrupt(dev, vdc_stat); + handled = 1; + } + + if (sgx_stat || sgx_stat2) { + psb_sgx_interrupt(dev, sgx_stat, sgx_stat2); + handled = 1; + } + + PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R); + (void) PSB_RVDC32(PSB_INT_IDENTITY_R); + DRM_READMEMORYBARRIER(); + + if (!handled) + return IRQ_NONE; + + + return IRQ_HANDLED; +} + +void psb_irq_preinstall(struct drm_device *dev) +{ + psb_irq_preinstall_islands(dev, PSB_ALL_ISLANDS); +} + +void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + + if (hw_islands & PSB_DISPLAY_ISLAND) { + if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) { + if (IS_POULSBO(dev)) + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); + if (dev->vblank_enabled[0]) + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; + if (dev->vblank_enabled[1]) + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; + } + } + + if (hw_islands & PSB_GRAPHICS_ISLAND) { + if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) { + PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); + (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); + + dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER | + _PSB_CE_DPM_3D_MEM_FREE | + _PSB_CE_TA_FINISHED | + _PSB_CE_DPM_REACHED_MEM_THRESH | + _PSB_CE_DPM_OUT_OF_MEMORY_GBL | + _PSB_CE_DPM_OUT_OF_MEMORY_MT | + _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT; + + dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT; + dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG; + } + } + + if (hw_islands & PSB_VIDEO_DEC_ISLAND) + if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)) + dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG; + + if (hw_islands & PSB_VIDEO_ENC_ISLAND) + if (IS_MRST(dev) && powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND)) + dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG; + + /*This register is safe even if display island is off*/ + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); + + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); +} + +int psb_irq_postinstall(struct drm_device *dev) +{ + return psb_irq_postinstall_islands(dev, PSB_ALL_ISLANDS); +} + +int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + + /*This register is safe even if display island is off*/ + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); + + if (hw_islands & PSB_DISPLAY_ISLAND) { + if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) { + if (IS_POULSBO(dev)) + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); + if (dev->vblank_enabled[0]) { + if (IS_MRST(dev)) + psb_enable_pipestat(dev_priv, 0, + PIPE_START_VBLANK_INTERRUPT_ENABLE | + PIPE_VBLANK_INTERRUPT_ENABLE); + else + psb_enable_pipestat(dev_priv, 0, + PIPE_VBLANK_INTERRUPT_ENABLE); + } else + psb_disable_pipestat(dev_priv, 0, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); + + if (dev->vblank_enabled[1]) { + if (IS_MRST(dev)) + psb_enable_pipestat(dev_priv, 1, + PIPE_START_VBLANK_INTERRUPT_ENABLE | + PIPE_VBLANK_INTERRUPT_ENABLE); + else + psb_enable_pipestat(dev_priv, 1, + PIPE_VBLANK_INTERRUPT_ENABLE); + } else + psb_disable_pipestat(dev_priv, 1, 
+ PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); + } + } + + if (hw_islands & PSB_GRAPHICS_ISLAND) { + if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) { + PSB_WSGX32(dev_priv->sgx2_irq_mask, + PSB_CR_EVENT_HOST_ENABLE2); + PSB_WSGX32(dev_priv->sgx_irq_mask, + PSB_CR_EVENT_HOST_ENABLE); + (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); + } + } + + if (IS_MRST(dev)) + if (hw_islands & PSB_VIDEO_ENC_ISLAND) + if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND)) + lnc_topaz_enableirq(dev); + + if (hw_islands & PSB_VIDEO_DEC_ISLAND) + if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)) + psb_msvdx_enableirq(dev); + + if (hw_islands == PSB_ALL_ISLANDS) + dev_priv->irq_enabled = 1; + + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); + + return 0; +} + +void psb_irq_uninstall(struct drm_device *dev) +{ + psb_irq_uninstall_islands(dev, PSB_ALL_ISLANDS); +} + +void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + + if (hw_islands & PSB_DISPLAY_ISLAND) { + if (powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)) { + if (IS_POULSBO(dev)) + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); + if (dev->vblank_enabled[0]) + psb_disable_pipestat(dev_priv, 0, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); + if (dev->vblank_enabled[1]) + psb_disable_pipestat(dev_priv, 1, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); + } + dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | + _PSB_IRQ_MSVDX_FLAG | + _LNC_IRQ_TOPAZ_FLAG; + } + + if (hw_islands & PSB_GRAPHICS_ISLAND) { + dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG; + if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) { + dev_priv->sgx_irq_mask = 0x00000000; + dev_priv->sgx2_irq_mask = 0x00000000; + PSB_WSGX32(dev_priv->sgx_irq_mask, + PSB_CR_EVENT_HOST_ENABLE); + PSB_WSGX32(dev_priv->sgx2_irq_mask, + PSB_CR_EVENT_HOST_ENABLE2); + } + } + + if (hw_islands & PSB_VIDEO_DEC_ISLAND) + dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG; + + if (hw_islands & PSB_VIDEO_ENC_ISLAND) + dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG; + + /*These two registers are safe even if display island is off*/ + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); + + wmb(); + + /*This register is safe even if display island is off*/ + PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R); + + if (hw_islands & PSB_GRAPHICS_ISLAND) { + if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) { + PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), + PSB_CR_EVENT_HOST_CLEAR); + PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), + PSB_CR_EVENT_HOST_CLEAR2); + } + } + + if (IS_MRST(dev)) + if (hw_islands & PSB_VIDEO_ENC_ISLAND) + if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND)) + lnc_topaz_disableirq(dev); + if (hw_islands & PSB_VIDEO_DEC_ISLAND) + if (powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)) + psb_msvdx_disableirq(dev); + + + if (hw_islands == PSB_ALL_ISLANDS) + dev_priv->irq_enabled = 0; + + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); +} + +void psb_2D_irq_off(struct drm_psb_private *dev_priv) +{ + unsigned long irqflags; + uint32_t old_mask; + uint32_t cleared_mask; + struct drm_device *dev; + + dev = container_of((void *) dev_priv, struct drm_device, dev_private); + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + 
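+	/* The 2D-complete interrupt enable is reference counted; the mask
+	 * bit is dropped only when the count of enablers returns to zero.
+	 */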
--dev_priv->irqen_count_2d; + if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) { + + old_mask = dev_priv->sgx_irq_mask; + dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE; + if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) { + PSB_WSGX32(dev_priv->sgx_irq_mask, + PSB_CR_EVENT_HOST_ENABLE); + (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); + + cleared_mask = + (old_mask ^ dev_priv->sgx_irq_mask) & old_mask; + PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR); + (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR); + } + } + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); +} + +void psb_2D_irq_on(struct drm_psb_private *dev_priv) +{ + unsigned long irqflags; + struct drm_device *dev; + + dev = container_of((void *) dev_priv, struct drm_device, dev_private); + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) { + dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE; + if (powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND)) { + PSB_WSGX32(dev_priv->sgx_irq_mask, + PSB_CR_EVENT_HOST_ENABLE); + (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); + } + } + ++dev_priv->irqen_count_2d; + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); +} + +#ifdef PSB_FIXME +static int psb_vblank_do_wait(struct drm_device *dev, + unsigned int *sequence, atomic_t *counter) +{ + unsigned int cur_vblank; + int ret = 0; + DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, + (((cur_vblank = atomic_read(counter)) + - *sequence) <= (1 << 23))); + *sequence = cur_vblank; + + return ret; +} +#endif + + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +int psb_enable_vblank(struct drm_device *dev, int pipe) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + unsigned long irqflags; + int pipeconf_reg = (pipe == 0) ? 
PIPEACONF : PIPEBCONF; + u32 pipeconf = 0; + + if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) { + pipeconf = REG_READ(pipeconf_reg); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } + if (!(pipeconf & PIPEACONF_ENABLE)) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) { + drm_psb_disable_vsync = 0; + if (pipe == 0) + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; + else + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); + if (IS_MRST(dev)) { + psb_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE | + PIPE_VBLANK_INTERRUPT_ENABLE); + } else + psb_enable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); + + return 0; +} + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +void psb_disable_vblank(struct drm_device *dev, int pipe) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + unsigned long irqflags; + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) { + if (pipe == 0) + dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG; + else + dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG; + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); + psb_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); +} + +static inline u32 +psb_pipestat(int pipe) +{ + if (pipe == 0) + return PIPEASTAT; + if (pipe == 1) + return PIPEBSTAT; + BUG(); +} + +void +psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask) +{ + if ((dev_priv->pipestat[pipe] & mask) != mask) { + u32 reg = psb_pipestat(pipe); + dev_priv->pipestat[pipe] |= mask; + /* Enable the interrupt, clear any pending status */ + if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) { + u32 writeVal = PSB_RVDC32(reg); + writeVal |= (mask | (mask >> 16)); + PSB_WVDC32(writeVal, reg); + (void) PSB_RVDC32(reg); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } + } +} + +void +psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask) +{ + if ((dev_priv->pipestat[pipe] & mask) != 0) { + u32 reg = psb_pipestat(pipe); + dev_priv->pipestat[pipe] &= ~mask; + if (powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_DISPLAY_ISLAND, false)) { + u32 writeVal = PSB_RVDC32(reg); + writeVal &= ~mask; + PSB_WVDC32(writeVal, reg); + (void) PSB_RVDC32(reg); + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + } + } +} + +/** + * psb_pipe_enabled - check if a pipe is enabled + * @dev: DRM device + * @pipe: pipe to check + * + * Reading certain registers when the pipe is disabled can hang the chip. + * Use this routine to make sure the PLL is running and the pipe is active + * before reading such registers if unsure. + */ +static int +psb_pipe_enabled(struct drm_device *dev, int pipe) +{ + unsigned long pipeconf = pipe ? 
PIPEBCONF : PIPEACONF;
+	int ret = 0;
+
+	if (powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) {
+		ret = (REG_READ(pipeconf) & PIPEACONF_ENABLE);
+		powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+	}
+
+	return ret;
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	unsigned long high_frame;
+	unsigned long low_frame;
+	u32 high1, high2, low;
+	u32 count = 0;
+
+	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
+	if (!powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false))
+		return 0;
+
+	if (!psb_pipe_enabled(dev, pipe)) {
+		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+		goto psb_get_vblank_counter_exit;
+	}
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+		       PIPE_FRAME_LOW_SHIFT);
+		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+	} while (high1 != high2);
+
+	count = (high1 << 8) | low;
+
+psb_get_vblank_counter_exit:
+
+	powermgmt_using_hw_end(PSB_DISPLAY_ISLAND);
+
+	return count;
+}
diff --git a/drivers/gpu/drm/psb/psb_mmu.c b/drivers/gpu/drm/psb/psb_mmu.c
new file mode 100644
index 0000000..d3ff8e0
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_mmu.c
@@ -0,0 +1,1073 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_powermgmt.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposes may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller needs to insert the page using a workqueue function,
+ * but on average it should be fast.
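+ *
+ * Roughly: such a variant would allocate page tables with GFP_ATOMIC and
+ * return -ENOMEM instead of sleeping; the caller would then retry the
+ * insertion from a workqueue.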
+ */ + +struct psb_mmu_driver { + /* protects driver- and pd structures. Always take in read mode + * before taking the page table spinlock. + */ + struct rw_semaphore sem; + + /* protects page tables, directory tables and pt tables. + * and pt structures. + */ + spinlock_t lock; + + atomic_t needs_tlbflush; + + uint8_t __iomem *register_map; + struct psb_mmu_pd *default_pd; + uint32_t bif_ctrl; + int has_clflush; + int clflush_add; + unsigned long clflush_mask; + + struct drm_psb_private *dev_priv; +}; + +struct psb_mmu_pd; + +struct psb_mmu_pt { + struct psb_mmu_pd *pd; + uint32_t index; + uint32_t count; + struct page *p; + uint32_t *v; +}; + +struct psb_mmu_pd { + struct psb_mmu_driver *driver; + int hw_context; + struct psb_mmu_pt **tables; + struct page *p; + struct page *dummy_pt; + struct page *dummy_page; + uint32_t pd_mask; + uint32_t invalid_pde; + uint32_t invalid_pte; +}; + +void topaz_mmu_flushcache(struct drm_psb_private *dev_priv); + +static inline uint32_t psb_mmu_pt_index(uint32_t offset) +{ + return (offset >> PSB_PTE_SHIFT) & 0x3FF; +} + +static inline uint32_t psb_mmu_pd_index(uint32_t offset) +{ + return offset >> PSB_PDE_SHIFT; +} + +#if defined(CONFIG_X86) +static inline void psb_clflush(void *addr) +{ + __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); +} + +static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, + void *addr) +{ + if (!driver->has_clflush) + return; + + mb(); + psb_clflush(addr); + mb(); +} +#else + +static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, + void *addr) +{; +} + +#endif + +static inline void psb_iowrite32(const struct psb_mmu_driver *d, + uint32_t val, uint32_t offset) +{ + iowrite32(val, d->register_map + offset); +} + +static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d, + uint32_t offset) +{ + return ioread32(d->register_map + offset); +} + +static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, + int force) +{ + if (atomic_read(&driver->needs_tlbflush) || force) { + uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL); + psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC, + PSB_CR_BIF_CTRL); + wmb(); + psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC, + PSB_CR_BIF_CTRL); + (void)psb_ioread32(driver, PSB_CR_BIF_CTRL); + if (driver->dev_priv) { + atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1); + if (IS_MRST(driver->dev_priv->dev)) + topaz_mmu_flushcache(driver->dev_priv); + } + } + atomic_set(&driver->needs_tlbflush, 0); +} + +static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) +{ + down_write(&driver->sem); + psb_mmu_flush_pd_locked(driver, force); + up_write(&driver->sem); +} + +void psb_mmu_flush(struct psb_mmu_driver *driver) +{ + uint32_t val; + + if (powermgmt_using_hw_begin(driver->dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, false)) { + down_write(&driver->sem); + val = psb_ioread32(driver, PSB_CR_BIF_CTRL); + if (atomic_read(&driver->needs_tlbflush)) + psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC, + PSB_CR_BIF_CTRL); + else + psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH, + PSB_CR_BIF_CTRL); + wmb(); + psb_iowrite32(driver, + val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC), + PSB_CR_BIF_CTRL); + (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); + atomic_set(&driver->needs_tlbflush, 0); + up_write(&driver->sem); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + } else { + PSB_DEBUG_PM("mmu flush when down\n"); + } + + down_write(&driver->sem); + if (driver->dev_priv) { + atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1); + if 
(IS_MRST(driver->dev_priv->dev)) + topaz_mmu_flushcache(driver->dev_priv); + } + + up_write(&driver->sem); +} + +void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) +{ + uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 : + PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4; + + ttm_tt_cache_flush(&pd->p, 1); + down_write(&pd->driver->sem); + psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), + offset); + wmb(); + psb_mmu_flush_pd_locked(pd->driver, 1); + pd->hw_context = hw_context; + up_write(&pd->driver->sem); + +} + +static inline unsigned long psb_pd_addr_end(unsigned long addr, + unsigned long end) +{ + + addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; + return (addr < end) ? addr : end; +} + +static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type) +{ + uint32_t mask = PSB_PTE_VALID; + + if (type & PSB_MMU_CACHED_MEMORY) + mask |= PSB_PTE_CACHED; + if (type & PSB_MMU_RO_MEMORY) + mask |= PSB_PTE_RO; + if (type & PSB_MMU_WO_MEMORY) + mask |= PSB_PTE_WO; + + return (pfn << PAGE_SHIFT) | mask; +} + +struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, + int trap_pagefaults, int invalid_type) +{ + struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); + uint32_t *v; + int i; + + if (!pd) + return NULL; + + pd->p = alloc_page(GFP_DMA32); + if (!pd->p) + goto out_err1; + pd->dummy_pt = alloc_page(GFP_DMA32); + if (!pd->dummy_pt) + goto out_err2; + pd->dummy_page = alloc_page(GFP_DMA32); + if (!pd->dummy_page) + goto out_err3; + + if (!trap_pagefaults) { + pd->invalid_pde = + psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), + invalid_type); + pd->invalid_pte = + psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), + invalid_type); + } else { + pd->invalid_pde = 0; + pd->invalid_pte = 0; + } + + v = kmap(pd->dummy_pt); + for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) + v[i] = pd->invalid_pte; + + kunmap(pd->dummy_pt); + + v = kmap(pd->p); + for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) + v[i] = pd->invalid_pde; + + kunmap(pd->p); + + clear_page(kmap(pd->dummy_page)); + kunmap(pd->dummy_page); + + pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); + if (!pd->tables) + goto out_err4; + + pd->hw_context = -1; + pd->pd_mask = PSB_PTE_VALID; + pd->driver = driver; + + return pd; + +out_err4: + __free_page(pd->dummy_page); +out_err3: + __free_page(pd->dummy_pt); +out_err2: + __free_page(pd->p); +out_err1: + kfree(pd); + return NULL; +} + +void psb_mmu_free_pt(struct psb_mmu_pt *pt) +{ + __free_page(pt->p); + kfree(pt); +} + +void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) +{ + struct psb_mmu_driver *driver = pd->driver; + struct psb_mmu_pt *pt; + int i; + + down_write(&driver->sem); + if (pd->hw_context != -1) { + psb_iowrite32(driver, 0, + PSB_CR_BIF_DIR_LIST_BASE0 + + pd->hw_context * 4); + psb_mmu_flush_pd_locked(driver, 1); + } + + /* Should take the spinlock here, but we don't need to do that + since we have the semaphore in write mode. 
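+	   Lock order is the driver semaphore (read mode) first, then the
+	   page table spinlock, so holding the semaphore in write mode
+	   excludes every other user of the tables.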
*/ + + for (i = 0; i < 1024; ++i) { + pt = pd->tables[i]; + if (pt) + psb_mmu_free_pt(pt); + } + + vfree(pd->tables); + __free_page(pd->dummy_page); + __free_page(pd->dummy_pt); + __free_page(pd->p); + kfree(pd); + up_write(&driver->sem); +} + +static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) +{ + struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL); + void *v; + uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; + uint32_t clflush_count = PAGE_SIZE / clflush_add; + spinlock_t *lock = &pd->driver->lock; + uint8_t *clf; + uint32_t *ptes; + int i; + + if (!pt) + return NULL; + + pt->p = alloc_page(GFP_DMA32); + if (!pt->p) { + kfree(pt); + return NULL; + } + + spin_lock(lock); + + v = kmap_atomic(pt->p, KM_USER0); + clf = (uint8_t *) v; + ptes = (uint32_t *) v; + for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) + *ptes++ = pd->invalid_pte; + + +#if defined(CONFIG_X86) + if (pd->driver->has_clflush && pd->hw_context != -1) { + mb(); + for (i = 0; i < clflush_count; ++i) { + psb_clflush(clf); + clf += clflush_add; + } + mb(); + } +#endif + kunmap_atomic(v, KM_USER0); + spin_unlock(lock); + + pt->count = 0; + pt->pd = pd; + pt->index = 0; + + return pt; +} + +struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, + unsigned long addr) +{ + uint32_t index = psb_mmu_pd_index(addr); + struct psb_mmu_pt *pt; + uint32_t *v; + spinlock_t *lock = &pd->driver->lock; + + spin_lock(lock); + pt = pd->tables[index]; + while (!pt) { + spin_unlock(lock); + pt = psb_mmu_alloc_pt(pd); + if (!pt) + return NULL; + spin_lock(lock); + + if (pd->tables[index]) { + spin_unlock(lock); + psb_mmu_free_pt(pt); + spin_lock(lock); + pt = pd->tables[index]; + continue; + } + + v = kmap_atomic(pd->p, KM_USER0); + pd->tables[index] = pt; + v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; + pt->index = index; + kunmap_atomic((void *) v, KM_USER0); + + if (pd->hw_context != -1) { + psb_mmu_clflush(pd->driver, (void *) &v[index]); + atomic_set(&pd->driver->needs_tlbflush, 1); + } + } + pt->v = kmap_atomic(pt->p, KM_USER0); + return pt; +} + +static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, + unsigned long addr) +{ + uint32_t index = psb_mmu_pd_index(addr); + struct psb_mmu_pt *pt; + spinlock_t *lock = &pd->driver->lock; + + spin_lock(lock); + pt = pd->tables[index]; + if (!pt) { + spin_unlock(lock); + return NULL; + } + pt->v = kmap_atomic(pt->p, KM_USER0); + return pt; +} + +static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt) +{ + struct psb_mmu_pd *pd = pt->pd; + uint32_t *v; + + kunmap_atomic(pt->v, KM_USER0); + if (pt->count == 0) { + v = kmap_atomic(pd->p, KM_USER0); + v[pt->index] = pd->invalid_pde; + pd->tables[pt->index] = NULL; + + if (pd->hw_context != -1) { + psb_mmu_clflush(pd->driver, + (void *) &v[pt->index]); + atomic_set(&pd->driver->needs_tlbflush, 1); + } + kunmap_atomic(pt->v, KM_USER0); + spin_unlock(&pd->driver->lock); + psb_mmu_free_pt(pt); + return; + } + spin_unlock(&pd->driver->lock); +} + +static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, + unsigned long addr, uint32_t pte) +{ + pt->v[psb_mmu_pt_index(addr)] = pte; +} + +static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt, + unsigned long addr) +{ + pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; +} + +#if 0 +static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd, + uint32_t mmu_offset) +{ + uint32_t *v; + uint32_t pfn; + + v = kmap_atomic(pd->p, KM_USER0); + if (!v) { + printk(KERN_INFO "Could not kmap pde page.\n"); + return 0; + } + pfn = 
v[psb_mmu_pd_index(mmu_offset)]; + /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */ + kunmap_atomic(v, KM_USER0); + if (((pfn & 0x0F) != PSB_PTE_VALID)) { + printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n", + mmu_offset, pfn); + } + v = ioremap(pfn & 0xFFFFF000, 4096); + if (!v) { + printk(KERN_INFO "Could not kmap pte page.\n"); + return 0; + } + pfn = v[psb_mmu_pt_index(mmu_offset)]; + /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */ + iounmap(v); + if (((pfn & 0x0F) != PSB_PTE_VALID)) { + printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n", + mmu_offset, pfn); + } + return pfn >> PAGE_SHIFT; +} + +static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd, + uint32_t mmu_offset, + uint32_t gtt_pages) +{ + uint32_t start; + uint32_t next; + + printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n", + mmu_offset, gtt_pages); + down_read(&pd->driver->sem); + start = psb_mmu_check_pte_locked(pd, mmu_offset); + mmu_offset += PAGE_SIZE; + gtt_pages -= 1; + while (gtt_pages--) { + next = psb_mmu_check_pte_locked(pd, mmu_offset); + if (next != start + 1) { + printk(KERN_INFO + "Ptes out of order: 0x%08x, 0x%08x.\n", + start, next); + } + start = next; + mmu_offset += PAGE_SIZE; + } + up_read(&pd->driver->sem); +} + +#endif + +void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, + uint32_t mmu_offset, uint32_t gtt_start, + uint32_t gtt_pages) +{ + uint32_t *v; + uint32_t start = psb_mmu_pd_index(mmu_offset); + struct psb_mmu_driver *driver = pd->driver; + int num_pages = gtt_pages; + + down_read(&driver->sem); + spin_lock(&driver->lock); + + v = kmap_atomic(pd->p, KM_USER0); + v += start; + + while (gtt_pages--) { + *v++ = gtt_start | pd->pd_mask; + gtt_start += PAGE_SIZE; + } + + ttm_tt_cache_flush(&pd->p, num_pages); + kunmap_atomic(v, KM_USER0); + spin_unlock(&driver->lock); + + if (pd->hw_context != -1) + atomic_set(&pd->driver->needs_tlbflush, 1); + + up_read(&pd->driver->sem); + psb_mmu_flush_pd(pd->driver, 0); +} + +struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) +{ + struct psb_mmu_pd *pd; + + down_read(&driver->sem); + pd = driver->default_pd; + up_read(&driver->sem); + + return pd; +} + +/* Returns the physical address of the PD shared by sgx/msvdx */ +uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver) +{ + struct psb_mmu_pd *pd; + + pd = psb_mmu_get_default_pd(driver); + return page_to_pfn(pd->p) << PAGE_SHIFT; +} + +void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) +{ + psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL); + psb_mmu_free_pagedir(driver->default_pd); + kfree(driver); +} + +struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, + int trap_pagefaults, + int invalid_type, + struct drm_psb_private *dev_priv) +{ + struct psb_mmu_driver *driver; + + driver = kmalloc(sizeof(*driver), GFP_KERNEL); + + if (!driver) + return NULL; + driver->dev_priv = dev_priv; + + driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, + invalid_type); + if (!driver->default_pd) + goto out_err1; + + spin_lock_init(&driver->lock); + init_rwsem(&driver->sem); + down_write(&driver->sem); + driver->register_map = registers; + atomic_set(&driver->needs_tlbflush, 1); + + driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL); + psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT, + PSB_CR_BIF_CTRL); + psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT, + PSB_CR_BIF_CTRL); + + driver->has_clflush = 0; + +#if defined(CONFIG_X86) + if (boot_cpu_has(X86_FEATURE_CLFLSH)) { + uint32_t tfms, misc, cap0, 
cap4, clflush_size; + + /* + * clflush size is determined at kernel setup for x86_64 + * but not for i386. We have to do it here. + */ + + cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); + clflush_size = ((misc >> 8) & 0xff) * 8; + driver->has_clflush = 1; + driver->clflush_add = + PAGE_SIZE * clflush_size / sizeof(uint32_t); + driver->clflush_mask = driver->clflush_add - 1; + driver->clflush_mask = ~driver->clflush_mask; + } +#endif + + up_write(&driver->sem); + return driver; + +out_err1: + kfree(driver); + return NULL; +} + +#if defined(CONFIG_X86) +static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, + unsigned long address, uint32_t num_pages, + uint32_t desired_tile_stride, + uint32_t hw_tile_stride) +{ + struct psb_mmu_pt *pt; + uint32_t rows = 1; + uint32_t i; + unsigned long addr; + unsigned long end; + unsigned long next; + unsigned long add; + unsigned long row_add; + unsigned long clflush_add = pd->driver->clflush_add; + unsigned long clflush_mask = pd->driver->clflush_mask; + + if (!pd->driver->has_clflush) { + ttm_tt_cache_flush(&pd->p, num_pages); + return; + } + + if (hw_tile_stride) + rows = num_pages / desired_tile_stride; + else + desired_tile_stride = num_pages; + + add = desired_tile_stride << PAGE_SHIFT; + row_add = hw_tile_stride << PAGE_SHIFT; + mb(); + for (i = 0; i < rows; ++i) { + + addr = address; + end = addr + add; + + do { + next = psb_pd_addr_end(addr, end); + pt = psb_mmu_pt_map_lock(pd, addr); + if (!pt) + continue; + do { + psb_clflush(&pt->v + [psb_mmu_pt_index(addr)]); + } while (addr += + clflush_add, + (addr & clflush_mask) < next); + + psb_mmu_pt_unmap_unlock(pt); + } while (addr = next, next != end); + address += row_add; + } + mb(); +} +#else +static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, + unsigned long address, uint32_t num_pages, + uint32_t desired_tile_stride, + uint32_t hw_tile_stride) +{ + drm_ttm_cache_flush(&pd->p, num_pages); +} +#endif + +void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, + unsigned long address, uint32_t num_pages) +{ + struct psb_mmu_pt *pt; + unsigned long addr; + unsigned long end; + unsigned long next; + unsigned long f_address = address; + + down_read(&pd->driver->sem); + + addr = address; + end = addr + (num_pages << PAGE_SHIFT); + + do { + next = psb_pd_addr_end(addr, end); + pt = psb_mmu_pt_alloc_map_lock(pd, addr); + if (!pt) + goto out; + do { + psb_mmu_invalidate_pte(pt, addr); + --pt->count; + } while (addr += PAGE_SIZE, addr < next); + psb_mmu_pt_unmap_unlock(pt); + + } while (addr = next, next != end); + +out: + if (pd->hw_context != -1) + psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); + + up_read(&pd->driver->sem); + + if (pd->hw_context != -1) + psb_mmu_flush(pd->driver); + + return; +} + +void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, + uint32_t num_pages, uint32_t desired_tile_stride, + uint32_t hw_tile_stride) +{ + struct psb_mmu_pt *pt; + uint32_t rows = 1; + uint32_t i; + unsigned long addr; + unsigned long end; + unsigned long next; + unsigned long add; + unsigned long row_add; + unsigned long f_address = address; + + if (hw_tile_stride) + rows = num_pages / desired_tile_stride; + else + desired_tile_stride = num_pages; + + add = desired_tile_stride << PAGE_SHIFT; + row_add = hw_tile_stride << PAGE_SHIFT; + + down_read(&pd->driver->sem); + + /* Make sure we only need to flush this processor's cache */ + + for (i = 0; i < rows; ++i) { + + addr = address; + end = addr + add; + + do { + next = psb_pd_addr_end(addr, end); + pt = psb_mmu_pt_map_lock(pd, addr); + 
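+			/* No page table here: nothing mapped, so nothing
+			 * needs flushing.
+			 */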
if (!pt) + continue; + do { + psb_mmu_invalidate_pte(pt, addr); + --pt->count; + + } while (addr += PAGE_SIZE, addr < next); + psb_mmu_pt_unmap_unlock(pt); + + } while (addr = next, next != end); + address += row_add; + } + if (pd->hw_context != -1) + psb_mmu_flush_ptes(pd, f_address, num_pages, + desired_tile_stride, hw_tile_stride); + + up_read(&pd->driver->sem); + + if (pd->hw_context != -1) + psb_mmu_flush(pd->driver); +} + +int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, + unsigned long address, uint32_t num_pages, + int type) +{ + struct psb_mmu_pt *pt; + uint32_t pte; + unsigned long addr; + unsigned long end; + unsigned long next; + unsigned long f_address = address; + int ret = 0; + + down_read(&pd->driver->sem); + + addr = address; + end = addr + (num_pages << PAGE_SHIFT); + + do { + next = psb_pd_addr_end(addr, end); + pt = psb_mmu_pt_alloc_map_lock(pd, addr); + if (!pt) { + ret = -ENOMEM; + goto out; + } + do { + pte = psb_mmu_mask_pte(start_pfn++, type); + psb_mmu_set_pte(pt, addr, pte); + pt->count++; + } while (addr += PAGE_SIZE, addr < next); + psb_mmu_pt_unmap_unlock(pt); + + } while (addr = next, next != end); + +out: + if (pd->hw_context != -1) + psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); + + up_read(&pd->driver->sem); + + if (pd->hw_context != -1) + psb_mmu_flush(pd->driver); + + return ret; +} + +int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, + unsigned long address, uint32_t num_pages, + uint32_t desired_tile_stride, + uint32_t hw_tile_stride, int type) +{ + struct psb_mmu_pt *pt; + uint32_t rows = 1; + uint32_t i; + uint32_t pte; + unsigned long addr; + unsigned long end; + unsigned long next; + unsigned long add; + unsigned long row_add; + unsigned long f_address = address; + int ret = 0; + + if (hw_tile_stride) { + if (num_pages % desired_tile_stride != 0) + return -EINVAL; + rows = num_pages / desired_tile_stride; + } else { + desired_tile_stride = num_pages; + } + + add = desired_tile_stride << PAGE_SHIFT; + row_add = hw_tile_stride << PAGE_SHIFT; + + down_read(&pd->driver->sem); + + for (i = 0; i < rows; ++i) { + + addr = address; + end = addr + add; + + do { + next = psb_pd_addr_end(addr, end); + pt = psb_mmu_pt_alloc_map_lock(pd, addr); + if (!pt) { + ret = -ENOMEM; + goto out; + } + do { + pte = + psb_mmu_mask_pte(page_to_pfn(*pages++), + type); + psb_mmu_set_pte(pt, addr, pte); + pt->count++; + } while (addr += PAGE_SIZE, addr < next); + psb_mmu_pt_unmap_unlock(pt); + + } while (addr = next, next != end); + + address += row_add; + } +out: + if (pd->hw_context != -1) + psb_mmu_flush_ptes(pd, f_address, num_pages, + desired_tile_stride, hw_tile_stride); + + up_read(&pd->driver->sem); + + if (pd->hw_context != -1) + psb_mmu_flush(pd->driver); + + return ret; +} + +void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask) +{ + mask &= _PSB_MMU_ER_MASK; + psb_iowrite32(driver, + psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask, + PSB_CR_BIF_CTRL); + (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); +} + +void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, + uint32_t mask) +{ + mask &= _PSB_MMU_ER_MASK; + psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask, + PSB_CR_BIF_CTRL); + (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); +} + +int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, + unsigned long *pfn) +{ + int ret; + struct psb_mmu_pt *pt; + uint32_t tmp; + spinlock_t *lock = &pd->driver->lock; + + down_read(&pd->driver->sem); + pt = 
psb_mmu_pt_map_lock(pd, virtual); + if (!pt) { + uint32_t *v; + + spin_lock(lock); + v = kmap_atomic(pd->p, KM_USER0); + tmp = v[psb_mmu_pd_index(virtual)]; + kunmap_atomic(v, KM_USER0); + spin_unlock(lock); + + if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || + !(pd->invalid_pte & PSB_PTE_VALID)) { + ret = -EINVAL; + goto out; + } + ret = 0; + *pfn = pd->invalid_pte >> PAGE_SHIFT; + goto out; + } + tmp = pt->v[psb_mmu_pt_index(virtual)]; + if (!(tmp & PSB_PTE_VALID)) { + ret = -EINVAL; + } else { + ret = 0; + *pfn = tmp >> PAGE_SHIFT; + } + psb_mmu_pt_unmap_unlock(pt); +out: + up_read(&pd->driver->sem); + return ret; +} + +void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset) +{ + struct page *p; + unsigned long pfn; + int ret = 0; + struct psb_mmu_pd *pd; + uint32_t *v; + uint32_t *vmmu; + + pd = driver->default_pd; + if (!pd) + printk(KERN_WARNING "Could not get default pd\n"); + + + p = alloc_page(GFP_DMA32); + + if (!p) { + printk(KERN_WARNING "Failed allocating page\n"); + return; + } + + v = kmap(p); + memset(v, 0x67, PAGE_SIZE); + + pfn = (offset >> PAGE_SHIFT); + + ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0); + if (ret) { + printk(KERN_WARNING "Failed inserting mmu page\n"); + goto out_err1; + } + + /* Ioremap the page through the GART aperture */ + + vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!vmmu) { + printk(KERN_WARNING "Failed ioremapping page\n"); + goto out_err2; + } + + /* Read from the page with mmu disabled. */ + printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu)); + + /* Enable the mmu for host accesses and read again. */ + psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST); + + printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n", + ioread32(vmmu)); + *v = 0x15243705; + printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n", + ioread32(vmmu)); + iowrite32(0x16243355, vmmu); + (void) ioread32(vmmu); + printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v); + + printk(KERN_INFO "Int stat is 0x%08x\n", + psb_ioread32(driver, PSB_CR_BIF_INT_STAT)); + printk(KERN_INFO "Fault is 0x%08x\n", + psb_ioread32(driver, PSB_CR_BIF_FAULT)); + + /* Disable MMU for host accesses and clear page fault register */ + psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST); + iounmap(vmmu); +out_err2: + psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0); +out_err1: + kunmap(p); + __free_page(p); +} diff --git a/drivers/gpu/drm/psb/psb_msvdx.c b/drivers/gpu/drm/psb/psb_msvdx.c new file mode 100644 index 0000000..6930880 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_msvdx.c @@ -0,0 +1,855 @@ +/** + * file psb_msvdx.c + * MSVDX I/O operations and IRQ handling + * + */ + +/************************************************************************** + * + * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA + * Copyright (c) Imagination Technologies Limited, UK + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drv.h"
+#include "psb_drm.h"
+#include "psb_msvdx.h"
+#include "lnc_topaz.h"
+#include "psb_powermgmt.h"
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#ifndef list_first_entry
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+#endif
+
+
+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
+			  unsigned long cmd_size);
+
+static int psb_msvdx_dequeue_send(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
+	int ret = 0;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	if (list_empty(&msvdx_priv->msvdx_queue)) {
+		PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
+		msvdx_priv->msvdx_busy = 0;
+		return -EINVAL;
+	}
+	msvdx_cmd = list_first_entry(&msvdx_priv->msvdx_queue,
+				     struct psb_msvdx_cmd_queue, head);
+	PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
+	ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
+	if (ret) {
+		DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
+		ret = -EINVAL;
+	}
+	list_del(&msvdx_cmd->head);
+	kfree(msvdx_cmd->cmd);
+	kfree(msvdx_cmd);
+
+	return ret;
+}
+
+static int psb_msvdx_map_command(struct drm_device *dev,
+				 struct ttm_buffer_object *cmd_buffer,
+				 unsigned long cmd_offset, unsigned long cmd_size,
+				 void **msvdx_cmd, uint32_t sequence, int copy_cmd)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret = 0;
+	unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
+	unsigned long cmd_size_remaining;
+	struct ttm_bo_kmap_obj cmd_kmap;
+	void *cmd, *tmp, *cmd_start;
+	bool is_iomem;
+
+	/* command buffers may not exceed page boundary */
+	if (cmd_size + cmd_page_offset > PAGE_SIZE)
+		return -EINVAL;
+
+	ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 1, &cmd_kmap);
+	if (ret) {
+		DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
+		return ret;
+	}
+
+	cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
+		    + cmd_page_offset;
+	cmd = cmd_start;
+	cmd_size_remaining = cmd_size;
+
+	while (cmd_size_remaining > 0) {
+		uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
+		uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
+		uint32_t mmu_ptd = 0, tmp = 0;
+
+		PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
+				  " cur_cmd_id = %02x fence = %08x\n",
+				  (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
+		if ((cur_cmd_size % sizeof(uint32_t))
+		    || (cur_cmd_size > cmd_size_remaining)) {
+			ret = -EINVAL;
+			DRM_ERROR("MSVDX: ret:%d\n", ret);
+			goto out;
+		}
+
+		switch (cur_cmd_id) {
+		case VA_MSGID_RENDER:
+			/* Fence ID */
+			MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
+					  sequence);
+			mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
+			tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
+					     1, 0);
+			if (tmp == 1) {
+				mmu_ptd |= 1;
+				PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
+			}
+
+			/* PTD */
+			MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
+			break;
+
+		default:
+			/* Msg not supported */
+			ret = -EINVAL;
+			PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
+			goto out;
+		}
+
+		cmd += cur_cmd_size;
+		cmd_size_remaining -= cur_cmd_size;
+	}
+
+	if (copy_cmd) {
+		PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
+
+		tmp = kzalloc(cmd_size, GFP_KERNEL);
+		if (tmp == NULL) {
+			ret = -ENOMEM;
+			DRM_ERROR("MSVDX: fail to alloc, ret:%d\n", ret);
+			goto out;
+		}
+		memcpy(tmp, cmd_start, cmd_size);
+		*msvdx_cmd = tmp;
+	} else {
+		PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
+		ret = psb_msvdx_send(dev, cmd_start, cmd_size);
+		if (ret) {
+			DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
+			ret = -EINVAL;
+		}
+	}
+
+out:
+	ttm_bo_kunmap(&cmd_kmap);
+
+	return ret;
+}
+
+int psb_submit_video_cmdbuf(struct drm_device *dev,
+			    struct ttm_buffer_object *cmd_buffer,
+			    unsigned long cmd_offset, unsigned long cmd_size,
+			    struct ttm_fence_object *fence)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
+	unsigned long irq_flags;
+	int ret = 0;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	psb_schedule_watchdog(dev_priv);
+
+	spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	if (msvdx_priv->msvdx_needs_reset) {
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
+		if (psb_msvdx_reset(dev_priv)) {
+			ret = -EBUSY;
+			DRM_ERROR("MSVDX: Reset failed\n");
+			return ret;
+		}
+		msvdx_priv->msvdx_needs_reset = 0;
+		msvdx_priv->msvdx_busy = 0;
+
+		psb_msvdx_init(dev);
+		spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	}
+
+	if (!msvdx_priv->msvdx_fw_loaded) {
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		PSB_DEBUG_GENERAL("MSVDX:reload FW to MTX\n");
+
+		ret = psb_setup_fw(dev);
+		if (ret) {
+			DRM_ERROR("MSVDX:fail to load FW\n");
+			/* FIXME: find a proper return value */
+			return -EFAULT;
+		}
+		msvdx_priv->msvdx_fw_loaded = 1;
+
+		PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
+		spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	}
+
+	if (!msvdx_priv->msvdx_busy) {
+		msvdx_priv->msvdx_busy = 1;
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
+				  sequence);
+		ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
+					    cmd_size, NULL, sequence, 0);
+		if (ret) {
+			DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
+			return ret;
+		}
+	} else {
+		struct psb_msvdx_cmd_queue *msvdx_cmd;
+		void *cmd = NULL;
+
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		/* queue the command to be sent when the h/w is ready */
+		PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
+				  sequence);
+		msvdx_cmd = kzalloc(sizeof(struct psb_msvdx_cmd_queue),
+				    GFP_KERNEL);
+		if (msvdx_cmd == NULL) {
+			DRM_ERROR("MSVDXQUE: Out of memory...\n");
+			return -ENOMEM;
+		}
+
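+		/* copy_cmd == 1 below: the command is copied out of the
+		 * kmapped TTM buffer so it can be replayed from the queue
+		 * by psb_msvdx_dequeue_send() once the hardware is free.
+		 */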
+		ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
+					    cmd_size, &cmd, sequence, 1);
+		if (ret) {
+			DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
+			kfree(msvdx_cmd);
+			return ret;
+		}
+		msvdx_cmd->cmd = cmd;
+		msvdx_cmd->cmd_size = cmd_size;
+		msvdx_cmd->sequence = sequence;
+		spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+		list_add_tail(&msvdx_cmd->head, &msvdx_priv->msvdx_queue);
+		if (!msvdx_priv->msvdx_busy) {
+			msvdx_priv->msvdx_busy = 1;
+			PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
+			psb_msvdx_dequeue_send(dev);
+		}
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+	}
+
+	return ret;
+}
+
+int psb_cmdbuf_video(struct drm_file *priv,
+		     struct list_head *validate_list,
+		     uint32_t fence_type,
+		     struct drm_psb_cmdbuf_arg *arg,
+		     struct ttm_buffer_object *cmd_buffer,
+		     struct psb_ttm_fence_rep *fence_arg)
+{
+	struct drm_device *dev = priv->minor->dev;
+	struct ttm_fence_object *fence;
+	int ret;
+
+	/*
+	 * Check this. Doesn't seem right. Have fencing done AFTER command
+	 * submission and make sure drm_psb_idle idles the MSVDX completely.
+	 */
+	ret =
+	    psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
+				    arg->cmdbuf_size, NULL);
+	if (ret)
+		return ret;
+
+
+	/* DRM_ERROR("Intel: Fix video fencing!!\n"); */
+	psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
+			  arg->fence_flags, validate_list, fence_arg,
+			  &fence);
+
+	ttm_fence_object_unref(&fence);
+	mutex_lock(&cmd_buffer->mutex);
+	if (cmd_buffer->sync_obj != NULL)
+		ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
+	mutex_unlock(&cmd_buffer->mutex);
+
+	return 0;
+}
+
+
+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
+			  unsigned long cmd_size)
+{
+	int ret = 0;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	while (cmd_size > 0) {
+		uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
+		if (cur_cmd_size > cmd_size) {
+			ret = -EINVAL;
+			DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
+				  cmd_size, (unsigned long)cur_cmd_size);
+			goto out;
+		}
+
+		/* Send the message to h/w */
+		ret = psb_mtx_send(dev_priv, cmd);
+		if (ret) {
+			PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
+			goto out;
+		}
+		cmd += cur_cmd_size;
+		cmd_size -= cur_cmd_size;
+	}
+
+out:
+	PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
+	return ret;
+}
+
+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
+{
+	static uint32_t pad_msg[FWRK_PADMSG_SIZE];
+	const uint32_t *p_msg = (uint32_t *) msg;
+	uint32_t msg_num, words_free, ridx, widx;
+	int ret = 0;
+
+	PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
+
+	/* we need clocks enabled before we touch VEC local ram */
+	PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
+
+	msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
+	if (msg_num > NUM_WORDS_MTX_BUF) {
+		ret = -EINVAL;
+		DRM_ERROR("MSVDX: message exceeds maximum, ret:%d\n", ret);
+		goto out;
+	}
+
+	ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
+	widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+	/* message would wrap, need to send a pad message */
+	if (widx + msg_num > NUM_WORDS_MTX_BUF) {
+		/* Shouldn't happen for a PAD message itself */
+		BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
+		       == FWRK_MSGID_PADDING);
+
+		/* if the read pointer is at zero then we must wait for it to
+		 * change otherwise the write pointer will equal the read
+		 * pointer, which should only happen when the buffer is empty
+		 *
+		 * This will only happen if we try to overfill the queue;
+		 * queue management should make sure this never happens in
+		 * the first place.
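+		 *
+		 * Worked example: with widx == NUM_WORDS_MTX_BUF - 1 and a
+		 * two-word message, a one-word pad is sent first, wrapping
+		 * widx to 0 before the real message is written.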
+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg) +{ + static uint32_t pad_msg[FWRK_PADMSG_SIZE]; + const uint32_t *p_msg = (uint32_t *) msg; + uint32_t msg_num, words_free, ridx, widx; + int ret = 0; + + PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n"); + + /* we need clocks enabled before we touch VEC local ram */ + PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); + + msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4; + if (msg_num > NUM_WORDS_MTX_BUF) { + ret = -EINVAL; + DRM_ERROR("MSVDX: message exceeds maximum, ret:%d\n", ret); + goto out; + } + + ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX); + widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX); + + /* message would wrap, need to send a pad message */ + if (widx + msg_num > NUM_WORDS_MTX_BUF) { + /* Shouldn't happen for a PAD message itself */ + BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID) + == FWRK_MSGID_PADDING); + + /* if the read pointer is at zero then we must wait for it to + * change otherwise the write pointer will equal the read + * pointer, which should only happen when the buffer is empty + * + * This will only happen if we try to overfill the queue; + * queue management should make + * sure this never happens in the first place. + */ + BUG_ON(0 == ridx); + if (0 == ridx) { + ret = -EINVAL; + DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret); + goto out; + } + + /* Send a pad message */ + MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE, + (NUM_WORDS_MTX_BUF - widx) << 2); + MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID, + FWRK_MSGID_PADDING); + psb_mtx_send(dev_priv, pad_msg); + widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX); + } + + if (widx >= ridx) + words_free = NUM_WORDS_MTX_BUF - (widx - ridx); + else + words_free = ridx - widx; + + BUG_ON(msg_num > words_free); + if (msg_num > words_free) { + ret = -EINVAL; + DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret); + goto out; + } + while (msg_num > 0) { + PSB_WMSVDX32(*p_msg++, MSVDX_COMMS_TO_MTX_BUF + (widx << 2)); + msg_num--; + widx++; + if (NUM_WORDS_MTX_BUF == widx) + widx = 0; + } + PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX); + + /* Make sure clocks are enabled before we kick */ + PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); + + /* signal an interrupt to let the mtx know there is a new message */ + PSB_WMSVDX32(1, MSVDX_MTX_KICKI); + +out: + return ret; +} + +/* + * MSVDX MTX interrupt + */ +static void psb_msvdx_mtx_interrupt(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + static uint32_t buf[128]; /* message buffer */ + uint32_t ridx, widx; + uint32_t num, ofs; /* message num and offset */ + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n"); + + /* Are clocks enabled - If not enable before + * attempting to read from VLR + */ + if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) { + PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interrupt set\n"); + PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); + } + +loop: /* just for coding style check */ + ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX); + widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX); + + /* Get out of here if nothing */ + if (ridx == widx) + goto done; + + ofs = 0; + buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2)); + + /* round to nearest word */ + num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4; + + /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */ + + if (++ridx >= NUM_WORDS_HOST_BUF) + ridx = 0; + + for (ofs++; ofs < num; ofs++) { + buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2)); + + if (++ridx >= NUM_WORDS_HOST_BUF) + ridx = 0; + } + + /* Update the Read index */ + PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX); + + if (msvdx_priv->msvdx_needs_reset) + goto loop; + + switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) { + case VA_MSGID_CMD_HW_PANIC: + case VA_MSGID_CMD_FAILED: { + uint32_t fence = MEMIO_READ_FIELD(buf, + FW_VA_CMD_FAILED_FENCE_VALUE); + uint32_t fault = MEMIO_READ_FIELD(buf, + FW_VA_CMD_FAILED_IRQSTATUS); + uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID); + uint32_t diff = 0; + + (void) fault; + if (msg_id == VA_MSGID_CMD_HW_PANIC) + PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:" + "Fault detected" + " - Fence: %08x, Status: %08x" + " - resetting and ignoring error\n", + fence, fault); + else + PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:" + "Fault detected" + " - Fence: %08x, Status: %08x" + " - resetting and ignoring error\n", + fence, fault); + + msvdx_priv->msvdx_needs_reset = 1; + + if (msg_id == VA_MSGID_CMD_HW_PANIC) { + diff = msvdx_priv->msvdx_current_sequence + - dev_priv->sequence[PSB_ENGINE_VIDEO]; + + if
(diff > 0x0FFFFFFF) + msvdx_priv->msvdx_current_sequence++; + + PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, " + "assuming %08x\n", + msvdx_priv->msvdx_current_sequence); + } else { + msvdx_priv->msvdx_current_sequence = fence; + } + + psb_fence_error(dev, PSB_ENGINE_VIDEO, + msvdx_priv->msvdx_current_sequence, + _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED); + + /* Flush the command queue */ + psb_msvdx_flush_cmd_queue(dev); + + goto done; + } + case VA_MSGID_CMD_COMPLETED: { + uint32_t fence = MEMIO_READ_FIELD(buf, + FW_VA_CMD_COMPLETED_FENCE_VALUE); + uint32_t flags = MEMIO_READ_FIELD(buf, + FW_VA_CMD_COMPLETED_FLAGS); + + PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: " + "FenceID: %08x, flags: 0x%x\n", + fence, flags); + + msvdx_priv->msvdx_current_sequence = fence; + + psb_fence_handler(dev, PSB_ENGINE_VIDEO); + + if (flags & FW_VA_RENDER_HOST_INT) { + /* Now send the next command from the msvdx cmd queue */ + psb_msvdx_dequeue_send(dev); + goto done; + } + + break; + } + case VA_MSGID_CMD_COMPLETED_BATCH: { + uint32_t fence = MEMIO_READ_FIELD(buf, + FW_VA_CMD_COMPLETED_FENCE_VALUE); + uint32_t tickcnt = MEMIO_READ_FIELD(buf, + FW_VA_CMD_COMPLETED_NO_TICKS); + (void)tickcnt; + /* we have the fence value in the message */ + PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:" + " FenceID: %08x, TickCount: %08x\n", + fence, tickcnt); + msvdx_priv->msvdx_current_sequence = fence; + + break; + } + case VA_MSGID_ACK: + PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n"); + break; + + case VA_MSGID_TEST1: + PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n"); + break; + + case VA_MSGID_TEST2: + PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n"); + break; + /* Don't need to do anything with these messages */ + + case VA_MSGID_DEBLOCK_REQUIRED: { + uint32_t ctxid = MEMIO_READ_FIELD(buf, + FW_VA_DEBLOCK_REQUIRED_CONTEXT); + (void) ctxid; + /* The BE will now be locked. */ + /* Unblock rendec by reading the mtx2mtx end of slice */ + (void) PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA); + + PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED" + " Context=%08x\n", ctxid); + goto done; + } + default: + DRM_ERROR("MSVDX: unknown message from MTX\n"); + goto done; + } + +done: + /* we get a frame/slice done, try to save some power */ + if (drm_msvdx_pmpolicy == PSB_PMPOLICY_POWERDOWN) + schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0); + + DRM_MEMORYBARRIER(); /* TBD check this... */ +} + + +/* + * MSVDX interrupt.
+ */ +void psb_msvdx_interrupt(struct drm_device *dev, + uint32_t msvdx_stat) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) { + /* Ideally we should never get to this */ + PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x fence2_irq_on=%d\n", + msvdx_stat, dev_priv->fence2_irq_on); + + /* Pause MMU */ + PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK, + MSVDX_MMU_CONTROL0); + DRM_WRITEMEMORYBARRIER(); + + /* Clear this interrupt bit only */ + PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK, + MSVDX_INTERRUPT_CLEAR); + PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR); + DRM_READMEMORYBARRIER(); + + msvdx_priv->msvdx_needs_reset = 1; + } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) { + PSB_DEBUG_IRQ + ("MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d(MTX)\n", + msvdx_stat, dev_priv->fence2_irq_on); + + /* Clear all interrupt bits */ + PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR); + PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR); + DRM_READMEMORYBARRIER(); + + psb_msvdx_mtx_interrupt(dev); + } +} + + +void psb_msvdx_lockup(struct drm_psb_private *dev_priv, + int *msvdx_lockup, int *msvdx_idle) +{ + int tmp; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + *msvdx_lockup = 0; + *msvdx_idle = 1; + +#if 0 + PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d " + "last_sequence:%d and last_submitted_sequence :%d\n", + msvdx_priv->msvdx_current_sequence, + msvdx_priv->msvdx_last_sequence, + dev_priv->sequence[PSB_ENGINE_VIDEO]); +#endif + + tmp = msvdx_priv->msvdx_current_sequence - + dev_priv->sequence[PSB_ENGINE_VIDEO]; + + if (tmp > 0x0FFFFFFF) { + if (msvdx_priv->msvdx_current_sequence == + msvdx_priv->msvdx_last_sequence) { + DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n", + msvdx_priv->msvdx_current_sequence); + *msvdx_lockup = 1; + } else { + PSB_DEBUG_GENERAL("MSVDXTimer: " + "msvdx responded fine so far\n"); + msvdx_priv->msvdx_last_sequence = + msvdx_priv->msvdx_current_sequence; + *msvdx_idle = 0; + } + } +} + +int psb_check_msvdx_idle(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + uint32_t fs_status, ccb_roff, ccb_woff; + + if (msvdx_priv->msvdx_busy) { + PSB_DEBUG_PM("MSVDX: psb_check_msvdx_idle returns busy\n"); + return -EBUSY; + } + + /* check that clocks are enabled before reading VLR */ + if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) + PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); + + fs_status = PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS); + ccb_roff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX); + ccb_woff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX); + + /* If the firmware says the hardware is idle + * and the CCB is empty then we can say it is IDLE + */ + if ((fs_status & MSVDX_FW_STATUS_HW_IDLE) && (ccb_roff == ccb_woff)) { + PSB_DEBUG_PM("MSVDXIDLE: FW indicate IDLE\n"); + return 0; + } + + return -EBUSY; /* fence, CCB, etc. are not checked here */ +}
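The panic path above relies on unsigned 32-bit sequence arithmetic: an unsigned distance larger than 0x0FFFFFFF is interpreted as "behind" rather than "far ahead", which keeps the comparison correct across wraparound. A minimal sketch of that comparison (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Nonzero when sequence a is "behind" b under 32-bit wraparound, using
 * the same 0x0FFFFFFF window as the checks above: any unsigned distance
 * larger than 2^28 is treated as a negative (not-yet-reached) one. */
static int seq_behind(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) > 0x0FFFFFFFu;
}

int main(void)
{
	printf("%d\n", seq_behind(5u, 7u));		/* 1: 5 is behind 7 */
	printf("%d\n", seq_behind(7u, 5u));		/* 0: 7 is ahead of 5 */
	printf("%d\n", seq_behind(0xFFFFFFFEu, 2u));	/* 1: behind across the wrap */
	return 0;
}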
+int psb_wait_msvdx_idle(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct ttm_fence_device *fdev = &dev_priv->fdev; + struct ttm_fence_class_manager *fc = + &fdev->fence_class[PSB_ENGINE_VIDEO]; + struct ttm_fence_object *fence, *next; + int signaled = 0; + unsigned long _end = jiffies + 5 * DRM_HZ; + int ret = 0; + + /* Ensure that all pending IRQs are serviced. */ + + /* + * Save the last MSVDX fence in dev_priv instead!!! + * Need to be fc->write_locked while accessing a fence from the ring. + */ + list_for_each_entry_safe(fence, next, &fc->ring, ring) { + do { + signaled = ttm_fence_object_signaled(fence, + _PSB_FENCE_TYPE_EXE); + if (signaled) { + PSB_DEBUG_PM("MSVDXIDLE:wait_fence success\n"); + break; + } + if (time_after_eq(jiffies, _end)) { + PSB_DEBUG_PM("MSVDXIDLE: fence 0x%x didn't get" + " signaled for 5 secs\n", + (unsigned int) fence); + break; + } + DRM_UDELAY(1000); + } while (1); + } + do { + ret = psb_check_msvdx_idle(dev); + if (ret == 0) { + PSB_DEBUG_PM("MSVDXIDLE: check_idle succeeded!\n"); + break; + } + + if (time_after_eq(jiffies, _end)) { + PSB_DEBUG_PM("MSVDXIDLE: wait HW idle time out\n"); + break; + } + DRM_UDELAY(1000); + } while (1); + + return ret; +} + +#if 0 +static int psb_power_gated_msvdx(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + PSB_DEBUG_PM("MSVDX: Setting clock to minimal\n"); + PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); + + MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_CLOCKGATED); + + return 0; +} + +static int psb_power_ungated_msvdx(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERUP); + + return 0; +} +#endif + +int lnc_video_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_lnc_video_getparam_arg *arg = data; + int ret = 0; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)file_priv->minor->dev->dev_private; +#if defined(CONFIG_MRST_RAR_HANDLER) + struct RAR_buffer rar_buf; + size_t rar_status; +#endif + void *rar_handler; + uint32_t offset = 0; + + switch (arg->key) { + case LNC_VIDEO_GETPARAM_RAR_REGION_SIZE: + ret = copy_to_user((void __user *) ((unsigned long)arg->value), + &dev_priv->rar_region_size, + sizeof(dev_priv->rar_region_size)); + break; + case LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET: + ret = copy_from_user(&rar_handler, + (void __user *)((unsigned long)arg->arg), + sizeof(rar_handler)); + if (ret) + break; + +#if defined(CONFIG_MRST_RAR_HANDLER) + rar_buf.info.handle = rar_handler; + rar_buf.bus_address = dev_priv->rar_region_start; + rar_status = 1; + + rar_status = rar_handle_to_bus(&rar_buf, 1); + if (rar_status != 1) { + DRM_ERROR("MSVDX:rar_handle_to_bus failed\n"); + ret = -1; + break; + } + + offset = rar_buf.bus_address - dev_priv->rar_region_start; + PSB_DEBUG_GENERAL("MSVDX:RAR handler %p, bus address=0x%08x," + "RAR region=0x%08x\n", rar_handler, + rar_buf.bus_address, dev_priv->rar_region_start); +#endif + ret = copy_to_user((void __user *) ((unsigned long)arg->value), + &offset, + sizeof(offset)); + break; + case LNC_VIDEO_FRAME_SKIP: + ret = lnc_video_frameskip(dev, arg->value); + break; + default: + ret = -EFAULT; + break; + } + + if (ret) + return -EFAULT; + + return 0; +} + +inline int psb_try_power_down_msvdx(struct drm_device *dev) +{ + return powermgmt_suspend_islands(dev->pdev, PSB_VIDEO_DEC_ISLAND, false); +}
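lnc_video_getparam above follows the usual getparam ioctl shape: a key selects the value, and the result is copied out through a user pointer carried in the argument struct. The reduced sketch below shows the shape only; all names are hypothetical, and memcpy stands in for copy_to_user:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Hypothetical argument block: 'key' selects the value; 'value' would
 * be a user pointer in the real driver (copy_to_user there). */
struct getparam_arg {
	uint32_t key;
	void *value;
};

enum { PARAM_REGION_SIZE = 1 };

static uint32_t region_size = 4 << 20;	/* stand-in device state */

static int video_getparam(struct getparam_arg *arg)
{
	switch (arg->key) {
	case PARAM_REGION_SIZE:
		memcpy(arg->value, &region_size, sizeof(region_size));
		return 0;
	default:
		return -EFAULT;	/* unknown key, as in the driver above */
	}
}

int main(void)
{
	uint32_t out = 0;
	struct getparam_arg a = { PARAM_REGION_SIZE, &out };

	if (video_getparam(&a))
		return 1;
	printf("region size: %u bytes\n", (unsigned)out);
	return 0;
}

diff --git a/drivers/gpu/drm/psb/psb_msvdx.h b/drivers/gpu/drm/psb/psb_msvdx.h new file mode 100644 index 0000000..8d8d8b5 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_msvdx.h @@ -0,0 +1,527 @@ +/************************************************************************** + *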
* Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA + * Copyright (c) Imagination Technologies Limited, UK + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef _PSB_MSVDX_H_ +#define _PSB_MSVDX_H_ + +#include "psb_drv.h" + +#if defined(CONFIG_MRST_RAR_HANDLER) +#include "rar/memrar.h" +#endif + +extern int drm_msvdx_pmpolicy; + +void psb_msvdx_interrupt(struct drm_device *dev, + uint32_t msvdx_stat); + +int psb_msvdx_init(struct drm_device *dev); +int psb_msvdx_uninit(struct drm_device *dev); +int psb_msvdx_reset(struct drm_psb_private *dev_priv); +uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver); +int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg); +void psb_msvdx_flush_cmd_queue(struct drm_device *dev); +void psb_msvdx_lockup(struct drm_psb_private *dev_priv, + int *msvdx_lockup, int *msvdx_idle); +int psb_setup_fw(struct drm_device *dev); +int psb_check_msvdx_idle(struct drm_device *dev); +int psb_wait_msvdx_idle(struct drm_device *dev); +int psb_cmdbuf_video(struct drm_file *priv, + struct list_head *validate_list, + uint32_t fence_type, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *cmd_buffer, + struct psb_ttm_fence_rep *fence_arg); + +/* Non-Optimal Invalidation is not default */ +#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2 +#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100) + +#define FW_VA_RENDER_HOST_INT 0x00004000 +#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020 + +/* There is no work currently underway on the hardware */ +#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 +#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200 +#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \ + (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \ + MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \ + MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE) + +#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \ + (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \ + MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE) + +#define POULSBO_D0 0x5 +#define POULSBO_D1 0x6 +#define PSB_REVID_OFFSET 0x8 + +#define MTX_CODE_BASE (0x80900000) +#define MTX_DATA_BASE (0x82880000) +#define PC_START_ADDRESS (0x80900000) + +#define MTX_CORE_CODE_MEM (0x10) +#define MTX_CORE_DATA_MEM (0x18) + +#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100) +#define 
MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8) +#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \ + (0x00010000) +#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \ + (0x00100000) +#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \ + (0x01000000) +#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \ + (0x10000000) + +#define clk_enable_all \ +(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK) + +#define clk_enable_minimal \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK + +#define clk_enable_auto \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK + +#define msvdx_sw_reset_all \ +(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \ +MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \ +MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \ +MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \ +MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK) + +#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \ + (((R_SPECIFIER)<<4) | (U_SPECIFIER)) +#define MTX_PC MTX_INTERNAL_REG(0, 5) + +#define RENDEC_A_SIZE (1024 * 1024) +#define RENDEC_B_SIZE (1024 * 1024) + +#define MEMIO_READ_FIELD(vpMem, field) \ + ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \ + & field##_MASK) >> field##_SHIFT)) + +#define MEMIO_WRITE_FIELD(vpMem, field, value) \ + (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \ + ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \ + & (field##_TYPE)~field##_MASK) | \ + (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK); + +#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \ + (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \ + ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \ + (field##_TYPE)(((uint32_t)(value) << field##_SHIFT))); + +#define REGIO_READ_FIELD(reg_val, reg, field) \ + ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT) + +#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \ + (reg_val) = \ + ((reg_val) & ~(reg##_##field##_MASK)) | \ + (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK)); + +#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \ + (reg_val) = \ + ((reg_val) | ((value) << (reg##_##field##_SHIFT))); + +#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \ + (0x00000001) +#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \ + (0x00000002) +#define 
MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \ + (0x00000004) +#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \ + (0x00000008) +#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \ + (0x00000010) +#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \ + (0x00000020) +#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \ + (0x00000040) + +#define clk_enable_all \ + (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \ +MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK) + +#define clk_enable_minimal \ + MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ + MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK + +/* MTX registers */ +#define MSVDX_MTX_ENABLE (0x0000) +#define MSVDX_MTX_KICKI (0x0088) +#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC) +#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8) +#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104) +#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108) +#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C) +#define MSVDX_MTX_SOFT_RESET (0x0200) + +/* MSVDX registers */ +#define MSVDX_CONTROL (0x0600) +#define MSVDX_INTERRUPT_CLEAR (0x060C) +#define MSVDX_INTERRUPT_STATUS (0x0608) +#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610) +#define MSVDX_MMU_CONTROL0 (0x0680) +#define MSVDX_MTX_RAM_BANK (0x06F0) +#define MSVDX_MAN_CLK_ENABLE (0x0620) + +/* RENDEC registers */ +#define MSVDX_RENDEC_CONTROL0 (0x0868) +#define MSVDX_RENDEC_CONTROL1 (0x086C) +#define MSVDX_RENDEC_BUFFER_SIZE (0x0870) +#define MSVDX_RENDEC_BASE_ADDR0 (0x0874) +#define MSVDX_RENDEC_BASE_ADDR1 (0x0878) +#define MSVDX_RENDEC_READ_DATA (0x0898) +#define MSVDX_RENDEC_CONTEXT0 (0x0950) +#define MSVDX_RENDEC_CONTEXT1 (0x0954) +#define MSVDX_RENDEC_CONTEXT2 (0x0958) +#define MSVDX_RENDEC_CONTEXT3 (0x095C) +#define MSVDX_RENDEC_CONTEXT4 (0x0960) +#define MSVDX_RENDEC_CONTEXT5 (0x0964) + +/* + * This defines the MSVDX communication buffer + */ +#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */ +/*!< Host buffer size (in 32-bit words) */ +#define NUM_WORDS_HOST_BUF (100) +/*!< MTX buffer size (in 32-bit words) */ +#define NUM_WORDS_MTX_BUF (100) + +/* There is no work currently underway on the hardware */ +#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 + +#define MSVDX_COMMS_AREA_ADDR (0x02cc0) + +#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18) +#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04) +#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10) +#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00) +#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04) +#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08) +#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C) +#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10) +#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14) +#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18) +#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C) +#define MSVDX_COMMS_TO_HOST_BUF 
(MSVDX_COMMS_AREA_ADDR + 0x20) +#define MSVDX_COMMS_TO_MTX_BUF \ + (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2)) + +#define MSVDX_COMMS_AREA_END \ + (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2)) + +#if (MSVDX_COMMS_AREA_END != 0x03000) +#error +#endif + +#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000) +#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31) + +#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000) +#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16) + +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000) +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20) + +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC) +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2) + +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002) +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1) + +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001) +#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0) + +#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001) +#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0) + +#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001) +#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0) + +#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100) +#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8) + +#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00) +#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8) + +#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000) +#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14) + +#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002) +#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1) + +#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000) +#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16) + +#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF) +#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0) + +#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000) +#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16) + +#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF) +#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0) + +#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000) +#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18) + +#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000) +#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16) + +#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000) +#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24) + +#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001) +#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0) + +/* Start of parser specific Host->MTX messages. */ +#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80) + +/* Start of parser specific MTX->Host messages. */ +#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0) + +#define FWRK_MSGID_PADDING (0) + +#define FWRK_GENMSG_SIZE_TYPE uint8_t +#define FWRK_GENMSG_SIZE_MASK (0xFF) +#define FWRK_GENMSG_SIZE_SHIFT (0) +#define FWRK_GENMSG_SIZE_OFFSET (0x0000) +#define FWRK_GENMSG_ID_TYPE uint8_t +#define FWRK_GENMSG_ID_MASK (0xFF) +#define FWRK_GENMSG_ID_SHIFT (0) +#define FWRK_GENMSG_ID_OFFSET (0x0001) +#define FWRK_PADMSG_SIZE (2) + +/* This type defines the framework specified message ids */ +enum { + /* ! Sent by the DXVA driver on the host to the mtx firmware. 
+ */ + VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG, + VA_MSGID_RENDER, + VA_MSGID_DEBLOCK, + VA_MSGID_BUBBLE, + + /* Test Messages */ + VA_MSGID_TEST1, + VA_MSGID_TEST2, + + /*! Sent by the mtx firmware to itself. + */ + VA_MSGID_RENDER_MC_INTERRUPT, + + /*! Sent by the DXVA firmware on the MTX to the host. + */ + VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG, + VA_MSGID_CMD_COMPLETED_BATCH, + VA_MSGID_DEBLOCK_REQUIRED, + VA_MSGID_TEST_RESPONCE, + VA_MSGID_ACK, + + VA_MSGID_CMD_FAILED, + VA_MSGID_CMD_UNSUPPORTED, + VA_MSGID_CMD_HW_PANIC, +}; + +/* MSVDX private structure */ +struct msvdx_private { + int msvdx_needs_reset; + + unsigned int pmstate; + + struct sysfs_dirent *sysfs_pmstate; + + uint32_t msvdx_current_sequence; + uint32_t msvdx_last_sequence; + + /* + *MSVDX Rendec Memory + */ + struct ttm_buffer_object *ccb0; + uint32_t base_addr0; + struct ttm_buffer_object *ccb1; + uint32_t base_addr1; + + /* + *msvdx command queue + */ + spinlock_t msvdx_lock; + struct mutex msvdx_mutex; + struct list_head msvdx_queue; + int msvdx_busy; + int msvdx_fw_loaded; + void *msvdx_fw; + int msvdx_fw_size; +}; + +/* MSVDX Firmware interface */ +#define FW_VA_INIT_SIZE (8) +#define FW_VA_DEBUG_TEST2_SIZE (4) + +/* FW_VA_DEBUG_TEST2 MSG_SIZE */ +#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t +#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF) +#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000) +#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0) + +/* FW_VA_DEBUG_TEST2 ID */ +#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t +#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF) +#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001) +#define FW_VA_DEBUG_TEST2_ID_SHIFT (0) + +/* FW_VA_CMD_FAILED FENCE_VALUE */ +#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t +#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF) +#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004) +#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0) + +/* FW_VA_CMD_FAILED IRQSTATUS */ +#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t +#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF) +#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008) +#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0) + +/* FW_VA_CMD_COMPLETED FENCE_VALUE */ +#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t +#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF) +#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004) +#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0) + +/* FW_VA_CMD_COMPLETED FLAGS */ +#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4) +#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t +#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF) +#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF) +#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008) +#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0) + +/* FW_VA_CMD_COMPLETED NO_TICKS */ +#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t +#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF) +#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002) +#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0) + +/* FW_VA_DEBLOCK_REQUIRED CONTEXT */ +#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t +#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF) +#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004) +#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0) + +/* FW_VA_INIT GLOBAL_PTD */ +#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t +#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF) +#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004) +#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0) + +/* FW_VA_RENDER FENCE_VALUE */ +#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t 
+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF) +#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010) +#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0) + +/* FW_VA_RENDER MMUPTD */ +#define FW_VA_RENDER_MMUPTD_TYPE uint32_t +#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF) +#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004) +#define FW_VA_RENDER_MMUPTD_SHIFT (0) + +/* FW_VA_RENDER BUFFER_ADDRESS */ +#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t +#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF) +#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008) +#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0) + +/* FW_VA_RENDER BUFFER_SIZE */ +#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t +#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF) +#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002) +#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0) + + +static inline void psb_msvdx_clearirq(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + unsigned long mtx_int = 0; + + PSB_DEBUG_IRQ("MSVDX: clear IRQ\n"); + + /* Clear MTX interrupt */ + REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, + 1); + PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR); +} + + +static inline void psb_msvdx_disableirq(struct drm_device *dev) +{ + /* nothing */ +} + + +static inline void psb_msvdx_enableirq(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + unsigned long enables = 0; + + PSB_DEBUG_IRQ("MSVDX: enable MSVDX MTX IRQ\n"); + REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, + 1); + PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE); +} + +#define MSVDX_NEW_PMSTATE(drm_dev, msvdx_priv, new_state) \ +do { \ + msvdx_priv->pmstate = new_state; \ + sysfs_notify_dirent(msvdx_priv->sysfs_pmstate); \ + PSB_DEBUG_PM("MSVDX: %s\n", \ + (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \ + : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \ + : "clockgated")); \ +} while (0) + +#endif diff --git a/drivers/gpu/drm/psb/psb_msvdxinit.c b/drivers/gpu/drm/psb/psb_msvdxinit.c new file mode 100644 index 0000000..49c5041 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_msvdxinit.c @@ -0,0 +1,747 @@ +/** + * file psb_msvdxinit.c + * MSVDX initialization and mtx-firmware upload + * + */ + +/************************************************************************** + * + * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA + * Copyright (c) Imagination Technologies Limited, UK + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include <drm/drmP.h> +#include <drm/drm.h> +#include "psb_drv.h" +#include "psb_msvdx.h" +#include <linux/firmware.h> + +#define MSVDX_REG (dev_priv->msvdx_reg) +uint8_t psb_rev_id; +/* MSVDX FW header */ +struct msvdx_fw { + uint32_t ver; + uint32_t text_size; + uint32_t data_size; + uint32_t data_location; +}; + +int psb_wait_for_register(struct drm_psb_private *dev_priv, + uint32_t offset, uint32_t value, uint32_t enable) +{ + uint32_t tmp; + uint32_t poll_cnt = 10000; + while (poll_cnt) { + tmp = PSB_RMSVDX32(offset); + if (value == (tmp & enable)) /* All the bits are reset */ + return 0; /* So exit */ + + /* Wait a bit */ + DRM_UDELAY(1000); + poll_cnt--; + } + DRM_ERROR("MSVDX: Timeout while waiting for register %08x:" + " expecting %08x (mask %08x), got %08x\n", + offset, value, enable, tmp); + + return 1; +} + +int psb_poll_mtx_irq(struct drm_psb_private *dev_priv) +{ + int ret = 0; + uint32_t mtx_int = 0; + + REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, + 1); + + ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS, + /* Required value */ + mtx_int, + /* Enabled bits */ + mtx_int); + + if (ret) { + DRM_ERROR("MSVDX: Error Mtx did not return" + " int within a reasonable time\n"); + return ret; + } + + PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n"); + + /* Got it so clear the bit */ + PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR); + + return ret; +} + +void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv, + const uint32_t core_reg, const uint32_t val) +{ + uint32_t reg = 0; + + /* Put data in MTX_RW_DATA */ + PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA); + + /* DREADY is set to 0 and request a write */ + reg = core_reg; + REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, + MTX_RNW, 0); + REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, + MTX_DREADY, 0); + PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST); + + psb_wait_for_register(dev_priv, + MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, + MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, + MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK); +} + +void psb_upload_fw(struct drm_psb_private *dev_priv, + const uint32_t data_mem, uint32_t ram_bank_size, + uint32_t address, const unsigned int words, + const uint32_t * const data) +{ + uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0; + uint32_t access_ctrl; + + /* Save the access control register...
*/ + access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL); + + /* Wait for MCMSTAT to become idle (value 1) */ + psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, + 1, /* Required Value */ + 0xffffffff /* Enables */); + + for (loop = 0; loop < words; loop++) { + ram_id = data_mem + (address / ram_bank_size); + if (ram_id != cur_bank) { + addr = address >> 2; + ctrl = 0; + REGIO_WRITE_FIELD_LITE(ctrl, + MSVDX_MTX_RAM_ACCESS_CONTROL, + MTX_MCMID, ram_id); + REGIO_WRITE_FIELD_LITE(ctrl, + MSVDX_MTX_RAM_ACCESS_CONTROL, + MTX_MCM_ADDR, addr); + REGIO_WRITE_FIELD_LITE(ctrl, + MSVDX_MTX_RAM_ACCESS_CONTROL, + MTX_MCMAI, 1); + PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL); + cur_bank = ram_id; + } + address += 4; + + PSB_WMSVDX32(data[loop], + MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER); + + /* Wait for MCMSTAT to become idle (value 1) */ + psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, + 1, /* Required Value */ + 0xffffffff /* Enables */); + } + PSB_DEBUG_GENERAL("MSVDX: Upload done\n"); + + /* Restore the access control register... */ + PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL); +} + +static int psb_verify_fw(struct drm_psb_private *dev_priv, + const uint32_t ram_bank_size, + const uint32_t data_mem, uint32_t address, + const uint32_t words, const uint32_t * const data) +{ + uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0; + uint32_t access_ctrl; + int ret = 0; + + /* Save the access control register... */ + access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL); + + /* Wait for MCMSTAT to become idle (value 1) */ + psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, + 1, /* Required Value */ + 0xffffffff /* Enables */); + + for (loop = 0; loop < words; loop++) { + uint32_t tmp; + ram_id = data_mem + (address / ram_bank_size); + + if (ram_id != cur_bank) { + addr = address >> 2; + ctrl = 0; + REGIO_WRITE_FIELD_LITE(ctrl, + MSVDX_MTX_RAM_ACCESS_CONTROL, + MTX_MCMID, ram_id); + REGIO_WRITE_FIELD_LITE(ctrl, + MSVDX_MTX_RAM_ACCESS_CONTROL, + MTX_MCM_ADDR, addr); + REGIO_WRITE_FIELD_LITE(ctrl, + MSVDX_MTX_RAM_ACCESS_CONTROL, + MTX_MCMAI, 1); + REGIO_WRITE_FIELD_LITE(ctrl, + MSVDX_MTX_RAM_ACCESS_CONTROL, + MTX_MCMR, 1); + + PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL); + + cur_bank = ram_id; + } + address += 4; + + /* Wait for MCMSTAT to become idle (value 1) */ + psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, + 1, /* Required Value */ + 0xffffffff /* Enables */); + + tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER); + if (data[loop] != tmp) { + DRM_ERROR("psb: Firmware validation fails" + " at index=%08x\n", loop); + ret = 1; + break; + } + } + + /* Restore the access control register... */ + PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL); + + return ret; +}
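msvdx_get_fw, next, validates the firmware blob against its msvdx_fw header: the file must be exactly the header plus text_size plus data_size, both counted in 32-bit words. The arithmetic in isolation (a sketch with example values, not driver code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the msvdx_fw header defined above in psb_msvdxinit.c. */
struct msvdx_fw_hdr {
	uint32_t ver;
	uint32_t text_size;	/* in 32-bit words */
	uint32_t data_size;	/* in 32-bit words */
	uint32_t data_location;
};

static size_t expected_blob_size(const struct msvdx_fw_hdr *h)
{
	return sizeof(*h) + sizeof(uint32_t) * (size_t)h->text_size
			  + sizeof(uint32_t) * (size_t)h->data_size;
}

int main(void)
{
	/* Example values: version 2, 1000 text words, 200 data words. */
	struct msvdx_fw_hdr h = { 2, 1000, 200, 0 };

	/* A well-formed blob is 16 + 4000 + 800 = 4816 bytes. */
	printf("expected size: %zu bytes\n", expected_blob_size(&h));
	return 0;
}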
+static uint32_t *msvdx_get_fw(struct drm_device *dev, + const struct firmware **raw, uint8_t *name) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + int rc, fw_size; + int *ptr = NULL; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + rc = request_firmware(raw, name, &dev->pdev->dev); + if (rc < 0) { + DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n", + name, rc); + return NULL; + } + + if ((*raw)->size < sizeof(struct msvdx_fw)) { + DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n", + name, (*raw)->size); + return NULL; + } + + ptr = (int *) ((*raw))->data; + + if (!ptr) { + DRM_ERROR("MSVDX: Failed to load %s\n", name); + return NULL; + } + + /* another sanity check... */ + fw_size = sizeof(struct msvdx_fw) + + sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size + + sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size; + if ((*raw)->size != fw_size) { + DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n", + name, (*raw)->size); + return NULL; + } + msvdx_priv->msvdx_fw = kzalloc(fw_size, GFP_KERNEL); + if (msvdx_priv->msvdx_fw == NULL) + DRM_ERROR("MSVDX: allocate FW buffer failed\n"); + else { + memcpy(msvdx_priv->msvdx_fw, ptr, fw_size); + msvdx_priv->msvdx_fw_size = fw_size; + } + + PSB_DEBUG_GENERAL("MSVDX: releasing firmware resources\n"); + release_firmware(*raw); + + return msvdx_priv->msvdx_fw; +} + +int psb_setup_fw(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + int ret = 0; + + uint32_t ram_bank_size; + struct msvdx_fw *fw; + uint32_t *fw_ptr = NULL; + uint32_t *text_ptr = NULL; + uint32_t *data_ptr = NULL; + const struct firmware *raw = NULL; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + /* todo : Assert the clock is on - if not turn it on to upload code */ + PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n"); + PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); + + /* Reset MTX */ + PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK, + MSVDX_MTX_SOFT_RESET); + + /* Initialise the communication control area to 0 */ + if (psb_rev_id >= POULSBO_D1) { + PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1" + " or later revision.\n"); + PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1, + MSVDX_COMMS_OFFSET_FLAGS); + } else { + PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0" + " or earlier revision.\n"); + PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0, + MSVDX_COMMS_OFFSET_FLAGS); + } + + PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER); + PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE); + PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX); + PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX); + PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX); + PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX); + PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS); + + /* read register bank size */ + { + uint32_t bank_size, reg; + reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK); + bank_size = + REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK, + CR_MTX_RAM_BANK_SIZE); + ram_bank_size = (uint32_t) (1 << (bank_size + 2)); + } + + PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n", + ram_bank_size); + + /* if FW already loaded from storage */ + if (msvdx_priv->msvdx_fw) + fw_ptr = msvdx_priv->msvdx_fw; + else { + PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw.bin by udevd\n"); + fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin"); + } + + if (!fw_ptr) { + DRM_ERROR("MSVDX:load msvdx_fw.bin failed, is udevd running?\n"); + ret = 1; + goto out; + } + + fw = (struct msvdx_fw *) fw_ptr; + if (fw->ver != 0x02) { + DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch," + " got version=%02x expected version=%02x\n", + fw->ver, 0x02); + ret = 1; + goto out; + } + + text_ptr = + (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw)); + data_ptr = text_ptr + fw->text_size; + + PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n"); + PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size); + PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size); + PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n", + fw->data_location); + PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n", + *text_ptr); + PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n", + *data_ptr); + + PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n"); + psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size, +
PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, + text_ptr); + psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size, + fw->data_location - MTX_DATA_BASE, fw->data_size, + data_ptr); + +#if 0 + /* todo : Verify code upload possibly only in debug */ + ret = psb_verify_fw(dev_priv, ram_bank_size, + MTX_CORE_CODE_MEM, + PC_START_ADDRESS - MTX_CODE_BASE, + fw->text_size, text_ptr); + if (ret) { + /* Firmware code upload failed */ + ret = 1; + goto out; + } + + ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM, + fw->data_location - MTX_DATA_BASE, + fw->data_size, data_ptr); + if (ret) { + /* Firmware data upload failed */ + ret = 1; + goto out; + } +#else + (void)psb_verify_fw; +#endif + /* -- Set starting PC address */ + psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS); + + /* -- Turn on the thread */ + PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE); + + /* Wait for the signature value to be written back */ + ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE, + MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/ + 0xffffffff /* Enabled bits */); + if (ret) { + DRM_ERROR("MSVDX: firmware fails to initialize.\n"); + goto out; + } + + PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n"); + PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n", + MSVDX_COMMS_AREA_ADDR); +#if 0 + + /* Send test message */ + { + uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2]; + + MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE, + FW_VA_DEBUG_TEST2_SIZE); + MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID, + VA_MSGID_TEST2); + + ret = psb_mtx_send(dev_priv, msg_buf); + if (ret) { + DRM_ERROR("psb: MSVDX sending fails.\n"); + goto out; + } + + /* Wait for Mtx to ack this message */ + psb_poll_mtx_irq(dev_priv); + + } +#endif +out: + + return ret; +} + + +static void psb_free_ccb(struct ttm_buffer_object **ccb) +{ + ttm_bo_unref(ccb); + *ccb = NULL; +} + +/** + * Reset chip and disable interrupts. 
+ * Return 0 success, 1 failure + */ +int psb_msvdx_reset(struct drm_psb_private *dev_priv) +{ + int ret = 0; + + /* Issue software reset */ + PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL); + + ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0, + MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK); + + if (!ret) { + /* Clear interrupt enabled flag */ + PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE); + + /* Clear any pending interrupt flags */ + PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR); + } + + /* mutex_destroy(&msvdx_priv->msvdx_mutex); */ + + return ret; +} + +static int psb_allocate_ccb(struct drm_device *dev, + struct ttm_buffer_object **ccb, + uint32_t *base_addr, int size) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + struct ttm_bo_device *bdev = &dev_priv->bdev; + int ret; + struct ttm_bo_kmap_obj tmp_kmap; + bool is_iomem; + + PSB_DEBUG_INIT("MSVDX: allocate CCB\n"); + + ret = ttm_buffer_object_create(bdev, size, + ttm_bo_type_kernel, + DRM_PSB_FLAG_MEM_KERNEL | + TTM_PL_FLAG_NO_EVICT, 0, 0, 0, + NULL, ccb); + if (ret) { + DRM_ERROR("MSVDX:failed to allocate CCB.\n"); + *ccb = NULL; + return 1; + } + + ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap); + if (ret) { + PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret); + ttm_bo_unref(ccb); + *ccb = NULL; + return 1; + } + + memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0, + RENDEC_A_SIZE); + ttm_bo_kunmap(&tmp_kmap); + + *base_addr = (*ccb)->offset; + return 0; +} + +static ssize_t psb_msvdx_pmstate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct drm_psb_private *dev_priv; + struct msvdx_private *msvdx_priv; + unsigned int pmstate; + unsigned long flags; + int ret = -EINVAL; + + if (drm_dev == NULL) + return 0; + + dev_priv = drm_dev->dev_private; + msvdx_priv = dev_priv->msvdx_private; + pmstate = msvdx_priv->pmstate; + + spin_lock_irqsave(&msvdx_priv->msvdx_lock, flags); + ret = sprintf(buf, "%s\n", + (pmstate == PSB_PMSTATE_POWERUP) ? "powerup" + : ((pmstate == PSB_PMSTATE_POWERDOWN) ? 
"powerdown" + : "clockgated")); + spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, flags); + + return ret; +} + +static DEVICE_ATTR(msvdx_pmstate, 0444, psb_msvdx_pmstate_show, NULL); + + +int psb_msvdx_init(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + /* uint32_t clk_gate_ctrl = clk_enable_all; */ + uint32_t cmd; + int ret; + struct msvdx_private *msvdx_priv; + + if (!dev_priv->msvdx_private) { + msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL); + if (msvdx_priv == NULL) + goto err_exit; + + dev_priv->msvdx_private = msvdx_priv; + memset(msvdx_priv, 0, sizeof(struct msvdx_private)); + + /* get device --> drm_device --> drm_psb_private --> msvdx_priv + * for psb_msvdx_pmstate_show: msvdx_pmpolicy + * if not pci_set_drvdata, can't get drm_device from device + */ + /* pci_set_drvdata(dev->pdev, dev); */ + if (device_create_file(&dev->pdev->dev, + &dev_attr_msvdx_pmstate)) + DRM_ERROR("MSVDX: could not create sysfs file\n"); + msvdx_priv->sysfs_pmstate = sysfs_get_dirent( + dev->pdev->dev.kobj.sd, "msvdx_pmstate"); + } + + msvdx_priv = dev_priv->msvdx_private; + if (!msvdx_priv->ccb0) { /* one for the first time */ + /* Initialize comand msvdx queueing */ + INIT_LIST_HEAD(&msvdx_priv->msvdx_queue); + mutex_init(&msvdx_priv->msvdx_mutex); + spin_lock_init(&msvdx_priv->msvdx_lock); + /*figure out the stepping */ + pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id); + } + + msvdx_priv->msvdx_busy = 0; + + /* Enable Clocks */ + PSB_DEBUG_GENERAL("Enabling clocks\n"); + PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); + + /* Enable MMU by removing all bypass bits */ + PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0); + + /* move firmware loading to the place receiving first command buffer */ + + PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n"); + /* Allocate device virtual memory as required by rendec.... 
+int psb_msvdx_init(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + /* uint32_t clk_gate_ctrl = clk_enable_all; */ + uint32_t cmd; + int ret; + struct msvdx_private *msvdx_priv; + + if (!dev_priv->msvdx_private) { + msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL); + if (msvdx_priv == NULL) + goto err_exit; + + dev_priv->msvdx_private = msvdx_priv; + memset(msvdx_priv, 0, sizeof(struct msvdx_private)); + + /* psb_msvdx_pmstate_show needs to get from the device to the + * drm_device, drm_psb_private and msvdx_priv; without + * pci_set_drvdata it cannot get the drm_device from the device + */ + /* pci_set_drvdata(dev->pdev, dev); */ + if (device_create_file(&dev->pdev->dev, + &dev_attr_msvdx_pmstate)) + DRM_ERROR("MSVDX: could not create sysfs file\n"); + msvdx_priv->sysfs_pmstate = sysfs_get_dirent( + dev->pdev->dev.kobj.sd, "msvdx_pmstate"); + } + + msvdx_priv = dev_priv->msvdx_private; + if (!msvdx_priv->ccb0) { /* one for the first time */ + /* Initialize msvdx command queueing */ + INIT_LIST_HEAD(&msvdx_priv->msvdx_queue); + mutex_init(&msvdx_priv->msvdx_mutex); + spin_lock_init(&msvdx_priv->msvdx_lock); + /* figure out the stepping */ + pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id); + } + + msvdx_priv->msvdx_busy = 0; + + /* Enable Clocks */ + PSB_DEBUG_GENERAL("Enabling clocks\n"); + PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); + + /* Enable MMU by removing all bypass bits */ + PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0); + + /* move firmware loading to the place receiving first command buffer */ + + PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n"); + /* Allocate device virtual memory as required by rendec.... */ + if (!msvdx_priv->ccb0) { + ret = psb_allocate_ccb(dev, &msvdx_priv->ccb0, + &msvdx_priv->base_addr0, + RENDEC_A_SIZE); + if (ret) + goto err_exit; + } + + if (!msvdx_priv->ccb1) { + ret = psb_allocate_ccb(dev, &msvdx_priv->ccb1, + &msvdx_priv->base_addr1, + RENDEC_B_SIZE); + if (ret) + goto err_exit; + } + + + PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n", + msvdx_priv->base_addr0, msvdx_priv->base_addr1); + + PSB_WMSVDX32(msvdx_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0); + PSB_WMSVDX32(msvdx_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1); + + cmd = 0; + REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE, + RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096); + REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE, + RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE); + + cmd = 0; + REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, + RENDEC_DECODE_START_SIZE, 0); + REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, + RENDEC_BURST_SIZE_W, 1); + REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, + RENDEC_BURST_SIZE_R, 1); + REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, + RENDEC_EXTERNAL_MEMORY, 1); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1); + + cmd = 0x00101010; + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5); + + cmd = 0; + REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, + 1); + PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0); + + PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); + PSB_DEBUG_INIT("MSVDX: defer firmware loading until the first" + " user-space command is received\n"); + + msvdx_priv->msvdx_fw_loaded = 0; /* firmware still needs to be loaded */ + + psb_msvdx_clearirq(dev); + psb_msvdx_enableirq(dev); + + if (IS_MRST(dev)) { + PSB_DEBUG_INIT("MSVDX: old clock gating disable = 0x%08x\n", + PSB_RVDC32(PSB_MSVDX_CLOCKGATING)); + PSB_DEBUG_INIT("MSVDX: reset MSVDX to disable clock gating\n"); + + PSB_WVDC32(0x000101ff, PSB_MSVDX_CLOCKGATING); + + PSB_DEBUG_INIT("MSVDX: new clock gating disable = 0x%08x\n", + PSB_RVDC32(PSB_MSVDX_CLOCKGATING)); + } + +#if 0 + ret = psb_setup_fw(dev); + if (ret) + goto err_exit; + /* Send Initialisation message to firmware */ + if (0) { + uint32_t msg_init[FW_VA_INIT_SIZE >> 2]; + MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE, + FW_VA_INIT_SIZE); + MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT); + + /* Need to set this for all but A0 */ + MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD, + psb_get_default_pd_addr(dev_priv->mmu)); + + ret = psb_mtx_send(dev_priv, msg_init); + if (ret) + goto err_exit; + + psb_poll_mtx_irq(dev_priv); + } +#endif + + return 0; + +err_exit: + DRM_ERROR("MSVDX: initialization failed\n"); + if (msvdx_priv->ccb0) + psb_free_ccb(&msvdx_priv->ccb0); + if (msvdx_priv->ccb1) + psb_free_ccb(&msvdx_priv->ccb1); + kfree(dev_priv->msvdx_private); + + return 1; +}
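psb_msvdx_uninit below tears down the msvdx_pmstate sysfs attribute that psb_msvdx_init created; while the driver is loaded, every MSVDX_NEW_PMSTATE transition calls sysfs_notify_dirent on it, so userspace can block in poll() waiting for power-state changes. A minimal consumer might look like this; the sysfs path is an assumption and depends on which PCI device the driver is bound to:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; the attribute lives under the bound PCI device. */
	const char *path = "/sys/bus/pci/devices/0000:00:02.0/msvdx_pmstate";
	char buf[32];
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	/* A sysfs poll only fires after an initial read arms it. */
	read(fd, buf, sizeof(buf) - 1);

	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
	if (poll(&pfd, 1, -1) > 0) {
		lseek(fd, 0, SEEK_SET);
		ssize_t n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("msvdx pmstate: %s", buf);
		}
	}
	close(fd);
	return 0;
}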
+int psb_msvdx_uninit(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + /* Reset MSVDX chip */ + psb_msvdx_reset(dev_priv); + + /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */ + PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n"); + PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE); + + if (msvdx_priv->ccb0) + psb_free_ccb(&msvdx_priv->ccb0); + if (msvdx_priv->ccb1) + psb_free_ccb(&msvdx_priv->ccb1); + if (msvdx_priv->msvdx_fw) + kfree(msvdx_priv->msvdx_fw); + if (msvdx_priv) { + /* pci_set_drvdata(dev->pdev, NULL); */ + device_remove_file(&dev->pdev->dev, &dev_attr_msvdx_pmstate); + sysfs_put(msvdx_priv->sysfs_pmstate); + msvdx_priv->sysfs_pmstate = NULL; + + kfree(msvdx_priv); + dev_priv->msvdx_private = NULL; + } + + return 0; +} diff --git a/drivers/gpu/drm/psb/psb_powermgmt.c b/drivers/gpu/drm/psb/psb_powermgmt.c new file mode 100644 index 0000000..c59a701 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_powermgmt.c @@ -0,0 +1,1146 @@ +/************************************************************************** + * Copyright (c) 2009, Intel Corporation. + * All Rights Reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: + * Benjamin Defnet + * + */ +#include "psb_powermgmt.h" +#include "psb_drv.h" +#include "psb_intel_reg.h" +#include "psb_scene.h" +#include "lnc_topaz.h" +#include "psb_msvdx.h" + +#include <linux/mutex.h> + +static struct mutex g_state_change_mutex; +static int g_hw_power_status_mask; +static int g_pci_power_status; +static atomic_t g_display_access_count; +static atomic_t g_graphics_access_count; +static atomic_t g_videoenc_access_count; +static atomic_t g_videodec_access_count; +static bool g_suspend_in_progress; +static bool g_resume_in_progress; +static int g_suspend_mask; +static int g_resume_mask; +static bool g_forcing_resume; +static atomic_t g_pm_waiters; + +/*#define PWRMGMT_DEBUG*/ +#ifdef PWRMGMT_DEBUG + #define PWR_PRINT(_fmt, _arg...) \ + printk(KERN_INFO _fmt, ##_arg) +#else + #define PWR_PRINT(_fmt, _arg...)
{} +#endif + +/* + * powermgmt_init + * + * Description: Initialize this power management module + */ +void powermgmt_init(void) +{ + mutex_init(&g_state_change_mutex); + g_hw_power_status_mask = PSB_ALL_ISLANDS; + g_pci_power_status = 1; + atomic_set(&g_display_access_count, 0); + atomic_set(&g_graphics_access_count, 0); + atomic_set(&g_videoenc_access_count, 0); + atomic_set(&g_videodec_access_count, 0); + atomic_set(&g_pm_waiters, 0); +} + +/* + * powermgmt_shutdown + * + * Description: Shut down this power management module + */ +void powermgmt_shutdown(void) +{ + mutex_destroy(&g_state_change_mutex); +} + +/* + * powermgmt_down_island_power + * + * Description: Cut power to the specified island (powergating) + */ +void powermgmt_down_island_power(struct drm_device *dev, int islands) +{ + u32 pwr_cnt = 0; + u32 pwr_mask = 0; + u32 pwr_sts; + + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + + PWR_PRINT("BEN_KERNEL_OSPM************DOWN ISLAND POWER %d\n", islands); + + if (!IS_MRST(dev)) { + g_hw_power_status_mask &= ~islands; + return; + } + + g_hw_power_status_mask &= ~islands; + + if (islands & PSB_GRAPHICS_ISLAND) { + pwr_cnt |= PSB_PWRGT_GFX_MASK; + pwr_mask |= PSB_PWRGT_GFX_MASK; + } + if (islands & PSB_VIDEO_ENC_ISLAND) { + pwr_cnt |= PSB_PWRGT_VID_ENC_MASK; + pwr_mask |= PSB_PWRGT_VID_ENC_MASK; + } + if (islands & PSB_VIDEO_DEC_ISLAND) { + pwr_cnt |= PSB_PWRGT_VID_DEC_MASK; + pwr_mask |= PSB_PWRGT_VID_DEC_MASK; + } + if (pwr_cnt) { + pwr_cnt |= inl(dev_priv->apm_base); + outl(pwr_cnt, dev_priv->apm_base); + while (true) { + pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); + if ((pwr_sts & pwr_mask) == pwr_mask) + break; + else + udelay(10); + } + } + + if (islands & PSB_DISPLAY_ISLAND) { + pwr_mask = PSB_PWRGT_DISPLAY_MASK; + outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC)); + while (true) { + pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS); + if ((pwr_sts & pwr_mask) == pwr_mask) + break; + else + udelay(10); + } + } +} + +/* + * powermgmt_up_island_power + * + * Description: Restore power to the specified island (powergating) + */ +void powermgmt_up_island_power(struct drm_device *dev, int islands) +{ + u32 pwr_cnt; + u32 pwr_sts; + u32 pwr_mask; + u32 count; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + + PWR_PRINT("BEN_KERNEL_OSPM************UP ISLAND POWER %d\n", islands); + + if (!IS_MRST(dev)) { + g_hw_power_status_mask |= islands; + return; + } + + if (islands & (PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND | + PSB_VIDEO_DEC_ISLAND)) { + pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); + pwr_mask = 0; + if (islands & PSB_GRAPHICS_ISLAND) { + pwr_cnt &= ~PSB_PWRGT_GFX_MASK; + pwr_mask |= PSB_PWRGT_GFX_MASK; + } + if (islands & PSB_VIDEO_ENC_ISLAND) { + pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK; + pwr_mask |= PSB_PWRGT_VID_ENC_MASK; + } + if (islands & PSB_VIDEO_DEC_ISLAND) { + pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK; + pwr_mask |= PSB_PWRGT_VID_DEC_MASK; + } + + if (pwr_mask) { + count = 5; + pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); + outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); + while (true) { + pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); + if ((pwr_sts & pwr_mask) == 0) + break; + else + udelay(10); + } + } + } + + if (islands & PSB_DISPLAY_ISLAND) { + count = 5; + pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC); + pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK; + pwr_mask = PSB_PWRGT_DISPLAY_MASK; + outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC)); + while (true) { + pwr_sts = 
inl(dev_priv->ospm_base + PSB_PM_SSS); + if ((pwr_sts & pwr_mask) == 0) + break; + else + udelay(10); + } + } + + g_hw_power_status_mask |= islands; +} + +/* + * save_display_registers + * + * Description: We are going to suspend so save current display + * register state. + */ +static int save_display_registers(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct drm_crtc * crtc; + struct drm_connector * connector; + int i; + + /* Display arbitration control + watermarks */ + dev_priv->saveDSPARB = PSB_RVDC32(DSPARB); + dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1); + dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2); + dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3); + dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4); + dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5); + dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6); + dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); + + if (IS_MRST(dev)) { + /* Pipe & plane A info */ + dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF); + dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC); + dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0); + dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1); + dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A); + dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A); + dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A); + dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A); + dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A); + dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A); + dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A); + dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A); + dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR); + dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE); + dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE); + dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF); + dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF); + dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF); + + /*save cursor regs*/ + dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR); + dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE); + dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS); + + /*save palette (gamma) */ + for (i = 0; i < 256; i++) + dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i<<2)); + + /*save performance state*/ + dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE); + + /* LVDS state */ + dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL); + dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS); + dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS); + dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL); + dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2); + dev_priv->saveLVDS = PSB_RVDC32(LVDS); + dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL); + dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON); + dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF); + dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE); + + /* HW overlay */ + dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD); + dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0); + dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1); + dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2); + dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3); + dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4); + dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5); + + } else { /*PSB*/ + /*save crtc and output state*/ + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if(drm_helper_crtc_in_use(crtc)) { + crtc->funcs->save(crtc); + } + } + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + connector->funcs->save(connector); + } + 
mutex_unlock(&dev->mode_config.mutex); + } + + /* Interrupt state */ + /* + * Handled in psb_irq.c + */ + + return 0; +} + +/* + * restore_display_registers + * + * Description: We are going to resume so restore display register state. + */ +static int restore_display_registers(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct drm_crtc * crtc; + struct drm_connector * connector; + unsigned long i, pp_stat; + + /* Display arbitration + watermarks */ + PSB_WVDC32(dev_priv->saveDSPARB, DSPARB); + PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1); + PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2); + PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3); + PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4); + PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5); + PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6); + PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT); + + /*make sure VGA plane is off. it initializes to on after reset!*/ + PSB_WVDC32(0x80000000, VGACNTRL); + + if (IS_MRST(dev)) { + /* set the plls */ + PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0); + PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1); + /* Actually enable it */ + PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A); + DRM_UDELAY(150); + + /* Restore mode */ + PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A); + PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A); + PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A); + PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A); + PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A); + PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A); + PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC); + PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A); + + /*restore performance mode*/ + PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE); + + /*enable the pipe*/ + PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF); + + /*set up the plane*/ + PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF); + PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE); + PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF); + + /* Enable the plane */ + PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR); + PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF); + + /*Enable Cursor A*/ + PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR); + PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS); + PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE); + + /* restore palette (gamma) */ + /*DRM_UDELAY(50000); */ + for (i = 0; i < 256; i++) + PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i<<2)); + + PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2); + PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/ + PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL); + PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS); + PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS); + PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL); + PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON); + PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF); + PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE); + PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL); + + /*wait for cycle delay*/ + do { + pp_stat = PSB_RVDC32(PP_STATUS); + } while (pp_stat & 0x08000000); + + DRM_UDELAY(999); + /*wait for panel power up*/ + do { + pp_stat = PSB_RVDC32(PP_STATUS); + } while (pp_stat & 0x10000000); + + /* restore HW overlay */ + PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD); + PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0); + PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1); + PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2); + PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3); + PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4); + 
PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+
+    } else { /*PSB*/
+        mutex_lock(&dev->mode_config.mutex);
+        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+            if (drm_helper_crtc_in_use(crtc))
+                crtc->funcs->restore(crtc);
+        }
+
+        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+            connector->funcs->restore(connector);
+        }
+        mutex_unlock(&dev->mode_config.mutex);
+    }
+
+    /*Interrupt state*/
+    /*
+     * Handled in psb_irq.c
+     */
+
+    return 0;
+}
+
+/*
+ * powermgmt_suspend_graphics
+ *
+ * Description: Suspend the graphics hardware, saving state and disabling
+ * as necessary.
+ */
+static void powermgmt_suspend_graphics(struct drm_device *dev, bool b_initiated_by_ospm)
+{
+    struct drm_psb_private *dev_priv = dev->dev_private;
+
+    if (!(g_hw_power_status_mask & PSB_GRAPHICS_ISLAND))
+        return;
+    PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics\n");
+
+    dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
+    if (b_initiated_by_ospm) {
+        int ret = psb_idle_3d(dev);
+        if (ret == -EBUSY) {
+            PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics ***3d BUSY!!!!!!\n");
+            return;
+        }
+
+        ret = psb_idle_2d(dev);
+        if (ret == -EBUSY) {
+            PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_graphics ***2d BUSY!!!!!!\n");
+            return;
+        }
+        if (IS_POULSBO(dev))
+            flush_scheduled_work();
+    }
+    psb_irq_uninstall_islands(dev, PSB_GRAPHICS_ISLAND);
+    powermgmt_down_island_power(dev, PSB_GRAPHICS_ISLAND);
+}
+
+/*
+ * powermgmt_resume_graphics
+ *
+ * Description: Resume the graphics hardware, restoring state and enabling
+ * as necessary.
+ */
+static void powermgmt_resume_graphics(struct drm_device *dev)
+{
+    struct drm_psb_private *dev_priv = dev->dev_private;
+
+    if (g_hw_power_status_mask & PSB_GRAPHICS_ISLAND)
+        return;
+    PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_graphics\n");
+
+    INIT_LIST_HEAD(&dev_priv->resume_buf.head);
+
+    powermgmt_up_island_power(dev, PSB_GRAPHICS_ISLAND);
+
+    /*
+     * The SGX loses its register contents.
+     * Restore BIF registers. The MMU page tables are
+     * "normal" pages, so their contents should be kept.
+     */
+    PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
+    PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+    PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+    PSB_RSGX32(PSB_CR_BIF_BANK1);
+
+    psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+    psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+    psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
+
+    if (IS_POULSBO(dev))
+        psb_reset(dev_priv, 1);
+
+    dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
+    PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
+    (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
+
+    /*
+     * Persistent 3D base registers and USSE base registers.
+     */
+
+    PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
+    PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
+
+    /*
+     * Now, re-initialize the 3D engine.
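+     * (Resume the xhw communication channel, re-check the TA memory
+     * allocation and, when one is present, have the firmware reload it.)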
+ */ + + if (dev_priv->xhw_on) + psb_xhw_resume(dev_priv, &dev_priv->resume_buf); + + psb_scheduler_ta_mem_check(dev_priv); + if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) { + psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf, + PSB_TA_MEM_FLAG_TA | + PSB_TA_MEM_FLAG_RASTER | + PSB_TA_MEM_FLAG_HOSTA | + PSB_TA_MEM_FLAG_HOSTD | + PSB_TA_MEM_FLAG_INIT, + dev_priv->ta_mem->ta_memory->offset, + dev_priv->ta_mem->hw_data->offset, + dev_priv->ta_mem->hw_cookie); + } +} + +/* + * powermgmt_suspend_videodec + * + * Description: Suspend the video decode hardware saving state and disabling + * as necessary. + */ +static void powermgmt_suspend_videodec(struct drm_device *dev, bool b_initiated_by_ospm) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + if (!(g_hw_power_status_mask & PSB_VIDEO_DEC_ISLAND)) + return; + PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_videodec\n"); + + if (b_initiated_by_ospm) + psb_wait_msvdx_idle(dev); + else { + /* return without power off for D0i3/APM */ + if (psb_check_msvdx_idle(dev)) + return; + } + + psb_irq_uninstall_islands(dev, PSB_VIDEO_DEC_ISLAND); + /* UGLY ... expose internal structure.. + * it should be a function of save_context + * but there is no need for restore_context... + * replace it with a function? + */ + msvdx_priv->msvdx_needs_reset = 1; + powermgmt_down_island_power(dev, PSB_VIDEO_DEC_ISLAND); + + MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERDOWN); +} + +/* + * powermgmt_resume_videodec + * + * Description: Resume the video decode hardware restoring state and enabling + * as necessary. + */ +static void powermgmt_resume_videodec(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct msvdx_private *msvdx_priv = dev_priv->msvdx_private; + + if (g_hw_power_status_mask & PSB_VIDEO_DEC_ISLAND) + return; + PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_videodec\n"); + + powermgmt_up_island_power(dev, PSB_VIDEO_DEC_ISLAND); + MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERUP); +} + +/* + * powermgmt_suspend_videoenc + * + * Description: Suspend the video encode hardware saving state and disabling + * as necessary. + */ +static void powermgmt_suspend_videoenc(struct drm_device *dev, bool b_initiated_by_ospm) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + if (!(g_hw_power_status_mask & PSB_VIDEO_ENC_ISLAND)) + return; + PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_videoenc\n"); + + if (b_initiated_by_ospm) + lnc_wait_topaz_idle(dev); + else { + /* return without power off for D0i3/APM */ + if (lnc_check_topaz_idle(dev)) + return; + } + + psb_irq_uninstall_islands(dev, PSB_VIDEO_ENC_ISLAND); + lnc_topaz_save_mtx_state(dev); + powermgmt_down_island_power(dev, PSB_VIDEO_ENC_ISLAND); + + TOPAZ_NEW_PMSTATE(dev, topaz_priv, PSB_PMSTATE_POWERDOWN); +} + +/* + * powermgmt_resume_videoenc + * + * Description: Resume the video encode hardware restoring state and enabling + * as necessary. 
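+ * Powers the encode island back up and then reloads the TOPAZ MTX
+ * context that was saved by the matching suspend path.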
+ */ +static void powermgmt_resume_videoenc(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)dev->dev_private; + struct topaz_private *topaz_priv = dev_priv->topaz_private; + + if (g_hw_power_status_mask & PSB_VIDEO_ENC_ISLAND) + return; + PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_videoenc\n"); + + powermgmt_up_island_power(dev, PSB_VIDEO_ENC_ISLAND); + lnc_topaz_restore_mtx_state(dev); + + TOPAZ_NEW_PMSTATE(dev, topaz_priv, PSB_PMSTATE_POWERUP); +} + +/* + * powermgmt_suspend_display + * + * Description: Suspend the display hardware saving state and disabling + * as necessary. + */ +static void powermgmt_suspend_display(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + int pp_stat, jb; + + if (!(g_hw_power_status_mask & PSB_DISPLAY_ISLAND)) + return; + PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_display\n"); + + save_display_registers(dev); + + /*shutdown the panel*/ + PSB_WVDC32(0, PP_CONTROL); + + do { + pp_stat = PSB_RVDC32(PP_STATUS); + } while (pp_stat & 0x80000000); + + /*turn off the plane*/ + PSB_WVDC32(0x58000000, DSPACNTR); + PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/ + jb = jiffies + 4; /*wait 4 ticks*/ + while (jiffies < jb) + schedule(); + + /*turn off pipe*/ + PSB_WVDC32(0x0, PIPEACONF); + jb = jiffies + 8; /*wait 8 ticks*/ + while (jiffies < jb) + schedule(); + + /*turn off PLLs*/ + PSB_WVDC32(0, MRST_DPLL_A); + + powermgmt_down_island_power(dev, PSB_DISPLAY_ISLAND); +} + +/* + * powermgmt_resume_display + * + * Description: Resume the display hardware restoring state and enabling + * as necessary. + */ +static void powermgmt_resume_display(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_gtt *pg = dev_priv->pg; + + if (g_hw_power_status_mask & PSB_DISPLAY_ISLAND) + return; + PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_display\n"); + + /* turn on the display power island */ + powermgmt_up_island_power(dev, PSB_DISPLAY_ISLAND); + + PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); + pci_write_config_word(pdev, PSB_GMCH_CTRL, + pg->gmch_ctrl | _PSB_GMCH_ENABLED); + + /* Don't reinitialize the GTT as it is unnecessary. The gtt is + * stored in memory so it will automatically be restored. All + * we need to do is restore the PGETBL_CTL which we already do + * above. + */ + /*psb_gtt_init(dev_priv->pg, 1);*/ + + restore_display_registers(dev); +} + +/* + * powermgmt_suspend_pci + * + * Description: Suspend the pci device saving state and disabling + * as necessary. 
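+ * Saves PCI config space, the BSM/VBT scratch dwords and the MSI
+ * address/data pair, then disables the device and enters D3hot.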
+ */
+static void powermgmt_suspend_pci(struct pci_dev *pdev)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct drm_psb_private *dev_priv = dev->dev_private;
+    struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+    u32 bsm, vbt;
+
+    if (!g_pci_power_status)
+        return;
+    PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend_pci\n");
+
+    pci_save_state(pdev);
+    pci_read_config_dword(pci_gfx_root, 0x5C, &bsm);
+    dev_priv->saveBSM = bsm;
+    pci_read_config_dword(pci_gfx_root, 0xFC, &vbt);
+    dev_priv->saveVBT = vbt;
+    pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+    pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+    pci_disable_device(pdev);
+    pci_set_power_state(pdev, PCI_D3hot);
+
+    g_pci_power_status = 0;
+}
+
+/*
+ * powermgmt_resume_pci
+ *
+ * Description: Resume the pci device, restoring state and enabling
+ * as necessary.
+ */
+static int powermgmt_resume_pci(struct pci_dev *pdev)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct drm_psb_private *dev_priv = dev->dev_private;
+    struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+    int ret = 0;
+
+    if (g_pci_power_status)
+        return ret;
+
+    PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_pci\n");
+
+    pci_set_power_state(pdev, PCI_D0);
+    pci_restore_state(pdev);
+    pci_write_config_dword(pci_gfx_root, 0x5c, dev_priv->saveBSM);
+    pci_write_config_dword(pci_gfx_root, 0xFC, dev_priv->saveVBT);
+    /* restoring MSI address and data in PCIx space */
+    pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+    pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+    ret = pci_enable_device(pdev);
+
+    g_pci_power_status = 1;
+
+    return ret;
+}
+
+/*
+ * powermgmt_suspend
+ *
+ * Description: OSPM is telling our driver to suspend, so save state
+ * and power down all hardware.
+ */
+int powermgmt_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+    int ret;
+    ret = powermgmt_suspend_islands(pdev, PSB_ALL_ISLANDS, true);
+    if (ret == -EBUSY)
+        PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_suspend***BUSY!!!!!!\n");
+
+    return ret;
+}
+
+/*
+ * powermgmt_suspend_islands
+ *
+ * Description: Suspend the specified islands, saving state
+ * and powering down the hardware.
+ */
+int powermgmt_suspend_islands(struct pci_dev *pdev, int hw_islands, bool b_initiated_by_ospm)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
+    struct psb_scheduler *scheduler = &dev_priv->scheduler;
+    unsigned long irq_flags;
+    int ret = 0;
+
+    if (in_interrupt()) {
+        /*
+         * If an interrupt handler calls powermgmt_suspend_islands() we
+         * cannot take the mutex. Right now only video enc/dec call us
+         * from interrupt context, and the only code that resumes video
+         * enc/dec is internal to this driver, so it is safe to just
+         * proceed. If an OSPM-initiated suspend is already in progress,
+         * simply return; it will take care of powering off video
+         * enc/dec for us. Also, don't set g_suspend_mask or
+         * g_suspend_in_progress: this function runs atomically in
+         * interrupt context, so no outside party gets the chance to
+         * care, and we don't want to overwrite the state of any
+         * pending suspend operation that was interrupted.
+ */
+        if (b_initiated_by_ospm)
+            return ret;
+    }
+    else {
+        mutex_lock(&g_state_change_mutex);
+
+        g_suspend_mask = hw_islands;
+        g_suspend_in_progress = true;
+    }
+    atomic_inc(&g_pm_waiters);
+
+    if (g_hw_power_status_mask & PSB_GRAPHICS_ISLAND) {
+        if (atomic_read(&g_graphics_access_count))
+            ret = -EBUSY;
+        if ((PSB_RSGX32(PSB_CR_2D_SOCIF) !=
+             _PSB_C2_SOCIF_EMPTY) ||
+            ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+              _PSB_C2B_STATUS_BUSY) != 0)) {
+            ret = -EBUSY;
+        }
+        spin_lock_irqsave(&scheduler->lock, irq_flags);
+        if (!scheduler->idle ||
+            !list_empty(&scheduler->raster_queue) ||
+            !list_empty(&scheduler->ta_queue) ||
+            !list_empty(&scheduler->hp_raster_queue) ||
+            scheduler->feedback_task) {
+            ret = -EBUSY;
+        }
+        spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+    }
+    if ((hw_islands & PSB_VIDEO_DEC_ISLAND) &&
+        atomic_read(&g_videodec_access_count))
+        ret = -EBUSY;
+    if ((hw_islands & PSB_VIDEO_ENC_ISLAND) &&
+        atomic_read(&g_videoenc_access_count))
+        ret = -EBUSY;
+    if ((hw_islands & PSB_DISPLAY_ISLAND) &&
+        atomic_read(&g_display_access_count))
+        ret = -EBUSY;
+
+    atomic_dec(&g_pm_waiters);
+
+    if (!ret) {
+        /*disable gfx interrupt later when sgx is idle*/
+        psb_irq_uninstall_islands(dev, hw_islands & ~PSB_GRAPHICS_ISLAND &
+                                  ~PSB_VIDEO_ENC_ISLAND & ~PSB_VIDEO_DEC_ISLAND);
+
+        if (hw_islands & PSB_VIDEO_DEC_ISLAND)
+            powermgmt_suspend_videodec(dev, b_initiated_by_ospm);
+        if (IS_MRST(dev)) {
+            if (hw_islands & PSB_VIDEO_ENC_ISLAND)
+                powermgmt_suspend_videoenc(dev, b_initiated_by_ospm);
+        }
+        if (hw_islands & PSB_GRAPHICS_ISLAND)
+            powermgmt_suspend_graphics(dev, b_initiated_by_ospm);
+        if (hw_islands & PSB_DISPLAY_ISLAND)
+            powermgmt_suspend_display(dev);
+        if (g_hw_power_status_mask == 0) {
+            if (drm_core_check_feature(dev, DRIVER_MODESET))
+                drm_irq_uninstall(dev);
+            powermgmt_suspend_pci(pdev);
+        }
+    }
+
+#ifdef OSPM_STAT
+    if (hw_islands & PSB_GRAPHICS_ISLAND) {
+        bool b_change = true;
+        if (dev_priv->graphics_state == PSB_PWR_STATE_D0)
+            dev_priv->gfx_d0_time += jiffies - dev_priv->gfx_last_mode_change;
+        else if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
+            dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
+        else
+            b_change = false;
+        if (b_change) {
+            dev_priv->gfx_last_mode_change = jiffies;
+            if (g_hw_power_status_mask & PSB_DISPLAY_ISLAND) {
+                dev_priv->graphics_state = PSB_PWR_STATE_D0i3;
+                dev_priv->gfx_d0i3_cnt++;
+            } else {
+                dev_priv->graphics_state = PSB_PWR_STATE_D3;
+                dev_priv->gfx_d3_cnt++;
+            }
+        }
+    }
+#endif
+
+    if (!in_interrupt()) {
+        g_suspend_in_progress = false;
+        mutex_unlock(&g_state_change_mutex);
+    }
+
+    return ret;
+}
+
+/*
+ * powermgmt_resume
+ *
+ * Description: OSPM is telling our driver to resume, so restore state
+ * and power up the display. Leave graphics and video powered off, as
+ * they will be powered up once needed.
+ */
+int powermgmt_resume(struct pci_dev *pdev)
+{
+    return 0;
+    /*return powermgmt_resume_islands(pdev, PSB_DISPLAY_ISLAND);*/
+}
+
+/*
+ * powermgmt_resume_islands
+ *
+ * Description: Resume the specified islands, restoring state
+ * and powering them up.
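+ * If everything was powered off, the PCI device is brought back first;
+ * the requested islands are then resumed and their interrupts
+ * re-installed or re-enabled.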
+ */
+int powermgmt_resume_islands(struct pci_dev *pdev, int hw_islands)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
+    bool b_install_irq = false;
+    int ret = 0;
+
+    if (!g_forcing_resume)
+        mutex_lock(&g_state_change_mutex);
+
+    g_resume_mask = hw_islands;
+    g_resume_in_progress = true;
+
+    PWR_PRINT("BEN_KERNEL_OSPM************powermgmt_resume_islands\n");
+
+    if (g_hw_power_status_mask == 0) {
+        if (powermgmt_resume_pci(pdev))
+            goto resume_exit;
+        b_install_irq = drm_core_check_feature(dev, DRIVER_MODESET);
+    }
+
+    if (hw_islands & PSB_DISPLAY_ISLAND)
+        powermgmt_resume_display(pdev);
+    if (IS_MRST(dev)) {
+        if (hw_islands & PSB_VIDEO_ENC_ISLAND)
+            powermgmt_resume_videoenc(dev);
+    }
+    if (hw_islands & PSB_VIDEO_DEC_ISLAND)
+        powermgmt_resume_videodec(dev);
+    if (hw_islands & PSB_GRAPHICS_ISLAND)
+        powermgmt_resume_graphics(dev);
+    if (b_install_irq)
+        drm_irq_install(dev);
+    else {
+        psb_irq_preinstall_islands(dev, hw_islands);
+        psb_irq_postinstall_islands(dev, hw_islands);
+    }
+
+#ifdef OSPM_STAT
+    if (hw_islands & PSB_GRAPHICS_ISLAND) {
+        bool b_change = true;
+        if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
+            dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
+        else if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
+            dev_priv->gfx_d3_time += jiffies - dev_priv->gfx_last_mode_change;
+        else
+            b_change = false;
+
+        if (b_change) {
+            dev_priv->gfx_last_mode_change = jiffies;
+            dev_priv->graphics_state = PSB_PWR_STATE_D0;
+            dev_priv->gfx_d0_cnt++;
+        }
+    }
+#endif
+
+    g_resume_in_progress = false;
+
+resume_exit:
+    if (!g_forcing_resume)
+        mutex_unlock(&g_state_change_mutex);
+    return ret;
+}
+
+/*
+ * powermgmt_using_hw_begin
+ *
+ * Description: Notify the PowerMgmt module that you will be accessing the
+ * specified islands' hw, so it must not power them off. If force_on is
+ * true, this will power on any of the specified islands which are off.
+ * Otherwise, this will return false and the caller is expected not to
+ * access the hw.
+ *
+ * NOTE *** If this is called from an interrupt handler or other atomic
+ * context, then it will return false if we are in the middle of a
+ * power state transition, and the caller will be expected to handle that
+ * even if force_on is set to true.
+ */
+bool powermgmt_using_hw_begin(struct pci_dev *pdev, int hw_islands, bool force_on)
+{
+    bool ret = true;
+    int off_islands = 0;
+    bool b_atomic = (in_interrupt() || in_atomic());
+
+    if (!b_atomic)
+        mutex_lock(&g_state_change_mutex);
+
+    if (b_atomic &&
+        (powermgmt_is_suspend_in_progress(hw_islands) ||
+         powermgmt_is_resume_in_progress(hw_islands))) {
+        if (force_on)
+            printk(KERN_WARNING "!!!WARNING!!! 
powermgmt_using_hw_begin - force_on failed - be sure to check return value !!!WARNING!!!\n");
+        ret = false;
+    } else {
+        off_islands = hw_islands & (PSB_ALL_ISLANDS & ~g_hw_power_status_mask);
+        if (off_islands) {
+            if (force_on) {
+                g_forcing_resume = true;
+                powermgmt_resume_islands(pdev, off_islands);
+                g_forcing_resume = false;
+            } else {
+                ret = false;
+            }
+        }
+    }
+
+    if (ret) {
+        if (hw_islands & PSB_GRAPHICS_ISLAND)
+            atomic_inc(&g_graphics_access_count);
+        if (hw_islands & PSB_VIDEO_ENC_ISLAND)
+            atomic_inc(&g_videoenc_access_count);
+        if (hw_islands & PSB_VIDEO_DEC_ISLAND)
+            atomic_inc(&g_videodec_access_count);
+        if (hw_islands & PSB_DISPLAY_ISLAND)
+            atomic_inc(&g_display_access_count);
+    }
+
+    if (!b_atomic)
+        mutex_unlock(&g_state_change_mutex);
+
+    return ret;
+}
+
+/*
+ * powermgmt_using_hw_end
+ *
+ * Description: Notify the PowerMgmt module that you are done accessing the
+ * specified islands' hw, so it is free to power them off. Note that this
+ * function doesn't actually power off the islands. The caller should
+ * call powermgmt_suspend_islands() if it wishes to proactively power
+ * them down.
+ */
+void powermgmt_using_hw_end(int hw_islands)
+{
+    if (hw_islands & PSB_GRAPHICS_ISLAND)
+        atomic_dec(&g_graphics_access_count);
+    if (hw_islands & PSB_VIDEO_ENC_ISLAND)
+        atomic_dec(&g_videoenc_access_count);
+    if (hw_islands & PSB_VIDEO_DEC_ISLAND)
+        atomic_dec(&g_videodec_access_count);
+    if (hw_islands & PSB_DISPLAY_ISLAND)
+        atomic_dec(&g_display_access_count);
+
+    if (!atomic_read(&g_graphics_access_count) &&
+        !atomic_read(&g_videoenc_access_count) &&
+        !atomic_read(&g_videodec_access_count) &&
+        !atomic_read(&g_display_access_count) &&
+        atomic_read(&g_pm_waiters)) {
+        /* all islands are idle now; pending suspend requests poll the
+           access counts, so there is nothing to signal here */
+    }
+
+    WARN_ON(atomic_read(&g_graphics_access_count) < 0);
+    WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
+    WARN_ON(atomic_read(&g_videodec_access_count) < 0);
+    WARN_ON(atomic_read(&g_display_access_count) < 0);
+}
+
+/*
+ * powermgmt_is_hw_on
+ *
+ * Description: Do an instantaneous check for whether the specified islands
+ * are on. Only use this in cases where you know the g_state_change_mutex
+ * is already held, such as in irq install/uninstall. Otherwise, use
+ * powermgmt_using_hw_begin().
+ */
+bool powermgmt_is_hw_on(struct pci_dev *pdev, int hw_islands)
+{
+    return ((g_hw_power_status_mask & hw_islands) == hw_islands);
+}
+
+/*
+ * powermgmt_is_suspend_in_progress
+ *
+ * Description: Are we in the middle of suspending any of the
+ * specified hardware?
+ */
+bool powermgmt_is_suspend_in_progress(int hw_islands)
+{
+    return g_suspend_in_progress && (g_suspend_mask & hw_islands);
+}
+
+/*
+ * powermgmt_is_resume_in_progress
+ *
+ * Description: Are we in the middle of resuming any of the
+ * specified hardware?
+ */
+bool powermgmt_is_resume_in_progress(int hw_islands)
+{
+    return g_resume_in_progress && (g_resume_mask & hw_islands);
+}
+
+/*
+ * powermgmt_is_gfx_busy
+ *
+ * Description: Is someone using the GFX HW currently?
+ */
+bool powermgmt_is_gfx_busy(void)
+{
+    return atomic_read(&g_graphics_access_count) ? true : false;
+}
diff --git a/drivers/gpu/drm/psb/psb_powermgmt.h b/drivers/gpu/drm/psb/psb_powermgmt.h
new file mode 100644
index 0000000..5b40495
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_powermgmt.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: + * Benjamin Defnet + * + */ +#ifndef _PSB_POWERMGMT_H_ +#define _PSB_POWERMGMT_H_ + +#include + +#define PSB_GRAPHICS_ISLAND 0x1 +#define PSB_VIDEO_ENC_ISLAND 0x2 +#define PSB_VIDEO_DEC_ISLAND 0x4 +#define PSB_DISPLAY_ISLAND 0x8 +#define PSB_ALL_ISLANDS 0xf + +void powermgmt_init(void); +void powermgmt_shutdown(void); + +/* + * OSPM will call these functions + */ +int powermgmt_suspend(struct pci_dev *pdev, pm_message_t state); +int powermgmt_resume(struct pci_dev *pdev); + +/* + * These are the functions the driver should call to do internally driven + * power gating (D0i3) + */ +int powermgmt_suspend_islands(struct pci_dev *pdev, int hw_islands, bool b_initiated_by_ospm); +int powermgmt_resume_islands(struct pci_dev *pdev, int hw_islands); + +/* + * These are the functions the driver should use to wrap all hw access + * (i.e. register reads and writes) + */ +bool powermgmt_using_hw_begin(struct pci_dev *pdev, int hw_islands, bool force_on); +void powermgmt_using_hw_end(int hw_islands); + +/* + * Use this function to do an instantaneous check for if the hw is on. + * Only use this in cases where you know the g_state_change_mutex + * is already held such as in irq install/uninstall and you need to + * prevent a deadlock situation. Otherwise use powermgmt_using_hw_begin(). + */ +bool powermgmt_is_hw_on(struct pci_dev *pdev, int hw_islands); + +bool powermgmt_is_suspend_in_progress(int hw_islands); +bool powermgmt_is_resume_in_progress(int hw_islands); +bool powermgmt_is_gfx_busy(void); +#endif /*_PSB_POWERMGMT_H_*/ diff --git a/drivers/gpu/drm/psb/psb_reg.h b/drivers/gpu/drm/psb/psb_reg.h new file mode 100644 index 0000000..4974689 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_reg.h @@ -0,0 +1,574 @@ +/************************************************************************** + * + * Copyright (c) (2005-2007) Imagination Technologies Limited. + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. + * + **************************************************************************/ +/* + */ +#ifndef _PSB_REG_H_ +#define _PSB_REG_H_ + +#define PSB_CR_CLKGATECTL 0x0000 +#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24) +#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20) +#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20) +#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16) +#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16) +#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12) +#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12) +#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8) +#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8) +#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4) +#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4) +#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0) +#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0) +#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0) +#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1) +#define _PSB_C_CLKGATECTL_CLKG_AUTO (2) + +#define PSB_CR_CORE_ID 0x0010 +#define _PSB_CC_ID_ID_SHIFT (16) +#define _PSB_CC_ID_ID_MASK (0xFFFF << 16) +#define _PSB_CC_ID_CONFIG_SHIFT (0) +#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0) + +#define PSB_CR_CORE_REVISION 0x0014 +#define _PSB_CC_REVISION_DESIGNER_SHIFT (24) +#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24) +#define _PSB_CC_REVISION_MAJOR_SHIFT (16) +#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16) +#define _PSB_CC_REVISION_MINOR_SHIFT (8) +#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8) +#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0) +#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0) + +#define PSB_CR_DESIGNER_REV_FIELD1 0x0018 + +#define PSB_CR_SOFT_RESET 0x0080 +#define _PSB_CS_RESET_TSP_RESET (1 << 6) +#define _PSB_CS_RESET_ISP_RESET (1 << 5) +#define _PSB_CS_RESET_USE_RESET (1 << 4) +#define _PSB_CS_RESET_TA_RESET (1 << 3) +#define _PSB_CS_RESET_DPM_RESET (1 << 2) +#define _PSB_CS_RESET_TWOD_RESET (1 << 1) +#define _PSB_CS_RESET_BIF_RESET (1 << 0) + +#define PSB_CR_DESIGNER_REV_FIELD2 0x001C + +#define PSB_CR_EVENT_HOST_ENABLE2 0x0110 + +#define PSB_CR_EVENT_STATUS2 0x0118 + +#define PSB_CR_EVENT_HOST_CLEAR2 0x0114 +#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4) + +#define PSB_CR_EVENT_STATUS 0x012C + +#define PSB_CR_EVENT_HOST_ENABLE 0x0130 + +#define PSB_CR_EVENT_HOST_CLEAR 0x0134 +#define _PSB_CE_MASTER_INTERRUPT (1 << 31) +#define _PSB_CE_TA_DPM_FAULT (1 << 28) +#define _PSB_CE_TWOD_COMPLETE (1 << 27) +#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25) +#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24) +#define _PSB_CE_PIXELBE_END_RENDER (1 << 18) +#define _PSB_CE_SW_EVENT (1 << 14) +#define _PSB_CE_TA_FINISHED (1 << 13) +#define _PSB_CE_TA_TERMINATE (1 << 12) +#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3) +#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2) +#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1) +#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0) + + +#define PSB_USE_OFFSET_MASK 0x0007FFFF +#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1) +#define PSB_CR_USE_CODE_BASE0 0x0A0C +#define PSB_CR_USE_CODE_BASE1 0x0A10 +#define PSB_CR_USE_CODE_BASE2 0x0A14 +#define PSB_CR_USE_CODE_BASE3 0x0A18 +#define PSB_CR_USE_CODE_BASE4 0x0A1C +#define PSB_CR_USE_CODE_BASE5 0x0A20 +#define PSB_CR_USE_CODE_BASE6 0x0A24 +#define PSB_CR_USE_CODE_BASE7 
0x0A28 +#define PSB_CR_USE_CODE_BASE8 0x0A2C +#define PSB_CR_USE_CODE_BASE9 0x0A30 +#define PSB_CR_USE_CODE_BASE10 0x0A34 +#define PSB_CR_USE_CODE_BASE11 0x0A38 +#define PSB_CR_USE_CODE_BASE12 0x0A3C +#define PSB_CR_USE_CODE_BASE13 0x0A40 +#define PSB_CR_USE_CODE_BASE14 0x0A44 +#define PSB_CR_USE_CODE_BASE15 0x0A48 +#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2)) +#define _PSB_CUC_BASE_DM_SHIFT (25) +#define _PSB_CUC_BASE_DM_MASK (0x3 << 25) +#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */ +#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7) +#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0) +#define _PSB_CUC_DM_VERTEX (0) +#define _PSB_CUC_DM_PIXEL (1) +#define _PSB_CUC_DM_RESERVED (2) +#define _PSB_CUC_DM_EDM (3) + +#define PSB_CR_PDS_EXEC_BASE 0x0AB8 +#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */ +#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20) + +#define PSB_CR_EVENT_KICKER 0x0AC4 +#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */ + +#define PSB_CR_EVENT_KICK 0x0AC8 +#define _PSB_CE_KICK_NOW (1 << 0) + + +#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38 + +#define PSB_CR_BIF_CTRL 0x0C00 +#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4) +#define _PSB_CB_CTRL_INVALDC (1 << 3) +#define _PSB_CB_CTRL_FLUSH (1 << 2) + +#define PSB_CR_BIF_INT_STAT 0x0C04 + +#define PSB_CR_BIF_FAULT 0x0C08 +#define _PSB_CBI_STAT_PF_N_RW (1 << 14) +#define _PSB_CBI_STAT_FAULT_SHIFT (0) +#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0) +#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1) +#define _PSB_CBI_STAT_FAULT_TA (1 << 2) +#define _PSB_CBI_STAT_FAULT_VDM (1 << 3) +#define _PSB_CBI_STAT_FAULT_2D (1 << 4) +#define _PSB_CBI_STAT_FAULT_PBE (1 << 5) +#define _PSB_CBI_STAT_FAULT_TSP (1 << 6) +#define _PSB_CBI_STAT_FAULT_ISP (1 << 7) +#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8) +#define _PSB_CBI_STAT_FAULT_HOST (1 << 9) + +#define PSB_CR_BIF_BANK0 0x0C78 + +#define PSB_CR_BIF_BANK1 0x0C7C + +#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84 + +#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88 +#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC + +#define PSB_CR_2D_SOCIF 0x0E18 +#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0) +#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0) +#define _PSB_C2_SOCIF_EMPTY (0x80 << 0) + +#define PSB_CR_2D_BLIT_STATUS 0x0E04 +#define _PSB_C2B_STATUS_BUSY (1 << 24) +#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0) +#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0) + +/* + * 2D defs. 
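+ * The definitions below describe the SGX 2D blitter's slave-port command
+ * stream: the top nibble of each 32-bit block header selects one of the
+ * PSB_2D_*_BH object types defined next.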
+ */ + +/* + * 2D Slave Port Data : Block Header's Object Type + */ + +#define PSB_2D_CLIP_BH (0x00000000) +#define PSB_2D_PAT_BH (0x10000000) +#define PSB_2D_CTRL_BH (0x20000000) +#define PSB_2D_SRC_OFF_BH (0x30000000) +#define PSB_2D_MASK_OFF_BH (0x40000000) +#define PSB_2D_RESERVED1_BH (0x50000000) +#define PSB_2D_RESERVED2_BH (0x60000000) +#define PSB_2D_FENCE_BH (0x70000000) +#define PSB_2D_BLIT_BH (0x80000000) +#define PSB_2D_SRC_SURF_BH (0x90000000) +#define PSB_2D_DST_SURF_BH (0xA0000000) +#define PSB_2D_PAT_SURF_BH (0xB0000000) +#define PSB_2D_SRC_PAL_BH (0xC0000000) +#define PSB_2D_PAT_PAL_BH (0xD0000000) +#define PSB_2D_MASK_SURF_BH (0xE0000000) +#define PSB_2D_FLUSH_BH (0xF0000000) + +/* + * Clip Definition block (PSB_2D_CLIP_BH) + */ +#define PSB_2D_CLIPCOUNT_MAX (1) +#define PSB_2D_CLIPCOUNT_MASK (0x00000000) +#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF) +#define PSB_2D_CLIPCOUNT_SHIFT (0) +/* clip rectangle min & max */ +#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000) +#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF) +#define PSB_2D_CLIP_XMAX_SHIFT (12) +#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF) +#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000) +#define PSB_2D_CLIP_XMIN_SHIFT (0) +/* clip rectangle offset */ +#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000) +#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF) +#define PSB_2D_CLIP_YMAX_SHIFT (12) +#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF) +#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000) +#define PSB_2D_CLIP_YMIN_SHIFT (0) + +/* + * Pattern Control (PSB_2D_PAT_BH) + */ +#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F) +#define PSB_2D_PAT_HEIGHT_SHIFT (0) +#define PSB_2D_PAT_WIDTH_MASK (0x000003E0) +#define PSB_2D_PAT_WIDTH_SHIFT (5) +#define PSB_2D_PAT_YSTART_MASK (0x00007C00) +#define PSB_2D_PAT_YSTART_SHIFT (10) +#define PSB_2D_PAT_XSTART_MASK (0x000F8000) +#define PSB_2D_PAT_XSTART_SHIFT (15) + +/* + * 2D Control block (PSB_2D_CTRL_BH) + */ +/* Present Flags */ +#define PSB_2D_SRCCK_CTRL (0x00000001) +#define PSB_2D_DSTCK_CTRL (0x00000002) +#define PSB_2D_ALPHA_CTRL (0x00000004) +/* Colour Key Colour (SRC/DST)*/ +#define PSB_2D_CK_COL_MASK (0xFFFFFFFF) +#define PSB_2D_CK_COL_CLRMASK (0x00000000) +#define PSB_2D_CK_COL_SHIFT (0) +/* Colour Key Mask (SRC/DST)*/ +#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF) +#define PSB_2D_CK_MASK_CLRMASK (0x00000000) +#define PSB_2D_CK_MASK_SHIFT (0) +/* Alpha Control (Alpha/RGB)*/ +#define PSB_2D_GBLALPHA_MASK (0x000FF000) +#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF) +#define PSB_2D_GBLALPHA_SHIFT (12) +#define PSB_2D_SRCALPHA_OP_MASK (0x00700000) +#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF) +#define PSB_2D_SRCALPHA_OP_SHIFT (20) +#define PSB_2D_SRCALPHA_OP_ONE (0x00000000) +#define PSB_2D_SRCALPHA_OP_SRC (0x00100000) +#define PSB_2D_SRCALPHA_OP_DST (0x00200000) +#define PSB_2D_SRCALPHA_OP_SG (0x00300000) +#define PSB_2D_SRCALPHA_OP_DG (0x00400000) +#define PSB_2D_SRCALPHA_OP_GBL (0x00500000) +#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000) +#define PSB_2D_SRCALPHA_INVERT (0x00800000) +#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF) +#define PSB_2D_DSTALPHA_OP_MASK (0x07000000) +#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF) +#define PSB_2D_DSTALPHA_OP_SHIFT (24) +#define PSB_2D_DSTALPHA_OP_ONE (0x00000000) +#define PSB_2D_DSTALPHA_OP_SRC (0x01000000) +#define PSB_2D_DSTALPHA_OP_DST (0x02000000) +#define PSB_2D_DSTALPHA_OP_SG (0x03000000) +#define PSB_2D_DSTALPHA_OP_DG (0x04000000) +#define PSB_2D_DSTALPHA_OP_GBL (0x05000000) +#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000) +#define PSB_2D_DSTALPHA_INVERT (0x08000000) 
+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF) + +#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000) +#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF) +#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000) +#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF) + +/* + *Source Offset (PSB_2D_SRC_OFF_BH) + */ +#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12) +#define PSB_2D_SRCOFF_XSTART_SHIFT (12) +#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF) +#define PSB_2D_SRCOFF_YSTART_SHIFT (0) + +/* + * Mask Offset (PSB_2D_MASK_OFF_BH) + */ +#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12) +#define PSB_2D_MASKOFF_XSTART_SHIFT (12) +#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF) +#define PSB_2D_MASKOFF_YSTART_SHIFT (0) + +/* + * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored + */ + +/* + *Blit Rectangle (PSB_2D_BLIT_BH) + */ + +#define PSB_2D_ROT_MASK (3<<25) +#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK) +#define PSB_2D_ROT_NONE (0<<25) +#define PSB_2D_ROT_90DEGS (1<<25) +#define PSB_2D_ROT_180DEGS (2<<25) +#define PSB_2D_ROT_270DEGS (3<<25) + +#define PSB_2D_COPYORDER_MASK (3<<23) +#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK) +#define PSB_2D_COPYORDER_TL2BR (0<<23) +#define PSB_2D_COPYORDER_BR2TL (1<<23) +#define PSB_2D_COPYORDER_TR2BL (2<<23) +#define PSB_2D_COPYORDER_BL2TR (3<<23) + +#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF) +#define PSB_2D_DSTCK_DISABLE (0x00000000) +#define PSB_2D_DSTCK_PASS (0x00200000) +#define PSB_2D_DSTCK_REJECT (0x00400000) + +#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF) +#define PSB_2D_SRCCK_DISABLE (0x00000000) +#define PSB_2D_SRCCK_PASS (0x00080000) +#define PSB_2D_SRCCK_REJECT (0x00100000) + +#define PSB_2D_CLIP_ENABLE (0x00040000) + +#define PSB_2D_ALPHA_ENABLE (0x00020000) + +#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF) +#define PSB_2D_PAT_MASK (0x00010000) +#define PSB_2D_USE_PAT (0x00010000) +#define PSB_2D_USE_FILL (0x00000000) +/* + * Tungsten Graphics note on rop codes: If rop A and rop B are + * identical, the mask surface will not be read and need not be + * set up. 
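+ * For example, a plain source copy programs PSB_2D_ROP3_SRCCOPY
+ * (0xCCCC), i.e. rop A == rop B == 0xCC, so no mask surface is needed.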
+ */ + +#define PSB_2D_ROP3B_MASK (0x0000FF00) +#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF) +#define PSB_2D_ROP3B_SHIFT (8) +/* rop code A */ +#define PSB_2D_ROP3A_MASK (0x000000FF) +#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00) +#define PSB_2D_ROP3A_SHIFT (0) + +#define PSB_2D_ROP4_MASK (0x0000FFFF) +/* + * DWORD0: (Only pass if Pattern control == Use Fill Colour) + * Fill Colour RGBA8888 + */ +#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF) +#define PSB_2D_FILLCOLOUR_SHIFT (0) +/* + * DWORD1: (Always Present) + * X Start (Dest) + * Y Start (Dest) + */ +#define PSB_2D_DST_XSTART_MASK (0x00FFF000) +#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF) +#define PSB_2D_DST_XSTART_SHIFT (12) +#define PSB_2D_DST_YSTART_MASK (0x00000FFF) +#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000) +#define PSB_2D_DST_YSTART_SHIFT (0) +/* + * DWORD2: (Always Present) + * X Size (Dest) + * Y Size (Dest) + */ +#define PSB_2D_DST_XSIZE_MASK (0x00FFF000) +#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF) +#define PSB_2D_DST_XSIZE_SHIFT (12) +#define PSB_2D_DST_YSIZE_MASK (0x00000FFF) +#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000) +#define PSB_2D_DST_YSIZE_SHIFT (0) + +/* + * Source Surface (PSB_2D_SRC_SURF_BH) + */ +/* + * WORD 0 + */ + +#define PSB_2D_SRC_FORMAT_MASK (0x00078000) +#define PSB_2D_SRC_1_PAL (0x00000000) +#define PSB_2D_SRC_2_PAL (0x00008000) +#define PSB_2D_SRC_4_PAL (0x00010000) +#define PSB_2D_SRC_8_PAL (0x00018000) +#define PSB_2D_SRC_8_ALPHA (0x00020000) +#define PSB_2D_SRC_4_ALPHA (0x00028000) +#define PSB_2D_SRC_332RGB (0x00030000) +#define PSB_2D_SRC_4444ARGB (0x00038000) +#define PSB_2D_SRC_555RGB (0x00040000) +#define PSB_2D_SRC_1555ARGB (0x00048000) +#define PSB_2D_SRC_565RGB (0x00050000) +#define PSB_2D_SRC_0888ARGB (0x00058000) +#define PSB_2D_SRC_8888ARGB (0x00060000) +#define PSB_2D_SRC_8888UYVY (0x00068000) +#define PSB_2D_SRC_RESERVED (0x00070000) +#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000) + + +#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF) +#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000) +#define PSB_2D_SRC_STRIDE_SHIFT (0) +/* + * WORD 1 - Base Address + */ +#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC) +#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003) +#define PSB_2D_SRC_ADDR_SHIFT (2) +#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2) + +/* + * Pattern Surface (PSB_2D_PAT_SURF_BH) + */ +/* + * WORD 0 + */ + +#define PSB_2D_PAT_FORMAT_MASK (0x00078000) +#define PSB_2D_PAT_1_PAL (0x00000000) +#define PSB_2D_PAT_2_PAL (0x00008000) +#define PSB_2D_PAT_4_PAL (0x00010000) +#define PSB_2D_PAT_8_PAL (0x00018000) +#define PSB_2D_PAT_8_ALPHA (0x00020000) +#define PSB_2D_PAT_4_ALPHA (0x00028000) +#define PSB_2D_PAT_332RGB (0x00030000) +#define PSB_2D_PAT_4444ARGB (0x00038000) +#define PSB_2D_PAT_555RGB (0x00040000) +#define PSB_2D_PAT_1555ARGB (0x00048000) +#define PSB_2D_PAT_565RGB (0x00050000) +#define PSB_2D_PAT_0888ARGB (0x00058000) +#define PSB_2D_PAT_8888ARGB (0x00060000) + +#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF) +#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000) +#define PSB_2D_PAT_STRIDE_SHIFT (0) +/* + * WORD 1 - Base Address + */ +#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC) +#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003) +#define PSB_2D_PAT_ADDR_SHIFT (2) +#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2) + +/* + * Destination Surface (PSB_2D_DST_SURF_BH) + */ +/* + * WORD 0 + */ + +#define PSB_2D_DST_FORMAT_MASK (0x00078000) +#define PSB_2D_DST_332RGB (0x00030000) +#define PSB_2D_DST_4444ARGB (0x00038000) +#define PSB_2D_DST_555RGB (0x00040000) +#define PSB_2D_DST_1555ARGB (0x00048000) +#define PSB_2D_DST_565RGB (0x00050000) 
+#define PSB_2D_DST_0888ARGB (0x00058000) +#define PSB_2D_DST_8888ARGB (0x00060000) +#define PSB_2D_DST_8888AYUV (0x00070000) + +#define PSB_2D_DST_STRIDE_MASK (0x00007FFF) +#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000) +#define PSB_2D_DST_STRIDE_SHIFT (0) +/* + * WORD 1 - Base Address + */ +#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC) +#define PSB_2D_DST_ADDR_CLRMASK (0x00000003) +#define PSB_2D_DST_ADDR_SHIFT (2) +#define PSB_2D_DST_ADDR_ALIGNSHIFT (2) + +/* + * Mask Surface (PSB_2D_MASK_SURF_BH) + */ +/* + * WORD 0 + */ +#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF) +#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000) +#define PSB_2D_MASK_STRIDE_SHIFT (0) +/* + * WORD 1 - Base Address + */ +#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC) +#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003) +#define PSB_2D_MASK_ADDR_SHIFT (2) +#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2) + +/* + * Source Palette (PSB_2D_SRC_PAL_BH) + */ + +#define PSB_2D_SRCPAL_ADDR_SHIFT (0) +#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007) +#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8) +#define PSB_2D_SRCPAL_BYTEALIGN (1024) + +/* + * Pattern Palette (PSB_2D_PAT_PAL_BH) + */ + +#define PSB_2D_PATPAL_ADDR_SHIFT (0) +#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007) +#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8) +#define PSB_2D_PATPAL_BYTEALIGN (1024) + +/* + * Rop3 Codes (2 LS bytes) + */ + +#define PSB_2D_ROP3_SRCCOPY (0xCCCC) +#define PSB_2D_ROP3_PATCOPY (0xF0F0) +#define PSB_2D_ROP3_WHITENESS (0xFFFF) +#define PSB_2D_ROP3_BLACKNESS (0x0000) +#define PSB_2D_ROP3_SRC (0xCC) +#define PSB_2D_ROP3_PAT (0xF0) +#define PSB_2D_ROP3_DST (0xAA) + + +/* + * Sizes. + */ + +#define PSB_SCENE_HW_COOKIE_SIZE 16 +#define PSB_TA_MEM_HW_COOKIE_SIZE 16 + +/* + * Scene stuff. + */ + +#define PSB_NUM_HW_SCENES 2 + +/* + * Scheduler completion actions. + */ + +#define PSB_RASTER_BLOCK 0 +#define PSB_RASTER 1 +#define PSB_RETURN 2 +#define PSB_TA 3 + + +/*Power management*/ +#define PSB_PUNIT_PORT 0x04 +#define PSB_APMBA 0x7a +#define PSB_APM_CMD 0x0 +#define PSB_APM_STS 0x04 +#define PSB_PWRGT_GFX_MASK 0x3 +#define PSB_PWRGT_VID_ENC_MASK 0x30 +#define PSB_PWRGT_VID_DEC_MASK 0xc + +#define PSB_PM_SSC 0x20 +#define PSB_PM_SSS 0x30 +#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/ +#endif diff --git a/drivers/gpu/drm/psb/psb_reset.c b/drivers/gpu/drm/psb/psb_reset.c new file mode 100644 index 0000000..04c9378 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_reset.c @@ -0,0 +1,484 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. 
+ * + **************************************************************************/ +/* + * Authors: + * Thomas Hellstrom + */ + +#include +#include "psb_drv.h" +#include "psb_reg.h" +#include "psb_intel_reg.h" +#include "psb_scene.h" +#include "psb_msvdx.h" +#include "lnc_topaz.h" +#include +#include "psb_powermgmt.h" +#define PSB_2D_TIMEOUT_MSEC 100 + +void psb_reset(struct drm_psb_private *dev_priv, int reset_2d) +{ + uint32_t val; + + val = _PSB_CS_RESET_BIF_RESET | + _PSB_CS_RESET_DPM_RESET | + _PSB_CS_RESET_TA_RESET | + _PSB_CS_RESET_USE_RESET | + _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET; + + if (reset_2d) + val |= _PSB_CS_RESET_TWOD_RESET; + + PSB_WSGX32(val, PSB_CR_SOFT_RESET); + (void) PSB_RSGX32(PSB_CR_SOFT_RESET); + + udelay(100); + + PSB_WSGX32(0, PSB_CR_SOFT_RESET); + wmb(); + PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT, + PSB_CR_BIF_CTRL); + wmb(); + (void) PSB_RSGX32(PSB_CR_BIF_CTRL); + + udelay(100); + PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT, + PSB_CR_BIF_CTRL); + (void) PSB_RSGX32(PSB_CR_BIF_CTRL); +} + +void psb_print_pagefault(struct drm_psb_private *dev_priv) +{ + uint32_t val; + uint32_t addr; + + val = PSB_RSGX32(PSB_CR_BIF_INT_STAT); + addr = PSB_RSGX32(PSB_CR_BIF_FAULT); + + if (val) { + if (val & _PSB_CBI_STAT_PF_N_RW) + DRM_ERROR("Poulsbo MMU page fault:\n"); + else + DRM_ERROR("Poulsbo MMU read / write " + "protection fault:\n"); + + if (val & _PSB_CBI_STAT_FAULT_CACHE) + DRM_ERROR("\tCache requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_TA) + DRM_ERROR("\tTA requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_VDM) + DRM_ERROR("\tVDM requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_2D) + DRM_ERROR("\t2D requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_PBE) + DRM_ERROR("\tPBE requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_TSP) + DRM_ERROR("\tTSP requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_ISP) + DRM_ERROR("\tISP requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_USSEPDS) + DRM_ERROR("\tUSSEPDS requestor.\n"); + if (val & _PSB_CBI_STAT_FAULT_HOST) + DRM_ERROR("\tHost requestor.\n"); + + DRM_ERROR("\tMMU failing address is 0x%08x.\n", + (unsigned) addr); + } +} + +void psb_schedule_watchdog(struct drm_psb_private *dev_priv) +{ + struct timer_list *wt = &dev_priv->watchdog_timer; + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); + if (dev_priv->timer_available && !timer_pending(wt)) { + wt->expires = jiffies + PSB_WATCHDOG_DELAY; + add_timer(wt); + } + spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags); +} + +#if 0 +static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv, + unsigned int engine, int *lockup, + int *idle) +{ + uint32_t received_seq; + + received_seq = dev_priv->comm[engine << 4]; + spin_lock(&dev_priv->sequence_lock); + *idle = (received_seq == dev_priv->sequence[engine]); + spin_unlock(&dev_priv->sequence_lock); + + if (*idle) { + dev_priv->idle[engine] = 1; + *lockup = 0; + return; + } + + if (dev_priv->idle[engine]) { + dev_priv->idle[engine] = 0; + dev_priv->last_sequence[engine] = received_seq; + *lockup = 0; + return; + } + + *lockup = (dev_priv->last_sequence[engine] == received_seq); +} + +#endif +static void psb_watchdog_func(unsigned long data) +{ + struct drm_psb_private *dev_priv = (struct drm_psb_private *) data; + int lockup; + int msvdx_lockup; + int msvdx_idle; + int lockup_2d; + int idle_2d; + int idle; + unsigned long irq_flags; + + psb_scheduler_lockup(dev_priv, &lockup, &idle); + psb_msvdx_lockup(dev_priv, &msvdx_lockup, 
&msvdx_idle);
+
+#if 0
+    psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
+#else
+    lockup_2d = false;
+    idle_2d = true;
+#endif
+    if (lockup || msvdx_lockup || lockup_2d) {
+        spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+        dev_priv->timer_available = 0;
+        spin_unlock_irqrestore(&dev_priv->watchdog_lock,
+                               irq_flags);
+        if (lockup) {
+            /* commented out to avoid an illegal register access
+               when OSPM has powered the island off */
+            /*psb_print_pagefault(dev_priv);*/
+            schedule_work(&dev_priv->watchdog_wq);
+        }
+        if (msvdx_lockup)
+            schedule_work(&dev_priv->msvdx_watchdog_wq);
+    }
+    if (!idle || !msvdx_idle || !idle_2d)
+        psb_schedule_watchdog(dev_priv);
+}
+
+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
+{
+    struct drm_psb_private *dev_priv = dev->dev_private;
+    struct psb_msvdx_cmd_queue *msvdx_cmd;
+    struct list_head *list, *next;
+    struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+    /*Flush the msvdx cmd queue and signal all fences in the queue */
+    list_for_each_safe(list, next, &msvdx_priv->msvdx_queue) {
+        msvdx_cmd =
+            list_entry(list, struct psb_msvdx_cmd_queue, head);
+        PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
+                          msvdx_cmd->sequence);
+        msvdx_priv->msvdx_current_sequence = msvdx_cmd->sequence;
+        psb_fence_error(dev, PSB_ENGINE_VIDEO,
+                        msvdx_priv->msvdx_current_sequence,
+                        _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
+        list_del(list);
+        kfree(msvdx_cmd->cmd);
+        kfree(msvdx_cmd);
+    }
+}
+
+static void psb_msvdx_reset_wq(struct work_struct *work)
+{
+    struct drm_psb_private *dev_priv =
+        container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
+    struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+    struct psb_scheduler *scheduler = &dev_priv->scheduler;
+    unsigned long irq_flags;
+
+    mutex_lock(&msvdx_priv->msvdx_mutex);
+    msvdx_priv->msvdx_needs_reset = 1;
+    msvdx_priv->msvdx_current_sequence++;
+    PSB_DEBUG_GENERAL
+        ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
+         msvdx_priv->msvdx_current_sequence);
+
+    psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
+                    msvdx_priv->msvdx_current_sequence,
+                    _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
+
+    spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+    dev_priv->timer_available = 1;
+    spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+
+    spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+    psb_msvdx_flush_cmd_queue(scheduler->dev);
+    spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+
+    psb_schedule_watchdog(dev_priv);
+    mutex_unlock(&msvdx_priv->msvdx_mutex);
+}
+
+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
+{
+    struct psb_xhw_buf buf;
+    uint32_t bif_ctrl;
+
+    INIT_LIST_HEAD(&buf.head);
+    psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+    bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
+    PSB_WSGX32(bif_ctrl |
+               _PSB_CB_CTRL_CLEAR_FAULT |
+               _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+    (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+    udelay(100);
+    PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
+    (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+    return psb_xhw_reset_dpm(dev_priv, &buf);
+}
+
+/*
+ * Block command submission and reset hardware and schedulers.
+ */
+
+static void psb_reset_wq(struct work_struct *work)
+{
+    struct drm_psb_private *dev_priv =
+        container_of(work, struct drm_psb_private, watchdog_wq);
+    int lockup_2d;
+    int idle_2d;
+    unsigned long irq_flags;
+    int ret;
+    int reset_count = 0;
+    struct psb_xhw_buf buf;
+    uint32_t xhw_lockup;
+
+    /*
+     * Block command submission.
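+     * The watchdog has fired: ask the hardware whether it is really
+     * locked up; if so, reset SGX and the MMU, restart the scheduler
+     * and retry the TA memory reload a bounded number of times.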
+	 */
+	PSB_DEBUG_PM("psb_reset_wq\n");
+
+	if (!powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, false)) {
+		DRM_ERROR("lockup happened while the graphics island was off\n");
+		return;
+	}
+	mutex_lock(&dev_priv->reset_mutex);
+
+	INIT_LIST_HEAD(&buf.head);
+	ret = psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup);
+	if (likely(ret == 0)) {
+		if (psb_extend_timeout(dev_priv, xhw_lockup) == 0) {
+			/*
+			 * no lockup, just re-schedule
+			 */
+			spin_lock_irqsave(&dev_priv->watchdog_lock,
+					  irq_flags);
+			dev_priv->timer_available = 1;
+			spin_unlock_irqrestore(&dev_priv->watchdog_lock,
+					       irq_flags);
+			psb_schedule_watchdog(dev_priv);
+			mutex_unlock(&dev_priv->reset_mutex);
+			return;
+		}
+	} else {
+		DRM_ERROR("Check lockup returned %d\n", ret);
+	}
+#if 0
+	mdelay(PSB_2D_TIMEOUT_MSEC);
+
+	psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
+
+	if (lockup_2d) {
+		uint32_t seq_2d;
+		spin_lock(&dev_priv->sequence_lock);
+		seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
+		spin_unlock(&dev_priv->sequence_lock);
+		psb_fence_error(dev_priv->scheduler.dev,
+				PSB_ENGINE_2D,
+				seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
+		DRM_INFO("Resetting 2D engine.\n");
+	}
+
+	psb_reset(dev_priv, lockup_2d);
+#else
+	(void) lockup_2d;
+	(void) idle_2d;
+	psb_reset(dev_priv, 0);
+#endif
+	(void) psb_xhw_mmu_reset(dev_priv);
+	DRM_INFO("Resetting scheduler.\n");
+	psb_scheduler_pause(dev_priv);
+	psb_scheduler_reset(dev_priv, -EBUSY);
+	psb_scheduler_ta_mem_check(dev_priv);
+
+	while (dev_priv->ta_mem &&
+	       !dev_priv->force_ta_mem_load && ++reset_count < 10) {
+		struct ttm_fence_object *fence;
+
+		/*
+		 * TA memory is currently fenced so offsets
+		 * are valid. Reload offsets into the dpm now.
+		 */
+
+		struct psb_xhw_buf buf;
+		INIT_LIST_HEAD(&buf.head);
+
+		mdelay(100);
+
+		fence = dev_priv->ta_mem->ta_memory->sync_obj;
+
+		DRM_INFO("Reloading TA memory at offset "
+			 "0x%08lx to 0x%08lx seq %d\n",
+			 dev_priv->ta_mem->ta_memory->offset,
+			 dev_priv->ta_mem->ta_memory->offset +
+			 (dev_priv->ta_mem->ta_memory->num_pages << PAGE_SHIFT),
+			 fence->sequence);
+
+		fence = dev_priv->ta_mem->hw_data->sync_obj;
+
+		DRM_INFO("Reloading TA HW memory at offset "
+			 "0x%08lx to 0x%08lx seq %u\n",
+			 dev_priv->ta_mem->hw_data->offset,
+			 dev_priv->ta_mem->hw_data->offset +
+			 (dev_priv->ta_mem->hw_data->num_pages << PAGE_SHIFT),
+			 fence->sequence);
+
+		ret = psb_xhw_ta_mem_load(dev_priv, &buf,
+					  PSB_TA_MEM_FLAG_TA |
+					  PSB_TA_MEM_FLAG_RASTER |
+					  PSB_TA_MEM_FLAG_HOSTA |
+					  PSB_TA_MEM_FLAG_HOSTD |
+					  PSB_TA_MEM_FLAG_INIT,
+					  dev_priv->ta_mem->ta_memory->offset,
+					  dev_priv->ta_mem->hw_data->offset,
+					  dev_priv->ta_mem->hw_cookie);
+		if (!ret)
+			break;
+
+		DRM_INFO("Reloading TA memory failed. Retrying.\n");
+		psb_reset(dev_priv, 0);
+		(void) psb_xhw_mmu_reset(dev_priv);
+	}
+
+	psb_scheduler_restart(dev_priv);
+	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+	dev_priv->timer_available = 1;
+	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+	mutex_unlock(&dev_priv->reset_mutex);
+	powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND);
+}
+
+void psb_watchdog_init(struct drm_psb_private *dev_priv)
+{
+	struct timer_list *wt = &dev_priv->watchdog_timer;
+	unsigned long irq_flags;
+
+	spin_lock_init(&dev_priv->watchdog_lock);
+	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+	init_timer(wt);
+	INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
+	INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
+	wt->data = (unsigned long) dev_priv;
+	wt->function = &psb_watchdog_func;
+	dev_priv->timer_available = 1;
+	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+}
+
+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+	dev_priv->timer_available = 0;
+	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+	(void) del_timer_sync(&dev_priv->watchdog_timer);
+}
+
+static void psb_lid_timer_func(unsigned long data)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
+	struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+	struct timer_list *lid_timer = &dev_priv->lid_timer;
+	unsigned long irq_flags;
+	u32 *lid_state = dev_priv->lid_state;
+	u32 pp_status;
+
+	if (*lid_state == dev_priv->lid_last_state)
+		goto lid_timer_schedule;
+
+	if ((*lid_state) & 0x01) {
+		/* lid state is open */
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while ((pp_status & PP_ON) == 0);
+
+		/* FIXME: should be the backlight level used before */
+		psb_intel_lvds_set_brightness(dev, 100);
+	} else {
+		/* lid state is closed */
+		psb_intel_lvds_set_brightness(dev, 0);
+
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while (pp_status & PP_ON);
+	}
+
+	dev_priv->lid_last_state = *lid_state;
+
+lid_timer_schedule:
+	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+	if (!timer_pending(lid_timer)) {
+		lid_timer->expires = jiffies + PSB_LID_DELAY;
+		add_timer(lid_timer);
+	}
+	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+{
+	struct timer_list *lid_timer = &dev_priv->lid_timer;
+	unsigned long irq_flags;
+
+	spin_lock_init(&dev_priv->lid_lock);
+	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+
+	init_timer(lid_timer);
+
+	lid_timer->data = (unsigned long)dev_priv;
+	lid_timer->function = psb_lid_timer_func;
+	lid_timer->expires = jiffies + PSB_LID_DELAY;
+
+	add_timer(lid_timer);
+	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+{
+	del_timer_sync(&dev_priv->lid_timer);
+}
+
diff --git a/drivers/gpu/drm/psb/psb_scene.c b/drivers/gpu/drm/psb/psb_scene.c
new file mode 100644
index 0000000..42b823d
--- /dev/null
+++ b/drivers/gpu/drm/psb/psb_scene.c
@@ -0,0 +1,523 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_scene.h"
+#include "psb_powermgmt.h"
+
+void psb_clear_scene_atomic(struct psb_scene *scene)
+{
+	int i;
+	struct page *page;
+	void *v;
+
+	for (i = 0; i < scene->clear_num_pages; ++i) {
+		page = ttm_tt_get_page(scene->hw_data->ttm,
+				       scene->clear_p_start + i);
+		if (in_irq())
+			v = kmap_atomic(page, KM_IRQ0);
+		else
+			v = kmap_atomic(page, KM_USER0);
+
+		memset(v, 0, PAGE_SIZE);
+
+		if (in_irq())
+			kunmap_atomic(v, KM_IRQ0);
+		else
+			kunmap_atomic(v, KM_USER0);
+	}
+}
+
+int psb_clear_scene(struct psb_scene *scene)
+{
+	struct ttm_bo_kmap_obj bmo;
+	bool is_iomem;
+	void *addr;
+
+	int ret = ttm_bo_kmap(scene->hw_data, scene->clear_p_start,
+			      scene->clear_num_pages, &bmo);
+
+	PSB_DEBUG_RENDER("Scene clear.\n");
+	if (ret)
+		return ret;
+
+	addr = ttm_kmap_obj_virtual(&bmo, &is_iomem);
+	BUG_ON(is_iomem);
+	memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
+	ttm_bo_kunmap(&bmo);
+
+	return 0;
+}
+
+static void psb_destroy_scene(struct kref *kref)
+{
+	struct psb_scene *scene =
+	    container_of(kref, struct psb_scene, kref);
+
+	PSB_DEBUG_RENDER("Scene destroy.\n");
+	psb_scheduler_remove_scene_refs(scene);
+	ttm_bo_unref(&scene->hw_data);
+	kfree(scene);
+}
+
+void psb_scene_unref(struct psb_scene **p_scene)
+{
+	struct psb_scene *scene = *p_scene;
+
+	PSB_DEBUG_RENDER("Scene unref.\n");
+	*p_scene = NULL;
+	kref_put(&scene->kref, &psb_destroy_scene);
+}
+
+struct psb_scene *psb_scene_ref(struct psb_scene *src)
+{
+	PSB_DEBUG_RENDER("Scene ref.\n");
+	kref_get(&src->kref);
+	return src;
+}
+
+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
+					 uint32_t w, uint32_t h)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	int ret = -EINVAL;
+	struct psb_scene *scene;
+	uint32_t bo_size;
+	struct psb_xhw_buf buf;
+
+	PSB_DEBUG_RENDER("Alloc scene w %u h %u msaa %u\n", w & 0xffff, h,
+			 w >> 16);
+
+	scene = kzalloc(sizeof(*scene), GFP_KERNEL);
+
+	if (!scene) {
+		DRM_ERROR("Out of memory allocating scene object.\n");
+		return NULL;
+	}
+
+	scene->dev = dev;
+	scene->w = w;
+	scene->h = h;
+	scene->hw_scene = NULL;
+	kref_init(&scene->kref);
+
+	INIT_LIST_HEAD(&buf.head);
+	ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
+				 scene->hw_cookie, &bo_size,
+				 &scene->clear_p_start,
+				 &scene->clear_num_pages);
+	if (ret)
+		goto out_err;
+
+	ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_CACHED,
+				       0, 0, 1, NULL, &scene->hw_data);
+	if (ret)
+		goto out_err;
+
+	return scene;
+out_err:
+	kfree(scene);
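+	/* Either the xhw scene query or the bo creation failed. */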
return NULL; +} + +int psb_validate_scene_pool(struct psb_context *context, + struct psb_scene_pool *pool, + uint32_t w, + uint32_t h, + int final_pass, struct psb_scene **scene_p) +{ + struct drm_device *dev = pool->dev; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct psb_scene *scene = pool->scenes[pool->cur_scene]; + int ret; + unsigned long irq_flags; + struct psb_scheduler *scheduler = &dev_priv->scheduler; + uint32_t bin_pt_offset; + uint32_t bin_param_offset; + + PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", + pool->cur_scene); + + if (unlikely(!dev_priv->ta_mem)) { + dev_priv->ta_mem = + psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages); + if (!dev_priv->ta_mem) + return -ENOMEM; + + bin_pt_offset = ~0; + bin_param_offset = ~0; + } else { + bin_pt_offset = dev_priv->ta_mem->hw_data->offset; + bin_param_offset = dev_priv->ta_mem->ta_memory->offset; + } + + pool->w = w; + pool->h = h; + if (scene && (scene->w != pool->w || scene->h != pool->h)) { + spin_lock_irqsave(&scheduler->lock, irq_flags); + if (scene->flags & PSB_SCENE_FLAG_DIRTY) { + spin_unlock_irqrestore(&scheduler->lock, + irq_flags); + DRM_ERROR("Trying to resize a dirty scene.\n"); + return -EINVAL; + } + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + psb_scene_unref(&pool->scenes[pool->cur_scene]); + scene = NULL; + } + + if (!scene) { + pool->scenes[pool->cur_scene] = scene = + psb_alloc_scene(pool->dev, pool->w, pool->h); + + if (!scene) + return -ENOMEM; + + scene->flags = PSB_SCENE_FLAG_CLEARED; + } + + ret = psb_validate_kernel_buffer(context, scene->hw_data, + PSB_ENGINE_TA, + PSB_BO_FLAG_SCENE | + PSB_GPU_ACCESS_READ | + PSB_GPU_ACCESS_WRITE, 0); + if (unlikely(ret != 0)) + return ret; + + /* + * FIXME: We need atomic bit manipulation here for the + * scheduler. For now use the spinlock. + */ + + spin_lock_irqsave(&scheduler->lock, irq_flags); + if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) { + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + PSB_DEBUG_RENDER("Waiting to clear scene memory.\n"); + mutex_lock(&scene->hw_data->mutex); + + ret = ttm_bo_wait(scene->hw_data, 0, 1, 0); + mutex_unlock(&scene->hw_data->mutex); + if (ret) + return ret; + + ret = psb_clear_scene(scene); + + if (ret) + return ret; + spin_lock_irqsave(&scheduler->lock, irq_flags); + scene->flags |= PSB_SCENE_FLAG_CLEARED; + } + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + + ret = psb_validate_kernel_buffer(context, dev_priv->ta_mem->hw_data, + PSB_ENGINE_TA, + PSB_BO_FLAG_SCENE | + PSB_GPU_ACCESS_READ | + PSB_GPU_ACCESS_WRITE, 0); + if (unlikely(ret != 0)) + return ret; + + ret = + psb_validate_kernel_buffer(context, + dev_priv->ta_mem->ta_memory, + PSB_ENGINE_TA, + PSB_BO_FLAG_SCENE | + PSB_GPU_ACCESS_READ | + PSB_GPU_ACCESS_WRITE, 0); + + if (unlikely(ret != 0)) + return ret; + + if (unlikely(bin_param_offset != + dev_priv->ta_mem->ta_memory->offset || + bin_pt_offset != + dev_priv->ta_mem->hw_data->offset || + dev_priv->force_ta_mem_load)) { + + struct psb_xhw_buf buf; + + INIT_LIST_HEAD(&buf.head); + ret = psb_xhw_ta_mem_load(dev_priv, &buf, + PSB_TA_MEM_FLAG_TA | + PSB_TA_MEM_FLAG_RASTER | + PSB_TA_MEM_FLAG_HOSTA | + PSB_TA_MEM_FLAG_HOSTD | + PSB_TA_MEM_FLAG_INIT, + dev_priv->ta_mem->ta_memory-> + offset, + dev_priv->ta_mem->hw_data-> + offset, + dev_priv->ta_mem->hw_cookie); + if (ret) + return ret; + + dev_priv->force_ta_mem_load = 0; + } + + if (final_pass) { + + /* + * Clear the scene on next use. Advance the scene counter. 
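+ *
+ * E.g. with a double-buffered pool (num_scenes == 2) cur_scene just
+ * alternates 0 -> 1 -> 0 -> ..., so the next validate picks the other
+ * scene while this one is still being rasterized.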
+ */ + + spin_lock_irqsave(&scheduler->lock, irq_flags); + scene->flags &= ~PSB_SCENE_FLAG_CLEARED; + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes; + } + + *scene_p = psb_scene_ref(scene); + return 0; +} + +static void psb_scene_pool_destroy(struct kref *kref) +{ + struct psb_scene_pool *pool = + container_of(kref, struct psb_scene_pool, kref); + int i; + PSB_DEBUG_RENDER("Scene pool destroy.\n"); + + for (i = 0; i < pool->num_scenes; ++i) { + PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i, + (unsigned long) pool->scenes[i]); + if (pool->scenes[i]) + psb_scene_unref(&pool->scenes[i]); + } + + kfree(pool); +} + +void psb_scene_pool_unref(struct psb_scene_pool **p_pool) +{ + struct psb_scene_pool *pool = *p_pool; + + PSB_DEBUG_RENDER("Scene pool unref\n"); + *p_pool = NULL; + kref_put(&pool->kref, &psb_scene_pool_destroy); +} + +struct psb_scene_pool *psb_scene_pool_ref(struct psb_scene_pool *src) +{ + kref_get(&src->kref); + return src; +} + +/* + * Callback for base object manager. + */ + +static void psb_scene_pool_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct psb_scene_pool *pool = + container_of(base, struct psb_scene_pool, base); + *p_base = NULL; + + psb_scene_pool_unref(&pool); +} + +struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file *file_priv, + uint32_t handle, + int check_owner) +{ + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + struct ttm_base_object *base; + struct psb_scene_pool *pool; + + + base = ttm_base_object_lookup(tfile, handle); + if (!base || (base->object_type != PSB_USER_OBJECT_SCENE_POOL)) { + DRM_ERROR("Could not find scene pool object 0x%08x\n", + handle); + return NULL; + } + + if (check_owner && tfile != base->tfile && !base->shareable) { + ttm_base_object_unref(&base); + return NULL; + } + + pool = container_of(base, struct psb_scene_pool, base); + kref_get(&pool->kref); + ttm_base_object_unref(&base); + return pool; +} + +struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *file_priv, + int shareable, + uint32_t num_scenes, + uint32_t w, uint32_t h) +{ + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + struct drm_device *dev = file_priv->minor->dev; + struct psb_scene_pool *pool; + int ret; + + PSB_DEBUG_RENDER("Scene pool alloc\n"); + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) { + DRM_ERROR("Out of memory allocating scene pool object.\n"); + return NULL; + } + pool->w = w; + pool->h = h; + pool->dev = dev; + pool->num_scenes = num_scenes; + kref_init(&pool->kref); + + /* + * The base object holds a reference. + */ + + kref_get(&pool->kref); + ret = ttm_base_object_init(tfile, &pool->base, shareable, + PSB_USER_OBJECT_SCENE_POOL, + &psb_scene_pool_release, NULL); + if (unlikely(ret != 0)) + goto out_err; + + return pool; +out_err: + kfree(pool); + return NULL; +} + +/* + * Code to support multiple ta memory buffers. 
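+ *
+ * Lifetime is kref based: psb_alloc_ta_mem() returns an object holding
+ * one reference, psb_ta_mem_ref()/psb_ta_mem_unref() adjust the count,
+ * and the final unref releases both buffer objects via
+ * psb_ta_mem_destroy().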
+ */ + +static void psb_ta_mem_destroy(struct kref *kref) +{ + struct psb_ta_mem *ta_mem = + container_of(kref, struct psb_ta_mem, kref); + + ttm_bo_unref(&ta_mem->hw_data); + ttm_bo_unref(&ta_mem->ta_memory); + kfree(ta_mem); +} + +void psb_ta_mem_unref(struct psb_ta_mem **p_ta_mem) +{ + struct psb_ta_mem *ta_mem = *p_ta_mem; + *p_ta_mem = NULL; + kref_put(&ta_mem->kref, psb_ta_mem_destroy); +} + +struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src) +{ + kref_get(&src->kref); + return src; +} + +struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->bdev; + int ret = -EINVAL; + struct psb_ta_mem *ta_mem; + uint32_t bo_size; + uint32_t ta_min_size; + struct psb_xhw_buf buf; + + INIT_LIST_HEAD(&buf.head); + + ta_mem = kzalloc(sizeof(*ta_mem), GFP_KERNEL); + + if (!ta_mem) { + DRM_ERROR("Out of memory allocating parameter memory.\n"); + return NULL; + } + + kref_init(&ta_mem->kref); + ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages, + ta_mem->hw_cookie, + &bo_size, + &ta_min_size); + if (ret == -ENOMEM) { + DRM_ERROR("Parameter memory size is too small.\n"); + DRM_INFO("Attempted to use %u kiB of parameter memory.\n", + (unsigned int) (pages * (PAGE_SIZE / 1024))); + DRM_INFO("The Xpsb driver thinks this is too small and\n"); + DRM_INFO("suggests %u kiB. Check the psb DRM\n", + (unsigned int)(ta_min_size / 1024)); + DRM_INFO("\"ta_mem_size\" parameter!\n"); + } + if (ret) + goto out_err0; + + ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel, + DRM_PSB_FLAG_MEM_MMU, + 0, 0, 0, NULL, + &ta_mem->hw_data); + if (ret) + goto out_err0; + + bo_size = pages * PAGE_SIZE; + ret = + ttm_buffer_object_create(bdev, bo_size, + ttm_bo_type_kernel, + DRM_PSB_FLAG_MEM_RASTGEOM, + 0, + 1024 * 1024 >> PAGE_SHIFT, 0, + NULL, + &ta_mem->ta_memory); + if (ret) + goto out_err1; + + return ta_mem; +out_err1: + ttm_bo_unref(&ta_mem->hw_data); +out_err0: + kfree(ta_mem); + return NULL; +} + +int drm_psb_scene_unref_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + struct drm_psb_scene *scene = (struct drm_psb_scene *) data; + int ret = 0; + struct drm_psb_private *dev_priv = psb_priv(dev); + if (!scene->handle_valid) + return 0; + powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true); + + ret = + ttm_ref_object_base_unref(tfile, scene->handle, TTM_REF_USAGE); + if (unlikely(ret != 0)) + DRM_ERROR("Could not unreference a scene object.\n"); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} diff --git a/drivers/gpu/drm/psb/psb_scene.h b/drivers/gpu/drm/psb/psb_scene.h new file mode 100644 index 0000000..2a4f8bc --- /dev/null +++ b/drivers/gpu/drm/psb/psb_scene.h @@ -0,0 +1,119 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _PSB_SCENE_H_ +#define _PSB_SCENE_H_ + +#include "ttm/ttm_object.h" + +#define PSB_USER_OBJECT_SCENE_POOL ttm_driver_type0 +#define PSB_USER_OBJECT_TA_MEM ttm_driver_type1 +#define PSB_MAX_NUM_SCENES 8 + +struct psb_hw_scene; +struct psb_hw_ta_mem; + +struct psb_scene_pool { + struct ttm_base_object base; + struct drm_device *dev; + struct kref kref; + uint32_t w; + uint32_t h; + uint32_t cur_scene; + struct psb_scene *scenes[PSB_MAX_NUM_SCENES]; + uint32_t num_scenes; +}; + +struct psb_scene { + struct drm_device *dev; + struct kref kref; + uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE]; + uint32_t bo_size; + uint32_t w; + uint32_t h; + struct psb_ta_mem *ta_mem; + struct psb_hw_scene *hw_scene; + struct ttm_buffer_object *hw_data; + uint32_t flags; + uint32_t clear_p_start; + uint32_t clear_num_pages; +}; + +#if 0 +struct psb_scene_entry { + struct list_head head; + struct psb_scene *scene; +}; + +struct psb_user_scene { + struct ttm_base_object base; + struct drm_device *dev; +}; + +#endif + +struct psb_ta_mem { + struct ttm_base_object base; + struct drm_device *dev; + struct kref kref; + uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE]; + uint32_t bo_size; + struct ttm_buffer_object *ta_memory; + struct ttm_buffer_object *hw_data; + int is_deallocating; + int deallocating_scheduled; +}; + +extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv, + int shareable, + uint32_t num_scenes, + uint32_t w, uint32_t h); +extern void psb_scene_pool_unref(struct psb_scene_pool **pool); +extern struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file + *priv, + uint32_t handle, + int check_owner); +extern int psb_validate_scene_pool(struct psb_context *context, + struct psb_scene_pool *pool, + uint32_t w, + uint32_t h, int final_pass, + struct psb_scene **scene_p); +extern void psb_scene_unref(struct psb_scene **scene); +extern struct psb_scene *psb_scene_ref(struct psb_scene *src); +extern int drm_psb_scene_unref_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file_priv); + +static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool) +{ + return pool->base.hash.key; +} + +extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, + uint32_t pages); +extern struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src); +extern void psb_ta_mem_unref(struct psb_ta_mem **ta_mem); + +#endif diff --git a/drivers/gpu/drm/psb/psb_schedule.c b/drivers/gpu/drm/psb/psb_schedule.c new file mode 100644 index 0000000..9c4e2cd --- /dev/null +++ b/drivers/gpu/drm/psb/psb_schedule.c @@ -0,0 +1,1593 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#include <drm/drmP.h>
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_scene.h"
+#include "ttm/ttm_execbuf_util.h"
+
+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
+#define PSB_ALLOWED_TA_RUNTIME (DRM_HZ * 20)
+#define PSB_RASTER_TIMEOUT (DRM_HZ / 10)
+#define PSB_TA_TIMEOUT (DRM_HZ / 10)
+
+#undef PSB_SOFTWARE_WORKAHEAD
+
+#ifdef PSB_STABLE_SETTING
+
+/*
+ * Software blocks completely while the engines are working so there can be no
+ * overlap.
+ */
+
+#define PSB_WAIT_FOR_RASTER_COMPLETION
+#define PSB_WAIT_FOR_TA_COMPLETION
+
+#elif defined(PSB_PARANOID_SETTING)
+/*
+ * Software blocks "almost" while the engines are working so there can be no
+ * overlap.
+ */
+
+#define PSB_WAIT_FOR_RASTER_COMPLETION
+#define PSB_WAIT_FOR_TA_COMPLETION
+#define PSB_BE_PARANOID
+
+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
+/*
+ * Software leaps ahead while the rasterizer is running and prepares
+ * a new ta job that can be scheduled before the rasterizer has
+ * finished.
+ */
+
+#define PSB_WAIT_FOR_TA_COMPLETION
+
+#elif defined(PSB_SOFTWARE_WORKAHEAD)
+/*
+ * Don't sync, but allow software to work ahead and queue a number of
+ * jobs, while still blocking overlap in the scheduler.
+ */
+
+#define PSB_BLOCK_OVERLAP
+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
+
+#endif
+
+/*
+ * Avoid pixelbe pagefaults on C0.
+ */
+#if 0
+#define PSB_BLOCK_OVERLAP
+#endif
+
+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
+			    struct psb_scheduler *scheduler,
+			    uint32_t reply_flag);
+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
+				struct psb_scheduler *scheduler,
+				uint32_t reply_flag);
+
+#ifdef FIX_TG_16
+
+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
+
+#endif
+
+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
+			  int *lockup, int *idle)
+{
+	unsigned long irq_flags;
+	struct psb_scheduler *scheduler = &dev_priv->scheduler;
+
+	*lockup = 0;
+	*idle = 1;
+
+	spin_lock_irqsave(&scheduler->lock, irq_flags);
+
+	if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
+	    time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
+		*lockup = 1;
+	}
+	if (!*lockup
+	    && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
+	    && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
+		*lockup = 1;
+	}
+	if (!*lockup)
+		*idle = scheduler->idle;
+
+	spin_unlock_irqrestore(&scheduler->lock, irq_flags);
+}
+
+static inline void psb_set_idle(struct psb_scheduler *scheduler)
+{
+	scheduler->idle =
+	    (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
+	    (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
+	if (scheduler->idle)
+		wake_up(&scheduler->idle_queue);
+}
+
+/*
+ * Call with the scheduler spinlock held.
+ * Assigns a scene context to either the ta or the rasterizer, + * flushing out other scenes to memory if necessary. + */ + +static int psb_set_scene_fire(struct psb_scheduler *scheduler, + struct psb_scene *scene, + int engine, struct psb_task *task) +{ + uint32_t flags = 0; + struct psb_hw_scene *hw_scene; + struct drm_device *dev = scene->dev; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + + hw_scene = scene->hw_scene; + if (hw_scene && hw_scene->last_scene == scene) { + + /* + * Reuse the last hw scene context and delete it from the + * free list. + */ + + PSB_DEBUG_RENDER("Reusing hw scene %d.\n", + hw_scene->context_number); + if (scene->flags & PSB_SCENE_FLAG_DIRTY) { + + /* + * No hw context initialization to be done. + */ + + flags |= PSB_SCENE_FLAG_SETUP_ONLY; + } + + list_del_init(&hw_scene->head); + + } else { + struct list_head *list; + hw_scene = NULL; + + /* + * Grab a new hw scene context. + */ + + list_for_each(list, &scheduler->hw_scenes) { + hw_scene = + list_entry(list, struct psb_hw_scene, head); + break; + } + BUG_ON(!hw_scene); + PSB_DEBUG_RENDER("New hw scene %d.\n", + hw_scene->context_number); + + list_del_init(list); + } + scene->hw_scene = hw_scene; + hw_scene->last_scene = scene; + + flags |= PSB_SCENE_FLAG_SETUP; + + /* + * Switch context and setup the engine. + */ + + return psb_xhw_scene_bind_fire(dev_priv, + &task->buf, + task->flags, + hw_scene->context_number, + scene->hw_cookie, + task->oom_cmds, + task->oom_cmd_size, + scene->hw_data->offset, + engine, flags | scene->flags); +} + +static inline void psb_report_fence(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler, + uint32_t class, + uint32_t sequence, + uint32_t type, int call_handler) +{ + struct psb_scheduler_seq *seq = &scheduler->seq[type]; + struct ttm_fence_device *fdev = &dev_priv->fdev; + struct ttm_fence_class_manager *fc = &fdev->fence_class[PSB_ENGINE_TA]; + unsigned long irq_flags; + + /** + * Block racing poll_ta calls, that take the lock in write mode. + */ + + read_lock_irqsave(&fc->lock, irq_flags); + seq->sequence = sequence; + seq->reported = 0; + read_unlock_irqrestore(&fc->lock, irq_flags); + + if (call_handler) + psb_fence_handler(scheduler->dev, class); +} + +static void psb_schedule_raster(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler); + +static void psb_schedule_ta(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + struct psb_task *task = NULL; + struct list_head *list, *next; + int pushed_raster_task = 0; + + PSB_DEBUG_RENDER("schedule ta\n"); + + if (scheduler->idle_count != 0) + return; + + if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) + return; + + if (scheduler->ta_state) + return; + + /* + * Skip the ta stage for rasterization-only + * tasks. They arrive here to make sure we're rasterizing + * tasks in the correct order. 
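+ *
+ * For example, if ta_queue holds
+ *
+ *   raster0 -> flip0 -> ta1
+ *
+ * then raster0 and flip0 are moved over to raster_queue (raster0 with
+ * its TA-done fence type reported right away) before ta1 is considered
+ * for the TA engine.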
+ */ + + list_for_each_safe(list, next, &scheduler->ta_queue) { + task = list_entry(list, struct psb_task, head); + if (task->task_type != psb_raster_task && task->task_type != psb_flip_task) + break; + + if (task->task_type == psb_flip_task) { + list_del_init(list); + list_add_tail(list, &scheduler->raster_queue); + task = NULL; + } + else { + list_del_init(list); + list_add_tail(list, &scheduler->raster_queue); + psb_report_fence(dev_priv, scheduler, task->engine, + task->sequence, + _PSB_FENCE_TA_DONE_SHIFT, 1); + task = NULL; + pushed_raster_task = 1; + } + } + + if (pushed_raster_task) + psb_schedule_raster(dev_priv, scheduler); + + if (!task) + return; + + /* + * Still waiting for a vistest? + */ + + if (scheduler->feedback_task == task) + return; + +#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE + + /* + * Block ta from trying to use both hardware contexts + * without the rasterizer starting to render from one of them. + */ + + if (!list_empty(&scheduler->raster_queue)) + return; + +#endif + +#ifdef PSB_BLOCK_OVERLAP + /* + * Make sure rasterizer isn't doing anything. + */ + if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) + return; +#endif + if (list_empty(&scheduler->hw_scenes)) + return; + +#ifdef FIX_TG_16 + if (psb_check_2d_idle(dev_priv)) + return; +#endif + + list_del_init(&task->head); + if (task->flags & PSB_FIRE_FLAG_XHW_OOM) + scheduler->ta_state = 1; + + scheduler->current_task[PSB_SCENE_ENGINE_TA] = task; + scheduler->idle = 0; + scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT; + scheduler->total_ta_jiffies = 0; + + task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ? + 0x00000000 : PSB_RF_FIRE_TA; + + (void) psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size); + psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, + task); + psb_schedule_watchdog(dev_priv); +} + +static int psb_fire_raster(struct psb_scheduler *scheduler, + struct psb_task *task) +{ + struct drm_device *dev = scheduler->dev; + struct drm_psb_private *dev_priv = (struct drm_psb_private *) + dev->dev_private; + + PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence); + + return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags); +} + +/* + * Take the first rasterization task from the hp raster queue or from the + * raster queue and fire the rasterizer. + */ + +static void psb_schedule_raster(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + struct psb_task *task; + struct list_head *list; + int pipe; + + if (scheduler->idle_count != 0) + return; + + if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) { + PSB_DEBUG_RENDER("Raster busy.\n"); + return; + } +#ifdef PSB_BLOCK_OVERLAP + if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) { + PSB_DEBUG_RENDER("TA busy.\n"); + return; + } +#endif + + if (!list_empty(&scheduler->hp_raster_queue)) + list = scheduler->hp_raster_queue.next; + else if (!list_empty(&scheduler->raster_queue)) + list = scheduler->raster_queue.next; + else { + PSB_DEBUG_RENDER("Nothing in list\n"); + return; + } + + task = list_entry(list, struct psb_task, head); + + if (task->task_type == psb_flip_task) { + for (pipe=0; pipe<2; pipe++) { + if (dev_priv->pipe_active[pipe] == 1) + psb_flip_set_base(dev_priv, pipe); + } + list_del_init(list); + task = NULL; + psb_schedule_raster(dev_priv, scheduler); + return; + } + + /* + * Sometimes changing ZLS format requires an ISP reset. + * Doesn't seem to consume too much time. 
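+ *
+ * The pulse is visible below: _PSB_CS_RESET_ISP_RESET is written to
+ * PSB_CR_SOFT_RESET before the task is committed and cleared again just
+ * before the raster commands are submitted.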
+ */ + + if (task->scene) + PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET); + + scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task; + + list_del_init(list); + scheduler->idle = 0; + scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT; + scheduler->total_raster_jiffies = 0; + + if (task->scene) + PSB_WSGX32(0, PSB_CR_SOFT_RESET); + + (void) psb_reg_submit(dev_priv, task->raster_cmds, + task->raster_cmd_size); + + if (task->scene) { + task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ? + 0x00000000 : PSB_RF_FIRE_RASTER; + psb_set_scene_fire(scheduler, + task->scene, PSB_SCENE_ENGINE_RASTER, + task); + } else { + task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER; + psb_fire_raster(scheduler, task); + } + psb_schedule_watchdog(dev_priv); +} + +int psb_extend_timeout(struct drm_psb_private *dev_priv, + uint32_t xhw_lockup) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + int ret = -EBUSY; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + + if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL && + time_after_eq(jiffies, scheduler->ta_end_jiffies)) { + if (xhw_lockup & PSB_LOCKUP_TA) { + goto out_unlock; + } else { + scheduler->total_ta_jiffies += + jiffies - scheduler->ta_end_jiffies + + PSB_TA_TIMEOUT; + if (scheduler->total_ta_jiffies > + PSB_ALLOWED_TA_RUNTIME) + goto out_unlock; + scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT; + } + } + if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL && + time_after_eq(jiffies, scheduler->raster_end_jiffies)) { + if (xhw_lockup & PSB_LOCKUP_RASTER) { + goto out_unlock; + } else { + scheduler->total_raster_jiffies += + jiffies - scheduler->raster_end_jiffies + + PSB_RASTER_TIMEOUT; + if (scheduler->total_raster_jiffies > + PSB_ALLOWED_RASTER_RUNTIME) + goto out_unlock; + scheduler->raster_end_jiffies = + jiffies + PSB_RASTER_TIMEOUT; + } + } + + ret = 0; + +out_unlock: + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + return ret; +} + +/* + * TA done handler. + */ + +static void psb_ta_done(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + struct psb_task *task = + scheduler->current_task[PSB_SCENE_ENGINE_TA]; + struct psb_scene *scene = task->scene; + + PSB_DEBUG_RENDER("TA done %u\n", task->sequence); + + switch (task->ta_complete_action) { + case PSB_RASTER_BLOCK: + scheduler->ta_state = 1; + scene->flags |= + (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE); + list_add_tail(&task->head, &scheduler->raster_queue); + break; + case PSB_RASTER: + scene->flags |= + (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE); + list_add_tail(&task->head, &scheduler->raster_queue); + break; + case PSB_RETURN: + scheduler->ta_state = 0; + scene->flags |= PSB_SCENE_FLAG_DIRTY; + list_add_tail(&scene->hw_scene->head, + &scheduler->hw_scenes); + + break; + } + + scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL; + +#ifdef FIX_TG_16 + psb_2d_atomic_unlock(dev_priv); +#endif + + if (task->ta_complete_action != PSB_RASTER_BLOCK) + psb_report_fence(dev_priv, scheduler, task->engine, + task->sequence, + _PSB_FENCE_TA_DONE_SHIFT, 1); + + psb_schedule_raster(dev_priv, scheduler); + psb_schedule_ta(dev_priv, scheduler); + psb_set_idle(scheduler); + + if (task->ta_complete_action != PSB_RETURN) + return; + + list_add_tail(&task->head, &scheduler->task_done_queue); + schedule_delayed_work(&scheduler->wq, 0); +} + +/* + * Rasterizer done handler. 
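+ *
+ * task->raster_complete_action decides the task's fate:
+ *   PSB_RETURN - the scene is finished; recycle the hw scene and
+ *                report the scene-done fence type,
+ *   PSB_RASTER - requeue for another rasterization pass,
+ *   PSB_TA     - hand the task back to the TA queue (OOM recovery).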
+ */ + +static void psb_raster_done(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + struct psb_task *task = + scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; + struct psb_scene *scene = task->scene; + uint32_t complete_action = task->raster_complete_action; + + PSB_DEBUG_RENDER("Raster done %u\n", task->sequence); + + scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL; + + if (complete_action != PSB_RASTER) + psb_schedule_raster(dev_priv, scheduler); + + if (scene) { + if (task->feedback.page) { + if (unlikely(scheduler->feedback_task)) { + /* + * This should never happen, since the previous + * feedback query will return before the next + * raster task is fired. + */ + DRM_ERROR("Feedback task busy.\n"); + } + scheduler->feedback_task = task; + psb_xhw_vistest(dev_priv, &task->buf); + } + switch (complete_action) { + case PSB_RETURN: + scene->flags &= + ~(PSB_SCENE_FLAG_DIRTY | + PSB_SCENE_FLAG_COMPLETE); + list_add_tail(&scene->hw_scene->head, + &scheduler->hw_scenes); + psb_report_fence(dev_priv, scheduler, task->engine, + task->sequence, + _PSB_FENCE_SCENE_DONE_SHIFT, 1); + if (task->flags & PSB_FIRE_FLAG_XHW_OOM) + scheduler->ta_state = 0; + + break; + case PSB_RASTER: + list_add(&task->head, &scheduler->raster_queue); + task->raster_complete_action = PSB_RETURN; + psb_schedule_raster(dev_priv, scheduler); + break; + case PSB_TA: + list_add(&task->head, &scheduler->ta_queue); + scheduler->ta_state = 0; + task->raster_complete_action = PSB_RETURN; + task->ta_complete_action = PSB_RASTER; + break; + + } + } + psb_schedule_ta(dev_priv, scheduler); + psb_set_idle(scheduler); + + if (complete_action == PSB_RETURN) { + if (task->scene == NULL) { + psb_report_fence(dev_priv, scheduler, task->engine, + task->sequence, + _PSB_FENCE_RASTER_DONE_SHIFT, 1); + } + if (!task->feedback.page) { + list_add_tail(&task->head, + &scheduler->task_done_queue); + schedule_delayed_work(&scheduler->wq, 0); + } + } +} + +void psb_scheduler_pause(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + scheduler->idle_count++; + spin_unlock_irqrestore(&scheduler->lock, irq_flags); +} + +void psb_scheduler_restart(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + if (--scheduler->idle_count == 0) { + psb_schedule_ta(dev_priv, scheduler); + psb_schedule_raster(dev_priv, scheduler); + } + spin_unlock_irqrestore(&scheduler->lock, irq_flags); +} + +int psb_scheduler_idle(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + int ret; + spin_lock_irqsave(&scheduler->lock, irq_flags); + ret = scheduler->idle_count != 0 && scheduler->idle; + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + return ret; +} + +int psb_scheduler_finished(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + int ret; + spin_lock_irqsave(&scheduler->lock, irq_flags); + ret = (scheduler->idle && + list_empty(&scheduler->raster_queue) && + list_empty(&scheduler->ta_queue) && + list_empty(&scheduler->hp_raster_queue)); + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + return ret; +} + +static void psb_ta_oom(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + + struct psb_task *task = + 
scheduler->current_task[PSB_SCENE_ENGINE_TA]; + if (!task) + return; + + if (task->aborting) + return; + task->aborting = 1; + + DRM_INFO("Info: TA out of parameter memory.\n"); + + (void) psb_xhw_ta_oom(dev_priv, &task->buf, + task->scene->hw_cookie); +} + +static void psb_ta_oom_reply(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + + struct psb_task *task = + scheduler->current_task[PSB_SCENE_ENGINE_TA]; + uint32_t flags; + if (!task) + return; + + psb_xhw_ta_oom_reply(dev_priv, &task->buf, + task->scene->hw_cookie, + &task->ta_complete_action, + &task->raster_complete_action, &flags); + task->flags |= flags; + task->aborting = 0; + psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY); +} + +static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + DRM_ERROR("TA hw scene freed.\n"); +} + +static void psb_vistest_reply(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + struct psb_task *task = scheduler->feedback_task; + uint8_t *feedback_map; + uint32_t add; + uint32_t cur; + struct drm_psb_vistest *vistest; + int i; + + scheduler->feedback_task = NULL; + if (!task) { + DRM_ERROR("No Poulsbo feedback task.\n"); + return; + } + if (!task->feedback.page) { + DRM_ERROR("No Poulsbo feedback page.\n"); + goto out; + } + + if (in_irq()) + feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0); + else + feedback_map = kmap_atomic(task->feedback.page, KM_USER0); + + /* + * Loop over all requested vistest components here. + * Only one (vistest) currently. + */ + + vistest = (struct drm_psb_vistest *) + (feedback_map + task->feedback.offset); + + for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) { + add = task->buf.arg.arg.feedback[i]; + cur = vistest->vt[i]; + + /* + * Vistest saturates. + */ + + vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add; + } + if (in_irq()) + kunmap_atomic(feedback_map, KM_IRQ0); + else + kunmap_atomic(feedback_map, KM_USER0); +out: + psb_report_fence(dev_priv, scheduler, task->engine, task->sequence, + _PSB_FENCE_FEEDBACK_SHIFT, 1); + + if (list_empty(&task->head)) { + list_add_tail(&task->head, &scheduler->task_done_queue); + schedule_delayed_work(&scheduler->wq, 0); + } else + psb_schedule_ta(dev_priv, scheduler); +} + +static void psb_ta_fire_reply(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + struct psb_task *task = + scheduler->current_task[PSB_SCENE_ENGINE_TA]; + + psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie); + + psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA); +} + +static void psb_raster_fire_reply(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + struct psb_task *task = + scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; + uint32_t reply_flags; + + if (!task) { + DRM_ERROR("Null task.\n"); + return; + } + + task->raster_complete_action = task->buf.arg.arg.sb.rca; + psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie); + + reply_flags = PSB_RF_FIRE_RASTER; + if (task->raster_complete_action == PSB_RASTER) + reply_flags |= PSB_RF_DEALLOC; + + psb_dispatch_raster(dev_priv, scheduler, reply_flags); +} + +static int psb_user_interrupt(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler) +{ + uint32_t type; + int ret; + unsigned long irq_flags; + + /* + * Xhw cannot write directly to the comm page, so + * do it here. Firmware would have written directly. 
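+ *
+ * The event type then comes out of comm[PSB_COMM_USER_IRQ] and is
+ * dispatched below to one of: PSB_UIRQ_VISTEST, PSB_UIRQ_OOM_REPLY,
+ * PSB_UIRQ_FIRE_TA_REPLY or PSB_UIRQ_FIRE_RASTER_REPLY.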
+ */ + + ret = psb_xhw_handler(dev_priv); + if (unlikely(ret)) + return ret; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + type = dev_priv->comm[PSB_COMM_USER_IRQ]; + dev_priv->comm[PSB_COMM_USER_IRQ] = 0; + if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) { + dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0; + DRM_ERROR("Lost Poulsbo hardware event.\n"); + } + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + + if (type == 0) + return 0; + + switch (type) { + case PSB_UIRQ_VISTEST: + psb_vistest_reply(dev_priv, scheduler); + break; + case PSB_UIRQ_OOM_REPLY: + psb_ta_oom_reply(dev_priv, scheduler); + break; + case PSB_UIRQ_FIRE_TA_REPLY: + psb_ta_fire_reply(dev_priv, scheduler); + break; + case PSB_UIRQ_FIRE_RASTER_REPLY: + psb_raster_fire_reply(dev_priv, scheduler); + break; + default: + DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type); + } + return 0; +} + +int psb_forced_user_interrupt(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + int ret; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + ret = psb_user_interrupt(dev_priv, scheduler); + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + return ret; +} + +static void psb_dispatch_ta(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler, + uint32_t reply_flag) +{ + struct psb_task *task = + scheduler->current_task[PSB_SCENE_ENGINE_TA]; + uint32_t flags; + uint32_t mask; + + if (unlikely(!task)) + return; + + task->reply_flags |= reply_flag; + flags = task->reply_flags; + mask = PSB_RF_FIRE_TA; + + if (!(flags & mask)) + return; + + mask = PSB_RF_TA_DONE; + if ((flags & mask) == mask) { + task->reply_flags &= ~mask; + psb_ta_done(dev_priv, scheduler); + } + + mask = PSB_RF_OOM; + if ((flags & mask) == mask) { + task->reply_flags &= ~mask; + psb_ta_oom(dev_priv, scheduler); + } + + mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE); + if ((flags & mask) == mask) { + task->reply_flags &= ~mask; + psb_ta_done(dev_priv, scheduler); + } +} + +static void psb_dispatch_raster(struct drm_psb_private *dev_priv, + struct psb_scheduler *scheduler, + uint32_t reply_flag) +{ + struct psb_task *task = + scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; + uint32_t flags; + uint32_t mask; + + if (unlikely(!task)) + return; + + task->reply_flags |= reply_flag; + flags = task->reply_flags; + mask = PSB_RF_FIRE_RASTER; + + if (!(flags & mask)) + return; + + /* + * For rasterizer-only tasks, don't report fence done here, + * as this is time consuming and the rasterizer wants a new + * task immediately. For other tasks, the hardware is probably + * still busy deallocating TA memory, so we can report + * fence done in parallel. 
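+ *
+ * Note that completion itself is still gated on both
+ * PSB_RF_RASTER_DONE and PSB_RF_DEALLOC having accumulated in
+ * task->reply_flags before psb_raster_done() is called.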
+ */ + + if (task->raster_complete_action == PSB_RETURN && + (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) { + psb_report_fence(dev_priv, scheduler, task->engine, + task->sequence, + _PSB_FENCE_RASTER_DONE_SHIFT, 1); + } + + mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC; + if ((flags & mask) == mask) { + task->reply_flags &= ~mask; + psb_raster_done(dev_priv, scheduler); + } +} + +void psb_scheduler_handler(struct drm_psb_private *dev_priv, + uint32_t status) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + + spin_lock(&scheduler->lock); + + if (status & _PSB_CE_PIXELBE_END_RENDER) { + psb_dispatch_raster(dev_priv, scheduler, + PSB_RF_RASTER_DONE); + } + if (status & _PSB_CE_DPM_3D_MEM_FREE) + psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC); + + if (status & _PSB_CE_TA_FINISHED) + psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE); + + if (status & _PSB_CE_TA_TERMINATE) + psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE); + + if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH | + _PSB_CE_DPM_OUT_OF_MEMORY_GBL | + _PSB_CE_DPM_OUT_OF_MEMORY_MT)) { + psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM); + } + if (status & _PSB_CE_DPM_TA_MEM_FREE) + psb_ta_hw_scene_freed(dev_priv, scheduler); + + if (status & _PSB_CE_SW_EVENT) + psb_user_interrupt(dev_priv, scheduler); + + spin_unlock(&scheduler->lock); +} + +static void psb_free_task_wq(struct work_struct *work) +{ + struct psb_scheduler *scheduler = + container_of(work, struct psb_scheduler, wq.work); + + struct list_head *list, *next; + unsigned long irq_flags; + struct psb_task *task; + + if (!mutex_trylock(&scheduler->task_wq_mutex)) + return; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + list_for_each_safe(list, next, &scheduler->task_done_queue) { + task = list_entry(list, struct psb_task, head); + list_del_init(list); + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + + PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, " + "Feedback bo 0x%08lx, done %d\n", + task->sequence, + (unsigned long) task->scene, + (unsigned long) task->feedback.bo, + atomic_read(&task->buf.done)); + + if (task->scene) { + PSB_DEBUG_RENDER("Unref scene %d\n", + task->sequence); + psb_scene_unref(&task->scene); + if (task->feedback.bo) { + PSB_DEBUG_RENDER("Unref feedback bo %d\n", + task->sequence); + ttm_bo_unref(&task->feedback.bo); + } + } + + if (atomic_read(&task->buf.done)) { + PSB_DEBUG_RENDER("Deleting task %d\n", + task->sequence); + kfree(task); + task = NULL; + } + spin_lock_irqsave(&scheduler->lock, irq_flags); + if (task != NULL) + list_add(list, &scheduler->task_done_queue); + } + if (!list_empty(&scheduler->task_done_queue)) { + PSB_DEBUG_RENDER("Rescheduling wq\n"); + schedule_delayed_work(&scheduler->wq, 1); + } + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + if (list_empty(&scheduler->task_done_queue) && + drm_psb_ospm && IS_MRST(scheduler->dev)) { + psb_try_power_down_sgx(scheduler->dev); + } + mutex_unlock(&scheduler->task_wq_mutex); +} + +static void psb_powerdown_topaz(struct work_struct *work) +{ + struct psb_scheduler *scheduler = + container_of(work, struct psb_scheduler, topaz_suspend_wq.work); + + if (!mutex_trylock(&scheduler->topaz_power_mutex)) + return; + + psb_try_power_down_topaz(scheduler->dev); + mutex_unlock(&scheduler->topaz_power_mutex); +} + +static void psb_powerdown_msvdx(struct work_struct *work) +{ + struct psb_scheduler *scheduler = + container_of(work, struct psb_scheduler, msvdx_suspend_wq.work); + + if (!mutex_trylock(&scheduler->msvdx_power_mutex)) + return; + + 
psb_try_power_down_msvdx(scheduler->dev); + mutex_unlock(&scheduler->msvdx_power_mutex); +} + +/* + * Check if any of the tasks in the queues is using a scene. + * In that case we know the TA memory buffer objects are + * fenced and will not be evicted until that fence is signaled. + */ + +void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + struct psb_task *task; + struct psb_task *next_task; + + dev_priv->force_ta_mem_load = 1; + spin_lock_irqsave(&scheduler->lock, irq_flags); + list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, + head) { + if (task->scene) { + dev_priv->force_ta_mem_load = 0; + break; + } + } + list_for_each_entry_safe(task, next_task, &scheduler->raster_queue, + head) { + if (task->scene) { + dev_priv->force_ta_mem_load = 0; + break; + } + } + spin_unlock_irqrestore(&scheduler->lock, irq_flags); +} + +void psb_scheduler_reset(struct drm_psb_private *dev_priv, + int error_condition) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long wait_jiffies; + unsigned long cur_jiffies; + struct psb_task *task; + struct psb_task *next_task; + unsigned long irq_flags; + + psb_scheduler_pause(dev_priv); + if (!psb_scheduler_idle(dev_priv)) { + spin_lock_irqsave(&scheduler->lock, irq_flags); + + cur_jiffies = jiffies; + wait_jiffies = cur_jiffies; + if (scheduler->current_task[PSB_SCENE_ENGINE_TA] && + time_after_eq(scheduler->ta_end_jiffies, wait_jiffies)) + wait_jiffies = scheduler->ta_end_jiffies; + if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] && + time_after_eq(scheduler->raster_end_jiffies, + wait_jiffies)) + wait_jiffies = scheduler->raster_end_jiffies; + + wait_jiffies -= cur_jiffies; + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + + (void) wait_event_timeout(scheduler->idle_queue, + psb_scheduler_idle(dev_priv), + wait_jiffies); + } + + if (!psb_scheduler_idle(dev_priv)) { + spin_lock_irqsave(&scheduler->lock, irq_flags); + task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; + if (task) { + DRM_ERROR("Detected Poulsbo rasterizer lockup.\n"); + if (task->engine == PSB_ENGINE_HPRAST) { + psb_fence_error(scheduler->dev, + PSB_ENGINE_HPRAST, + task->sequence, + _PSB_FENCE_TYPE_RASTER_DONE, + error_condition); + + list_del(&task->head); + psb_xhw_clean_buf(dev_priv, &task->buf); + list_add_tail(&task->head, + &scheduler->task_done_queue); + } else { + list_add(&task->head, + &scheduler->raster_queue); + } + } + scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL; + task = scheduler->current_task[PSB_SCENE_ENGINE_TA]; + if (task) { + DRM_ERROR("Detected Poulsbo ta lockup.\n"); + list_add_tail(&task->head, + &scheduler->raster_queue); +#ifdef FIX_TG_16 + psb_2d_atomic_unlock(dev_priv); +#endif + } + scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL; + scheduler->ta_state = 0; + +#ifdef FIX_TG_16 + atomic_set(&dev_priv->ta_wait_2d, 0); + atomic_set(&dev_priv->ta_wait_2d_irq, 0); + wake_up(&dev_priv->queue_2d); +#endif + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + } + + /* + * Empty raster queue. 
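+ *
+ * Every task still queued is failed here: all of its fence types are
+ * signaled with the caller's error_condition (-EBUSY on the watchdog
+ * path) and the task is parked on task_done_queue for freeing by
+ * psb_free_task_wq().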
+ */ + + spin_lock_irqsave(&scheduler->lock, irq_flags); + list_for_each_entry_safe(task, next_task, &scheduler->raster_queue, + head) { + struct psb_scene *scene = task->scene; + + DRM_INFO("Signaling fence sequence %u\n", + task->sequence); + + psb_fence_error(scheduler->dev, + task->engine, + task->sequence, + _PSB_FENCE_TYPE_TA_DONE | + _PSB_FENCE_TYPE_RASTER_DONE | + _PSB_FENCE_TYPE_SCENE_DONE | + _PSB_FENCE_TYPE_FEEDBACK, error_condition); + if (scene) { + scene->flags = 0; + if (scene->hw_scene) { + list_add_tail(&scene->hw_scene->head, + &scheduler->hw_scenes); + scene->hw_scene = NULL; + } + } + + psb_xhw_clean_buf(dev_priv, &task->buf); + list_del(&task->head); + list_add_tail(&task->head, &scheduler->task_done_queue); + } + + schedule_delayed_work(&scheduler->wq, 1); + scheduler->idle = 1; + wake_up(&scheduler->idle_queue); + + spin_unlock_irqrestore(&scheduler->lock, irq_flags); + psb_scheduler_restart(dev_priv); + +} + +int psb_scheduler_init(struct drm_device *dev, + struct psb_scheduler *scheduler) +{ + struct psb_hw_scene *hw_scene; + int i; + + memset(scheduler, 0, sizeof(*scheduler)); + scheduler->dev = dev; + mutex_init(&scheduler->task_wq_mutex); + mutex_init(&scheduler->topaz_power_mutex); + mutex_init(&scheduler->msvdx_power_mutex); + spin_lock_init(&scheduler->lock); + scheduler->idle = 1; + + INIT_LIST_HEAD(&scheduler->ta_queue); + INIT_LIST_HEAD(&scheduler->raster_queue); + INIT_LIST_HEAD(&scheduler->hp_raster_queue); + INIT_LIST_HEAD(&scheduler->hw_scenes); + INIT_LIST_HEAD(&scheduler->task_done_queue); + INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq); + INIT_DELAYED_WORK(&scheduler->topaz_suspend_wq, + &psb_powerdown_topaz); + INIT_DELAYED_WORK(&scheduler->msvdx_suspend_wq, + &psb_powerdown_msvdx); + init_waitqueue_head(&scheduler->idle_queue); + + for (i = 0; i < PSB_NUM_HW_SCENES; ++i) { + hw_scene = &scheduler->hs[i]; + hw_scene->context_number = i; + list_add_tail(&hw_scene->head, &scheduler->hw_scenes); + } + + for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) + scheduler->seq[i].reported = 0; + return 0; +} + +/* + * Scene references maintained by the scheduler are not refcounted. + * Remove all references to a particular scene here. 
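+ *
+ * This is called from psb_destroy_scene() so that no hw scene is left
+ * with a stale last_scene pointer into freed memory.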
+ */ + +void psb_scheduler_remove_scene_refs(struct psb_scene *scene) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) scene->dev->dev_private; + struct psb_scheduler *scheduler = &dev_priv->scheduler; + struct psb_hw_scene *hw_scene; + unsigned long irq_flags; + unsigned int i; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + for (i = 0; i < PSB_NUM_HW_SCENES; ++i) { + hw_scene = &scheduler->hs[i]; + if (hw_scene->last_scene == scene) { + BUG_ON(list_empty(&hw_scene->head)); + hw_scene->last_scene = NULL; + } + } + spin_unlock_irqrestore(&scheduler->lock, irq_flags); +} + +void psb_scheduler_takedown(struct psb_scheduler *scheduler) +{ + flush_scheduled_work(); +} + +static int psb_setup_task(struct drm_device *dev, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *raster_cmd_buffer, + struct ttm_buffer_object *ta_cmd_buffer, + struct ttm_buffer_object *oom_cmd_buffer, + struct psb_scene *scene, + enum psb_task_type task_type, + uint32_t engine, + uint32_t flags, struct psb_task **task_p) +{ + struct psb_task *task; + int ret; + + if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) { + DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size); + return -EINVAL; + } + if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) { + DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size); + return -EINVAL; + } + if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) { + DRM_ERROR("Too many oom cmds %d.\n", arg->oom_size); + return -EINVAL; + } + + task = kzalloc(sizeof(*task), GFP_KERNEL); + if (!task) + return -ENOMEM; + + atomic_set(&task->buf.done, 1); + task->engine = engine; + INIT_LIST_HEAD(&task->head); + INIT_LIST_HEAD(&task->buf.head); + if (ta_cmd_buffer && arg->ta_size != 0) { + task->ta_cmd_size = arg->ta_size; + ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer, + arg->ta_offset, + arg->ta_size, + PSB_ENGINE_TA, task->ta_cmds); + if (ret) + goto out_err; + } + if (raster_cmd_buffer) { + task->raster_cmd_size = arg->cmdbuf_size; + ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer, + arg->cmdbuf_offset, + arg->cmdbuf_size, + PSB_ENGINE_TA, + task->raster_cmds); + if (ret) + goto out_err; + } + if (oom_cmd_buffer && arg->oom_size != 0) { + task->oom_cmd_size = arg->oom_size; + ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer, + arg->oom_offset, + arg->oom_size, + PSB_ENGINE_TA, + task->oom_cmds); + if (ret) + goto out_err; + } + task->task_type = task_type; + task->flags = flags; + if (scene) + task->scene = psb_scene_ref(scene); + + *task_p = task; + return 0; +out_err: + kfree(task); + *task_p = NULL; + return ret; +} + +int psb_cmdbuf_ta(struct drm_file *priv, + struct psb_context *context, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *cmd_buffer, + struct ttm_buffer_object *ta_buffer, + struct ttm_buffer_object *oom_buffer, + struct psb_scene *scene, + struct psb_feedback_info *feedback, + struct psb_ttm_fence_rep *fence_arg) +{ + struct drm_device *dev = priv->minor->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct ttm_fence_object *fence = NULL; + struct psb_task *task = NULL; + int ret; + struct psb_scheduler *scheduler = &dev_priv->scheduler; + uint32_t sequence; + + PSB_DEBUG_RENDER("Cmdbuf ta\n"); + + ret = psb_setup_task(dev, arg, cmd_buffer, ta_buffer, + oom_buffer, scene, + psb_ta_task, PSB_ENGINE_TA, + PSB_FIRE_FLAG_RASTER_DEALLOC, &task); + + if (ret) + goto out_err; + + task->feedback = *feedback; + mutex_lock(&dev_priv->reset_mutex); + + /* + * Hand the task over to the scheduler. 
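+ *
+ * The sequence number is allocated while reset_mutex is held and the
+ * task is queued under scheduler->lock, so tasks enter ta_queue in
+ * sequence order.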
+ */ + + task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA); + + task->ta_complete_action = PSB_RASTER; + task->raster_complete_action = PSB_RETURN; + sequence = task->sequence; + + spin_lock_irq(&scheduler->lock); + + list_add_tail(&task->head, &scheduler->ta_queue); + PSB_DEBUG_RENDER("queued ta %u\n", task->sequence); + + psb_schedule_ta(dev_priv, scheduler); + + /** + * From this point we may no longer dereference task, + * as the object it points to may be freed by another thread. + */ + + task = NULL; + spin_unlock_irq(&scheduler->lock); + mutex_unlock(&dev_priv->reset_mutex); + + psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types, + arg->fence_flags, + &context->validate_list, fence_arg, &fence); + ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence); + + if (fence) { + spin_lock_irq(&scheduler->lock); + psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, + sequence, _PSB_FENCE_EXE_SHIFT, 1); + spin_unlock_irq(&scheduler->lock); + fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE; + } + +out_err: + if (ret && ret != -ERESTART) + DRM_ERROR("TA task queue job failed.\n"); + + if (fence) { +#ifdef PSB_WAIT_FOR_TA_COMPLETION + ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE | + _PSB_FENCE_TYPE_TA_DONE); +#ifdef PSB_BE_PARANOID + ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE | + _PSB_FENCE_TYPE_SCENE_DONE); +#endif +#endif + ttm_fence_object_unref(&fence); + } + return ret; +} + +int psb_cmdbuf_raster(struct drm_file *priv, + struct psb_context *context, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *cmd_buffer, + struct psb_ttm_fence_rep *fence_arg) +{ + struct drm_device *dev = priv->minor->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct ttm_fence_object *fence = NULL; + struct psb_task *task = NULL; + int ret; + uint32_t sequence; + struct psb_scheduler *scheduler = &dev_priv->scheduler; + + PSB_DEBUG_RENDER("Cmdbuf Raster\n"); + + ret = psb_setup_task(dev, arg, cmd_buffer, NULL, NULL, + NULL, psb_raster_task, + PSB_ENGINE_TA, 0, &task); + + if (ret) + goto out_err; + + /* + * Hand the task over to the scheduler. + */ + + mutex_lock(&dev_priv->reset_mutex); + task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA); + task->ta_complete_action = PSB_RASTER; + task->raster_complete_action = PSB_RETURN; + sequence = task->sequence; + + spin_lock_irq(&scheduler->lock); + list_add_tail(&task->head, &scheduler->ta_queue); + PSB_DEBUG_RENDER("queued raster %u\n", task->sequence); + psb_schedule_ta(dev_priv, scheduler); + + /** + * From this point we may no longer dereference task, + * as the object it points to may be freed by another thread. 
+ */ + + task = NULL; + spin_unlock_irq(&scheduler->lock); + mutex_unlock(&dev_priv->reset_mutex); + + psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types, + arg->fence_flags, + &context->validate_list, fence_arg, &fence); + + ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence); + if (fence) { + spin_lock_irq(&scheduler->lock); + psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, sequence, + _PSB_FENCE_EXE_SHIFT, 1); + spin_unlock_irq(&scheduler->lock); + fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE; + } +out_err: + if (ret && ret != -ERESTART) + DRM_ERROR("Raster task queue job failed.\n"); + + if (fence) { +#ifdef PSB_WAIT_FOR_RASTER_COMPLETION + ttm_fence_object_wait(fence, 1, 1, fence->type); +#endif + ttm_fence_object_unref(&fence); + } + + return ret; +} + +#ifdef FIX_TG_16 + +static int psb_check_2d_idle(struct drm_psb_private *dev_priv) +{ + if (psb_2d_trylock(dev_priv)) { + if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && + !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & + _PSB_C2B_STATUS_BUSY))) { + return 0; + } + if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0) + psb_2D_irq_on(dev_priv); + + PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT); + PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT); + (void) PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT); + + psb_2d_atomic_unlock(dev_priv); + } + + atomic_set(&dev_priv->ta_wait_2d, 1); + return -EBUSY; +} + +static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + + if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) { + psb_schedule_ta(dev_priv, scheduler); + if (atomic_read(&dev_priv->waiters_2d) != 0) + wake_up(&dev_priv->queue_2d); + } +} + +void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) { + atomic_set(&dev_priv->ta_wait_2d, 0); + psb_2D_irq_off(dev_priv); + psb_schedule_ta(dev_priv, scheduler); + if (atomic_read(&dev_priv->waiters_2d) != 0) + wake_up(&dev_priv->queue_2d); + } + spin_unlock_irqrestore(&scheduler->lock, irq_flags); +} + +/* + * 2D locking functions. Can't use a mutex since the trylock() and + * unlock() methods need to be accessible from interrupt context. 
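+ * The lock is therefore a simple atomic flag (lock_2d) taken with + * cmpxchg; sleeping waiters in psb_2d_lock() are woken via queue_2d.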
+ */ + +int psb_2d_trylock(struct drm_psb_private *dev_priv) +{ + return atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0; +} + +void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv) +{ + atomic_set(&dev_priv->lock_2d, 0); + if (atomic_read(&dev_priv->waiters_2d) != 0) + wake_up(&dev_priv->queue_2d); +} + +void psb_2d_unlock(struct drm_psb_private *dev_priv) +{ + struct psb_scheduler *scheduler = &dev_priv->scheduler; + unsigned long irq_flags; + + spin_lock_irqsave(&scheduler->lock, irq_flags); + psb_2d_atomic_unlock(dev_priv); + if (atomic_read(&dev_priv->ta_wait_2d) != 0) + psb_atomic_resume_ta_2d_idle(dev_priv); + spin_unlock_irqrestore(&scheduler->lock, irq_flags); +} + +void psb_2d_lock(struct drm_psb_private *dev_priv) +{ + atomic_inc(&dev_priv->waiters_2d); + wait_event(dev_priv->queue_2d, + atomic_read(&dev_priv->ta_wait_2d) == 0); + wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv)); + atomic_dec(&dev_priv->waiters_2d); +} + +#endif diff --git a/drivers/gpu/drm/psb/psb_schedule.h b/drivers/gpu/drm/psb/psb_schedule.h new file mode 100644 index 0000000..01c27b0 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_schedule.h @@ -0,0 +1,181 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. 
+ * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _PSB_SCHEDULE_H_ +#define _PSB_SCHEDULE_H_ + +#include + +struct psb_context; + +enum psb_task_type { + psb_ta_midscene_task, + psb_ta_task, + psb_raster_task, + psb_freescene_task, + psb_flip_task +}; + +#define PSB_MAX_TA_CMDS 60 +#define PSB_MAX_RASTER_CMDS 66 +#define PSB_MAX_OOM_CMDS (DRM_PSB_NUM_RASTER_USE_REG * 2 + 6) + +struct psb_xhw_buf { + struct list_head head; + int copy_back; + atomic_t done; + struct drm_psb_xhw_arg arg; + +}; + +struct psb_feedback_info { + struct ttm_buffer_object *bo; + struct page *page; + uint32_t offset; +}; + +struct psb_task { + struct list_head head; + struct psb_scene *scene; + struct psb_feedback_info feedback; + enum psb_task_type task_type; + uint32_t engine; + uint32_t sequence; + uint32_t ta_cmds[PSB_MAX_TA_CMDS]; + uint32_t raster_cmds[PSB_MAX_RASTER_CMDS]; + uint32_t oom_cmds[PSB_MAX_OOM_CMDS]; + uint32_t ta_cmd_size; + uint32_t raster_cmd_size; + uint32_t oom_cmd_size; + uint32_t feedback_offset; + uint32_t ta_complete_action; + uint32_t raster_complete_action; + uint32_t hw_cookie; + uint32_t flags; + uint32_t reply_flags; + uint32_t aborting; + struct psb_xhw_buf buf; +}; + +struct psb_hw_scene { + struct list_head head; + uint32_t context_number; + + /* + * This pointer does not refcount the last_scene_buffer, + * so we must make sure it is set to NULL before destroying + * the corresponding task. + */ + + struct psb_scene *last_scene; +}; + +struct psb_scene; +struct drm_psb_private; + +struct psb_scheduler_seq { + uint32_t sequence; + int reported; +}; + +struct psb_scheduler { + struct drm_device *dev; + struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES]; + struct psb_hw_scene hs[PSB_NUM_HW_SCENES]; + struct mutex task_wq_mutex; + struct mutex topaz_power_mutex; + struct mutex msvdx_power_mutex; + spinlock_t lock; + struct list_head hw_scenes; + struct list_head ta_queue; + struct list_head raster_queue; + struct list_head hp_raster_queue; + struct list_head task_done_queue; + struct psb_task *current_task[PSB_SCENE_NUM_ENGINES]; + struct psb_task *feedback_task; + int ta_state; + struct psb_hw_scene *pending_hw_scene; + uint32_t pending_hw_scene_seq; + struct delayed_work wq; + struct delayed_work topaz_suspend_wq; + struct delayed_work msvdx_suspend_wq; + struct psb_scene_pool *pool; + uint32_t idle_count; + int idle; + wait_queue_head_t idle_queue; + unsigned long ta_end_jiffies; + unsigned long total_ta_jiffies; + unsigned long raster_end_jiffies; + unsigned long total_raster_jiffies; +}; + +#define PSB_RF_FIRE_TA (1 << 0) +#define PSB_RF_OOM (1 << 1) +#define PSB_RF_OOM_REPLY (1 << 2) +#define PSB_RF_TERMINATE (1 << 3) +#define PSB_RF_TA_DONE (1 << 4) +#define PSB_RF_FIRE_RASTER (1 << 5) +#define PSB_RF_RASTER_DONE (1 << 6) +#define PSB_RF_DEALLOC (1 << 7) + +extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv, + int shareable, + uint32_t w, uint32_t h); +extern uint32_t psb_scene_handle(struct psb_scene *scene); +extern int psb_scheduler_init(struct drm_device *dev, + struct psb_scheduler *scheduler); +extern void psb_scheduler_takedown(struct psb_scheduler *scheduler); +extern int psb_cmdbuf_ta(struct drm_file *priv, + struct psb_context *context, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *cmd_buffer, + struct ttm_buffer_object *ta_buffer, + struct ttm_buffer_object *oom_buffer, + struct psb_scene *scene, + struct psb_feedback_info *feedback, + struct 
psb_ttm_fence_rep *fence_arg); +extern int psb_cmdbuf_raster(struct drm_file *priv, + struct psb_context *context, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *cmd_buffer, + struct psb_ttm_fence_rep *fence_arg); +extern void psb_scheduler_handler(struct drm_psb_private *dev_priv, + uint32_t status); +extern void psb_scheduler_pause(struct drm_psb_private *dev_priv); +extern void psb_scheduler_restart(struct drm_psb_private *dev_priv); +extern int psb_scheduler_idle(struct drm_psb_private *dev_priv); +extern int psb_scheduler_finished(struct drm_psb_private *dev_priv); + +extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv, + int *lockup, int *idle); +extern void psb_scheduler_reset(struct drm_psb_private *dev_priv, + int error_condition); +extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv); +extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene); +extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv); +extern int psb_extend_timeout(struct drm_psb_private *dev_priv, + uint32_t xhw_lockup); + +#endif diff --git a/drivers/gpu/drm/psb/psb_setup.c b/drivers/gpu/drm/psb/psb_setup.c new file mode 100644 index 0000000..134ff08 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_setup.c @@ -0,0 +1,18 @@ +#include +#include +#include +#include +#include "psb_intel_drv.h" +#include "psb_drv.h" +#include "psb_intel_reg.h" + +/* Fixed name */ +#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC" +#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD" + +#include "psb_intel_i2c.c" +#include "psb_intel_sdvo.c" +#include "psb_intel_modes.c" +#include "psb_intel_lvds.c" +#include "psb_intel_dsi.c" +#include "psb_intel_display.c" diff --git a/drivers/gpu/drm/psb/psb_sgx.c b/drivers/gpu/drm/psb/psb_sgx.c new file mode 100644 index 0000000..2c1f1a4 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_sgx.c @@ -0,0 +1,1784 @@ +/************************************************************************** + * Copyright (c) 2007, Intel Corporation. + * All Rights Reserved. + * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. 
+ * + **************************************************************************/ +/* + */ + +#include +#include "psb_drv.h" +#include "psb_drm.h" +#include "psb_reg.h" +#include "psb_scene.h" +#include "psb_msvdx.h" +#include "lnc_topaz.h" +#include "ttm/ttm_bo_api.h" +#include "ttm/ttm_execbuf_util.h" +#include "ttm/ttm_userobj_api.h" +#include "ttm/ttm_placement_common.h" +#include "psb_sgx.h" +#include "psb_intel_reg.h" +#include "psb_powermgmt.h" + + +static inline int psb_same_page(unsigned long offset, + unsigned long offset2) +{ + return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); +} + +static inline unsigned long psb_offset_end(unsigned long offset, + unsigned long end) +{ + offset = (offset + PAGE_SIZE) & PAGE_MASK; + return (end < offset) ? end : offset; +} + +static void psb_idle_engine(struct drm_device *dev, int engine); + +struct psb_dstbuf_cache { + unsigned int dst; + struct ttm_buffer_object *dst_buf; + unsigned long dst_offset; + uint32_t *dst_page; + unsigned int dst_page_offset; + struct ttm_bo_kmap_obj dst_kmap; + bool dst_is_iomem; +}; + +struct psb_validate_buffer { + struct ttm_validate_buffer base; + struct psb_validate_req req; + int ret; + struct psb_validate_arg __user *user_val_arg; + uint32_t flags; + uint32_t offset; + int po_correct; +}; + + + +#define PSB_REG_GRAN_SHIFT 2 +#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT) +#define PSB_MAX_REG 0x1000 + +static const uint32_t disallowed_ranges[][2] = { + {0x0000, 0x0200}, + {0x0208, 0x0214}, + {0x021C, 0x0224}, + {0x0230, 0x0234}, + {0x0248, 0x024C}, + {0x0254, 0x0358}, + {0x0428, 0x0428}, + {0x0430, 0x043C}, + {0x0498, 0x04B4}, + {0x04CC, 0x04D8}, + {0x04E0, 0x07FC}, + {0x0804, 0x0A14}, + {0x0A4C, 0x0A58}, + {0x0A68, 0x0A80}, + {0x0AA0, 0x0B1C}, + {0x0B2C, 0x0CAC}, + {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY} +}; + +static uint32_t psb_disallowed_regs[PSB_MAX_REG / + (PSB_REG_GRANULARITY * + (sizeof(uint32_t) << 3))]; + +static inline int psb_disallowed(uint32_t reg) +{ + reg >>= PSB_REG_GRAN_SHIFT; + return (psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0; +} + +void psb_init_disallowed(void) +{ + int i; + uint32_t reg, tmp; + static int initialized; + + if (initialized) + return; + + initialized = 1; + memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs)); + + for (i = 0; + i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t))); + ++i) { + for (reg = disallowed_ranges[i][0]; + reg <= disallowed_ranges[i][1]; reg += 4) { + tmp = reg >> 2; + psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31)); + } + } +} + +static int psb_memcpy_check(uint32_t *dst, const uint32_t *src, + uint32_t size) +{ + size >>= 3; + while (size--) { + if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) { + DRM_ERROR("Forbidden SGX register access: " + "0x%04x.\n", *src); + return -EPERM; + } + *dst++ = *src++; + *dst++ = *src++; + } + return 0; +} + +int psb_2d_wait_available(struct drm_psb_private *dev_priv, + unsigned size) +{ + uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF); + int ret = 0; + +retry: + if (avail < size) { +#if 0 + /* We'd ideally + * like to have an IRQ-driven event here. 
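+ * Until that is implemented, the #else branch below simply re-reads + * the PSB_CR_2D_SOCIF FIFO space register and retries.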
+ */ + + psb_2D_irq_on(dev_priv); + DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ, + ((avail = + PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size)); + psb_2D_irq_off(dev_priv); + if (ret == 0) + return 0; + if (ret == -EINTR) { + ret = 0; + goto retry; + } +#else + avail = PSB_RSGX32(PSB_CR_2D_SOCIF); + goto retry; +#endif + } + return ret; +} + +int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf, + unsigned size) +{ + int ret = 0; + int i; + unsigned submit_size; + + while (size > 0) { + submit_size = (size < 0x60) ? size : 0x60; + size -= submit_size; + ret = psb_2d_wait_available(dev_priv, submit_size); + if (ret) + return ret; + + submit_size <<= 2; + mutex_lock(&dev_priv->reset_mutex); + for (i = 0; i < submit_size; i += 4) { + PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i); + } + (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4); + mutex_unlock(&dev_priv->reset_mutex); + } + return 0; +} + +int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence) +{ + uint32_t buffer[8]; + uint32_t *bufp = buffer; + int ret; + + *bufp++ = PSB_2D_FENCE_BH; + + *bufp++ = PSB_2D_DST_SURF_BH | + PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT); + *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset; + + *bufp++ = PSB_2D_BLIT_BH | + PSB_2D_ROT_NONE | + PSB_2D_COPYORDER_TL2BR | + PSB_2D_DSTCK_DISABLE | + PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY; + + *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT; + *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) | + (0 << PSB_2D_DST_YSTART_SHIFT); + *bufp++ = + (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT); + + *bufp++ = PSB_2D_FLUSH_BH; + + psb_2d_lock(dev_priv); + ret = psb_2d_submit(dev_priv, buffer, bufp - buffer); + psb_2d_unlock(dev_priv); + + if (!ret) + psb_schedule_watchdog(dev_priv); + return ret; +} + +int psb_emit_2d_copy_blit(struct drm_device *dev, + uint32_t src_offset, + uint32_t dst_offset, uint32_t pages, + int direction) +{ + uint32_t cur_pages; + struct drm_psb_private *dev_priv = dev->dev_private; + uint32_t buf[10]; + uint32_t *bufp; + uint32_t xstart; + uint32_t ystart; + uint32_t blit_cmd; + uint32_t pg_add; + int ret = 0; + + if (!dev_priv) + return 0; + + if (direction) { + pg_add = (pages - 1) << PAGE_SHIFT; + src_offset += pg_add; + dst_offset += pg_add; + } + + blit_cmd = PSB_2D_BLIT_BH | + PSB_2D_ROT_NONE | + PSB_2D_DSTCK_DISABLE | + PSB_2D_SRCCK_DISABLE | + PSB_2D_USE_PAT | + PSB_2D_ROP3_SRCCOPY | + (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR); + xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0; + + psb_2d_lock(dev_priv); + while (pages > 0) { + cur_pages = pages; + if (cur_pages > 2048) + cur_pages = 2048; + pages -= cur_pages; + ystart = (direction) ? cur_pages - 1 : 0; + + bufp = buf; + *bufp++ = PSB_2D_FENCE_BH; + + *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB | + (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT); + *bufp++ = dst_offset; + *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB | + (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT); + *bufp++ = src_offset; + *bufp++ = + PSB_2D_SRC_OFF_BH | (xstart << + PSB_2D_SRCOFF_XSTART_SHIFT) | + (ystart << PSB_2D_SRCOFF_YSTART_SHIFT); + *bufp++ = blit_cmd; + *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) | + (ystart << PSB_2D_DST_YSTART_SHIFT); + *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) | + (cur_pages << PSB_2D_DST_YSIZE_SHIFT); + + ret = psb_2d_submit(dev_priv, buf, bufp - buf); + if (ret) + goto out; + pg_add = + (cur_pages << PAGE_SHIFT) * ((direction) ? 
-1 : 1); + src_offset += pg_add; + dst_offset += pg_add; + } +out: + psb_2d_unlock(dev_priv); + return ret; +} + +void psb_init_2d(struct drm_psb_private *dev_priv) +{ + spin_lock_init(&dev_priv->sequence_lock); + psb_reset(dev_priv, 1); + dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start; + PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE); + (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); +} + +int psb_idle_2d(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + unsigned long _end = jiffies + DRM_HZ; + int busy = 0; + bool b_need_release = false; + + if (!powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND)) { + if (!powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, false)) + return 0; + else + b_need_release = true; + } + + /* + * First idle the 2D engine. + */ + + if (dev_priv->engine_lockup_2d) { + busy = -EBUSY; + goto out; + } + + if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && + ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == + 0)) + goto out; + + do { + busy = + (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY); + } while (busy && !time_after_eq(jiffies, _end)); + + if (busy) + busy = + (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY); + if (busy) + goto out; + + do { + busy = + ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & + _PSB_C2B_STATUS_BUSY) + != 0); + } while (busy && !time_after_eq(jiffies, _end)); + if (busy) + busy = + ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & + _PSB_C2B_STATUS_BUSY) + != 0); + +out: + if (busy) + dev_priv->engine_lockup_2d = 1; + + if (b_need_release) + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + + return (busy) ? -EBUSY : 0; +} + +int psb_idle_3d(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_scheduler *scheduler = &dev_priv->scheduler; + int ret; + + ret = wait_event_timeout(scheduler->idle_queue, + psb_scheduler_finished(dev_priv), + DRM_HZ * 10); + + /* + * wait_event_timeout() returns 0 if the timeout elapsed, or the + * remaining jiffies if the condition became true before the timeout. + */ + if (ret == 0) + DRM_ERROR("Timed out waiting for the scheduler to idle.\n"); + + return (ret < 1) ?
-EBUSY : 0; +} + +static int psb_check_presumed(struct psb_validate_req *req, + struct ttm_buffer_object *bo, + struct psb_validate_arg __user *data, + int *presumed_ok) +{ + struct psb_validate_req __user *user_req = &(data->d.req); + + *presumed_ok = 0; + + if (bo->mem.mem_type == TTM_PL_SYSTEM) { + *presumed_ok = 1; + return 0; + } + + if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED))) + return 0; + + if (bo->offset == req->presumed_gpu_offset) { + *presumed_ok = 1; + return 0; + } + + return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED, + &user_req->presumed_flags); +} + + +static void psb_unreference_buffers(struct psb_context *context) +{ + struct ttm_validate_buffer *entry, *next; + struct psb_validate_buffer *vbuf; + struct list_head *list = &context->validate_list; + + list_for_each_entry_safe(entry, next, list, head) { + vbuf = + container_of(entry, struct psb_validate_buffer, base); + list_del(&entry->head); + ttm_bo_unref(&entry->bo); + } + + list = &context->kern_validate_list; + + list_for_each_entry_safe(entry, next, list, head) { + vbuf = + container_of(entry, struct psb_validate_buffer, base); + list_del(&entry->head); + ttm_bo_unref(&entry->bo); + } +} + + +static int psb_lookup_validate_buffer(struct drm_file *file_priv, + uint64_t data, + struct psb_validate_buffer *item) +{ + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + + item->user_val_arg = + (struct psb_validate_arg __user *) (unsigned long) data; + + if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req, + sizeof(item->req)) != 0)) { + DRM_ERROR("Lookup copy fault.\n"); + return -EFAULT; + } + + item->base.bo = + ttm_buffer_object_lookup(tfile, item->req.buffer_handle); + + if (unlikely(item->base.bo == NULL)) { + DRM_ERROR("Bo lookup fault.\n"); + return -EINVAL; + } + + return 0; +} + +static int psb_reference_buffers(struct drm_file *file_priv, + uint64_t data, + struct psb_context *context) +{ + struct psb_validate_buffer *item; + int ret; + + while (likely(data != 0)) { + if (unlikely(context->used_buffers >= + PSB_NUM_VALIDATE_BUFFERS)) { + DRM_ERROR("Too many buffers " + "on validate list.\n"); + ret = -EINVAL; + goto out_err0; + } + + item = &context->buffers[context->used_buffers]; + + ret = psb_lookup_validate_buffer(file_priv, data, item); + if (unlikely(ret != 0)) + goto out_err0; + + item->base.reserved = 0; + list_add_tail(&item->base.head, &context->validate_list); + context->used_buffers++; + data = item->req.next; + } + return 0; + +out_err0: + psb_unreference_buffers(context); + return ret; +} + +static int +psb_placement_fence_type(struct ttm_buffer_object *bo, + uint64_t set_val_flags, + uint64_t clr_val_flags, + uint32_t new_fence_class, + uint32_t *new_fence_type) +{ + int ret; + uint32_t n_fence_type; + uint32_t set_flags = set_val_flags & 0xFFFFFFFF; + uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF; + struct ttm_fence_object *old_fence; + uint32_t old_fence_type; + + if (unlikely + (!(set_val_flags & + (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) { + DRM_ERROR + ("GPU access type (read / write) is not indicated.\n"); + return -EINVAL; + } + + ret = ttm_bo_check_placement(bo, set_flags, clr_flags); + if (unlikely(ret != 0)) + return ret; + + switch (new_fence_class) { + case PSB_ENGINE_TA: + n_fence_type = _PSB_FENCE_TYPE_EXE | + _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE; + if (set_val_flags & PSB_BO_FLAG_TA) + n_fence_type &= ~_PSB_FENCE_TYPE_RASTER_DONE; + if (set_val_flags & PSB_BO_FLAG_COMMAND) + n_fence_type &= + 
~(_PSB_FENCE_TYPE_RASTER_DONE | + _PSB_FENCE_TYPE_TA_DONE); + if (set_val_flags & PSB_BO_FLAG_SCENE) + n_fence_type |= _PSB_FENCE_TYPE_SCENE_DONE; + if (set_val_flags & PSB_BO_FLAG_FEEDBACK) + n_fence_type |= _PSB_FENCE_TYPE_FEEDBACK; + break; + default: + n_fence_type = _PSB_FENCE_TYPE_EXE; + } + + *new_fence_type = n_fence_type; + old_fence = (struct ttm_fence_object *) bo->sync_obj; + old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg; + + if (old_fence && ((new_fence_class != old_fence->fence_class) || + ((n_fence_type ^ old_fence_type) & + old_fence_type))) { + ret = ttm_bo_wait(bo, 0, 1, 0); + if (unlikely(ret != 0)) + return ret; + } + + bo->proposed_flags = (bo->proposed_flags | set_flags) + & ~clr_flags & TTM_PL_MASK_MEMTYPE; + + return 0; +} + +int psb_validate_kernel_buffer(struct psb_context *context, + struct ttm_buffer_object *bo, + uint32_t fence_class, + uint64_t set_flags, uint64_t clr_flags) +{ + struct psb_validate_buffer *item; + uint32_t cur_fence_type; + int ret; + + if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) { + DRM_ERROR("Out of free validation buffer entries for " + "kernel buffer validation.\n"); + return -ENOMEM; + } + + item = &context->buffers[context->used_buffers]; + item->user_val_arg = NULL; + item->base.reserved = 0; + + ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq); + if (unlikely(ret != 0)) + goto out_unlock; + + mutex_lock(&bo->mutex); + ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class, + &cur_fence_type); + if (unlikely(ret != 0)) { + ttm_bo_unreserve(bo); + goto out_unlock; + } + + item->base.bo = ttm_bo_reference(bo); + item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type; + item->base.reserved = 1; + + list_add_tail(&item->base.head, &context->kern_validate_list); + context->used_buffers++; + + ret = ttm_buffer_object_validate(bo, 1, 0); + if (unlikely(ret != 0)) + goto out_unlock; + + item->offset = bo->offset; + item->flags = bo->mem.flags; + context->fence_types |= cur_fence_type; + +out_unlock: + mutex_unlock(&bo->mutex); + return ret; +} + + +static int psb_validate_buffer_list(struct drm_file *file_priv, + uint32_t fence_class, + struct psb_context *context, + int *po_correct) +{ + struct psb_validate_buffer *item; + struct ttm_buffer_object *bo; + int ret; + struct psb_validate_req *req; + uint32_t fence_types = 0; + uint32_t cur_fence_type; + struct ttm_validate_buffer *entry; + struct list_head *list = &context->validate_list; + + *po_correct = 1; + + list_for_each_entry(entry, list, head) { + item = + container_of(entry, struct psb_validate_buffer, base); + bo = entry->bo; + item->ret = 0; + req = &item->req; + + mutex_lock(&bo->mutex); + ret = psb_placement_fence_type(bo, + req->set_flags, + req->clear_flags, + fence_class, + &cur_fence_type); + if (unlikely(ret != 0)) + goto out_err; + + ret = ttm_buffer_object_validate(bo, 1, 0); + + if (unlikely(ret != 0)) + goto out_err; + + fence_types |= cur_fence_type; + entry->new_sync_obj_arg = (void *) + (unsigned long) cur_fence_type; + + item->offset = bo->offset; + item->flags = bo->mem.flags; + mutex_unlock(&bo->mutex); + + ret = + psb_check_presumed(&item->req, bo, item->user_val_arg, + &item->po_correct); + if (unlikely(ret != 0)) + goto out_err; + + if (unlikely(!item->po_correct)) + *po_correct = 0; + + item++; + } + + context->fence_types |= fence_types; + + return 0; +out_err: + mutex_unlock(&bo->mutex); + item->ret = ret; + return ret; +} + + +int +psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t 
*regs, + unsigned int cmds) +{ + int i; + + /* + * cmds is 32-bit words. + */ + + cmds >>= 1; + for (i = 0; i < cmds; ++i) { + PSB_WSGX32(regs[1], regs[0]); + regs += 2; + } + wmb(); + return 0; +} + +/* + * Security: Block user-space writing to MMU mapping registers. + * This is important for security and brings Poulsbo DRM + * up to par with the other DRM drivers. Using this, + * user-space should not be able to map arbitrary memory + * pages to graphics memory, but all user-space processes + * basically have access to all buffer objects mapped to + * graphics memory. + */ + +int +psb_submit_copy_cmdbuf(struct drm_device *dev, + struct ttm_buffer_object *cmd_buffer, + unsigned long cmd_offset, + unsigned long cmd_size, + int engine, uint32_t *copy_buffer) +{ + unsigned long cmd_end = cmd_offset + (cmd_size << 2); + struct drm_psb_private *dev_priv = dev->dev_private; + unsigned long cmd_page_offset = + cmd_offset - (cmd_offset & PAGE_MASK); + unsigned long cmd_next; + struct ttm_bo_kmap_obj cmd_kmap; + uint32_t *cmd_page; + unsigned cmds; + bool is_iomem; + int ret = 0; + + if (cmd_size == 0) + return 0; + + if (engine == PSB_ENGINE_2D) + psb_2d_lock(dev_priv); + + do { + cmd_next = psb_offset_end(cmd_offset, cmd_end); + ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, + 1, &cmd_kmap); + + if (ret) { + if (engine == PSB_ENGINE_2D) + psb_2d_unlock(dev_priv); + return ret; + } + cmd_page = ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem); + cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2; + cmds = (cmd_next - cmd_offset) >> 2; + + switch (engine) { + case PSB_ENGINE_2D: + ret = + psb_2d_submit(dev_priv, + cmd_page + cmd_page_offset, + cmds); + break; + case PSB_ENGINE_RASTERIZER: + case PSB_ENGINE_TA: + case PSB_ENGINE_HPRAST: + PSB_DEBUG_GENERAL("Reg copy.\n"); + ret = psb_memcpy_check(copy_buffer, + cmd_page + cmd_page_offset, + cmds * sizeof(uint32_t)); + copy_buffer += cmds; + break; + default: + ret = -EINVAL; + } + ttm_bo_kunmap(&cmd_kmap); + if (ret) + break; + } while (cmd_offset = cmd_next, cmd_offset != cmd_end); + + if (engine == PSB_ENGINE_2D) + psb_2d_unlock(dev_priv); + + return ret; +} + +static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache) +{ + if (dst_cache->dst_page) { + ttm_bo_kunmap(&dst_cache->dst_kmap); + dst_cache->dst_page = NULL; + } + dst_cache->dst_buf = NULL; + dst_cache->dst = ~0; +} + +static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache, + struct psb_validate_buffer *buffers, + unsigned int dst, + unsigned long dst_offset) +{ + int ret; + + PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst); + + if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) { + psb_clear_dstbuf_cache(dst_cache); + dst_cache->dst = dst; + dst_cache->dst_buf = buffers[dst].base.bo; + } + + if (unlikely + (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) { + DRM_ERROR("Relocation destination out of bounds.\n"); + return -EINVAL; + } + + if (!psb_same_page(dst_cache->dst_offset, dst_offset) || + NULL == dst_cache->dst_page) { + if (NULL != dst_cache->dst_page) { + ttm_bo_kunmap(&dst_cache->dst_kmap); + dst_cache->dst_page = NULL; + } + + ret = + ttm_bo_kmap(dst_cache->dst_buf, + dst_offset >> PAGE_SHIFT, 1, + &dst_cache->dst_kmap); + if (ret) { + DRM_ERROR("Could not map destination buffer for " + "relocation.\n"); + return ret; + } + + dst_cache->dst_page = + ttm_kmap_obj_virtual(&dst_cache->dst_kmap, + &dst_cache->dst_is_iomem); + dst_cache->dst_offset = dst_offset & PAGE_MASK; + dst_cache->dst_page_offset = 
dst_cache->dst_offset >> 2; + } + return 0; +} + +static int psb_apply_reloc(struct drm_psb_private *dev_priv, + uint32_t fence_class, + const struct drm_psb_reloc *reloc, + struct psb_validate_buffer *buffers, + int num_buffers, + struct psb_dstbuf_cache *dst_cache, + int no_wait, int interruptible) +{ + uint32_t val; + uint32_t background; + unsigned int index; + int ret; + unsigned int shift; + unsigned int align_shift; + struct ttm_buffer_object *reloc_bo; + + + PSB_DEBUG_GENERAL("Reloc type %d\n" + "\t where 0x%04x\n" + "\t buffer 0x%04x\n" + "\t mask 0x%08x\n" + "\t shift 0x%08x\n" + "\t pre_add 0x%08x\n" + "\t background 0x%08x\n" + "\t dst_buffer 0x%08x\n" + "\t arg0 0x%08x\n" + "\t arg1 0x%08x\n", + reloc->reloc_op, + reloc->where, + reloc->buffer, + reloc->mask, + reloc->shift, + reloc->pre_add, + reloc->background, + reloc->dst_buffer, reloc->arg0, reloc->arg1); + + if (unlikely(reloc->buffer >= num_buffers)) { + DRM_ERROR("Illegal relocation buffer %d.\n", + reloc->buffer); + return -EINVAL; + } + + if (buffers[reloc->buffer].po_correct) + return 0; + + if (unlikely(reloc->dst_buffer >= num_buffers)) { + DRM_ERROR + ("Illegal destination buffer for relocation %d.\n", + reloc->dst_buffer); + return -EINVAL; + } + + ret = + psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer, + reloc->where << 2); + if (ret) + return ret; + + reloc_bo = buffers[reloc->buffer].base.bo; + + if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) { + DRM_ERROR("Illegal relocation offset add.\n"); + return -EINVAL; + } + + switch (reloc->reloc_op) { + case PSB_RELOC_OP_OFFSET: + val = reloc_bo->offset + reloc->pre_add; + break; + case PSB_RELOC_OP_2D_OFFSET: + val = reloc_bo->offset + reloc->pre_add - + dev_priv->mmu_2d_offset; + if (unlikely(val >= PSB_2D_SIZE)) { + DRM_ERROR("2D relocation out of bounds\n"); + return -EINVAL; + } + break; + case PSB_RELOC_OP_PDS_OFFSET: + val = + reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START; + if (unlikely + (val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) { + DRM_ERROR("PDS relocation out of bounds\n"); + return -EINVAL; + } + break; + default: + DRM_ERROR("Unimplemented relocation.\n"); + return -EINVAL; + } + + shift = + (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT; + align_shift = + (reloc-> + shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT; + + val = ((val >> align_shift) << shift); + index = reloc->where - dst_cache->dst_page_offset; + + background = reloc->background; + val = (background & ~reloc->mask) | (val & reloc->mask); + dst_cache->dst_page[index] = val; + + PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n", + reloc->dst_buffer, index, + dst_cache->dst_page[index]); + + return 0; +} + +static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv, + unsigned int num_pages) +{ + int ret = 0; + + spin_lock(&dev_priv->reloc_lock); + if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) { + dev_priv->rel_mapped_pages += num_pages; + ret = 1; + } + spin_unlock(&dev_priv->reloc_lock); + return ret; +} + +static int psb_fixup_relocs(struct drm_file *file_priv, + uint32_t fence_class, + unsigned int num_relocs, + unsigned int reloc_offset, + uint32_t reloc_handle, + struct psb_context *context, + int no_wait, int interruptible) +{ + struct drm_device *dev = file_priv->minor->dev; + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct ttm_buffer_object *reloc_buffer = 
NULL; + unsigned int reloc_num_pages; + unsigned int reloc_first_page; + unsigned int reloc_last_page; + struct psb_dstbuf_cache dst_cache; + struct drm_psb_reloc *reloc; + struct ttm_bo_kmap_obj reloc_kmap; + bool reloc_is_iomem; + int count; + int ret = 0; + int registered = 0; + uint32_t num_buffers = context->used_buffers; + + if (num_relocs == 0) + return 0; + + memset(&dst_cache, 0, sizeof(dst_cache)); + memset(&reloc_kmap, 0, sizeof(reloc_kmap)); + + reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle); + if (!reloc_buffer) { + DRM_ERROR("Relocation buffer lookup failed.\n"); + ret = -EINVAL; + goto out; + } + + if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) { + DRM_ERROR("Relocation buffer was not on validate list.\n"); + ret = -EINVAL; + goto out; + } + + reloc_first_page = reloc_offset >> PAGE_SHIFT; + reloc_last_page = + (reloc_offset + + num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT; + reloc_num_pages = reloc_last_page - reloc_first_page + 1; + reloc_offset &= ~PAGE_MASK; + + if (reloc_num_pages > PSB_MAX_RELOC_PAGES) { + DRM_ERROR("Relocation buffer is too large\n"); + ret = -EINVAL; + goto out; + } + + DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ, + (registered = + psb_ok_to_map_reloc(dev_priv, reloc_num_pages))); + + if (ret == -EINTR) { + ret = -ERESTART; + goto out; + } + if (ret) { + DRM_ERROR("Error waiting for space to map " + "relocation buffer.\n"); + goto out; + } + + ret = ttm_bo_kmap(reloc_buffer, reloc_first_page, + reloc_num_pages, &reloc_kmap); + + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n" + "\tReloc buffer id 0x%08x.\n" + "\tReloc first page %d.\n" + "\tReloc num pages %d.\n", + reloc_handle, reloc_first_page, reloc_num_pages); + goto out; + } + + reloc = (struct drm_psb_reloc *) + ((unsigned long) + ttm_kmap_obj_virtual(&reloc_kmap, + &reloc_is_iomem) + reloc_offset); + + for (count = 0; count < num_relocs; ++count) { + ret = psb_apply_reloc(dev_priv, fence_class, + reloc, context->buffers, + num_buffers, &dst_cache, + no_wait, interruptible); + if (ret) + goto out1; + reloc++; + } + +out1: + ttm_bo_kunmap(&reloc_kmap); +out: + if (registered) { + spin_lock(&dev_priv->reloc_lock); + dev_priv->rel_mapped_pages -= reloc_num_pages; + spin_unlock(&dev_priv->reloc_lock); + DRM_WAKEUP(&dev_priv->rel_mapped_queue); + } + + psb_clear_dstbuf_cache(&dst_cache); + if (reloc_buffer) + ttm_bo_unref(&reloc_buffer); + return ret; +} + +void psb_fence_or_sync(struct drm_file *file_priv, + uint32_t engine, + uint32_t fence_types, + uint32_t fence_flags, + struct list_head *list, + struct psb_ttm_fence_rep *fence_arg, + struct ttm_fence_object **fence_p) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_psb_private *dev_priv = psb_priv(dev); + struct ttm_fence_device *fdev = &dev_priv->fdev; + int ret; + struct ttm_fence_object *fence; + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + uint32_t handle; + + ret = ttm_fence_user_create(fdev, tfile, + engine, fence_types, + TTM_FENCE_FLAG_EMIT, &fence, &handle); + if (ret) { + + /* + * Fence creation failed. + * Fall back to synchronous operation and idle the engine. + */ + + psb_idle_engine(dev, engine); + if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) { + + /* + * Communicate to user-space that + * fence creation has failed and that + * the engine is idle.
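+ * An invalid handle (~0) together with the error code tells + * user-space not to wait on the fence.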
+ */ + + fence_arg->handle = ~0; + fence_arg->error = ret; + } + + ttm_eu_backoff_reservation(list); + if (fence_p) + *fence_p = NULL; + return; + } + + ttm_eu_fence_buffer_objects(list, fence); + if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) { + struct ttm_fence_info info = ttm_fence_get_info(fence); + fence_arg->handle = handle; + fence_arg->fence_class = ttm_fence_class(fence); + fence_arg->fence_type = ttm_fence_types(fence); + fence_arg->signaled_types = info.signaled_types; + fence_arg->error = 0; + } else { + ret = + ttm_ref_object_base_unref(tfile, handle, + ttm_fence_type); + BUG_ON(ret); + } + + if (fence_p) + *fence_p = fence; + else if (fence) + ttm_fence_object_unref(&fence); +} + + + +static int psb_cmdbuf_2d(struct drm_file *priv, + struct list_head *validate_list, + uint32_t fence_type, + struct drm_psb_cmdbuf_arg *arg, + struct ttm_buffer_object *cmd_buffer, + struct psb_ttm_fence_rep *fence_arg) +{ + struct drm_device *dev = priv->minor->dev; + int ret; + + ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset, + arg->cmdbuf_size, PSB_ENGINE_2D, + NULL); + if (ret) + goto out_unlock; + + psb_fence_or_sync(priv, PSB_ENGINE_2D, fence_type, + arg->fence_flags, validate_list, fence_arg, + NULL); + + mutex_lock(&cmd_buffer->mutex); + if (cmd_buffer->sync_obj != NULL) + ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj); + mutex_unlock(&cmd_buffer->mutex); +out_unlock: + return ret; +} + +#if 0 +static int psb_dump_page(struct ttm_buffer_object *bo, + unsigned int page_offset, unsigned int num) +{ + struct ttm_bo_kmap_obj kmobj; + int is_iomem; + uint32_t *p; + int ret; + unsigned int i; + + ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj); + if (ret) + return ret; + + p = ttm_kmap_obj_virtual(&kmobj, &is_iomem); + for (i = 0; i < num; ++i) + PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++); + + ttm_bo_kunmap(&kmobj); + return 0; +} +#endif + +static void psb_idle_engine(struct drm_device *dev, int engine) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + uint32_t dummy; + unsigned long dummy2; + + switch (engine) { + case PSB_ENGINE_2D: + + /* + * Make sure we flush 2D properly using a dummy + * fence sequence emit. + */ + + (void) psb_fence_emit_sequence(&dev_priv->fdev, + PSB_ENGINE_2D, 0, + &dummy, &dummy2); + psb_2d_lock(dev_priv); + (void) psb_idle_2d(dev); + psb_2d_unlock(dev_priv); + break; + case PSB_ENGINE_TA: + case PSB_ENGINE_RASTERIZER: + case PSB_ENGINE_HPRAST: + (void) psb_idle_3d(dev); + break; + default: + + /* + * FIXME: Insert video engine idle command here. 
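+ * Until then the default case is a no-op.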
+ */ + + break; + } +} + +static int psb_handle_copyback(struct drm_device *dev, + struct psb_context *context, + int ret) +{ + int err = ret; + struct ttm_validate_buffer *entry; + struct psb_validate_arg arg; + struct list_head *list = &context->validate_list; + + if (ret) { + ttm_eu_backoff_reservation(list); + ttm_eu_backoff_reservation(&context->kern_validate_list); + } + + + if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) { + list_for_each_entry(entry, list, head) { + struct psb_validate_buffer *vbuf = + container_of(entry, struct psb_validate_buffer, + base); + arg.handled = 1; + arg.ret = vbuf->ret; + if (!arg.ret) { + struct ttm_buffer_object *bo = entry->bo; + mutex_lock(&bo->mutex); + arg.d.rep.gpu_offset = bo->offset; + arg.d.rep.placement = bo->mem.flags; + arg.d.rep.fence_type_mask = + (uint32_t) (unsigned long) + entry->new_sync_obj_arg; + mutex_unlock(&bo->mutex); + } + + if (__copy_to_user(vbuf->user_val_arg, + &arg, sizeof(arg))) + err = -EFAULT; + + if (arg.ret) + break; + } + } + + return err; +} + + + +static int psb_feedback_buf(struct ttm_object_file *tfile, + struct psb_context *context, + uint32_t feedback_ops, + uint32_t handle, + uint32_t offset, + uint32_t feedback_breakpoints, + uint32_t feedback_size, + struct psb_feedback_info *feedback) +{ + struct ttm_buffer_object *bo; + struct page *page; + uint32_t page_no; + uint32_t page_offset; + int ret; + + if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) { + DRM_ERROR("Illegal feedback op.\n"); + return -EINVAL; + } + + if (feedback_breakpoints != 0) { + DRM_ERROR("Feedback breakpoints not implemented yet.\n"); + return -EINVAL; + } + + if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) { + DRM_ERROR("Feedback buffer size too small.\n"); + return -EINVAL; + } + + page_offset = offset & ~PAGE_MASK; + if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) + < page_offset) { + DRM_ERROR("Illegal feedback buffer alignment.\n"); + return -EINVAL; + } + + bo = ttm_buffer_object_lookup(tfile, handle); + if (unlikely(bo == NULL)) { + DRM_ERROR("Failed looking up feedback buffer.\n"); + return -EINVAL; + } + + + ret = psb_validate_kernel_buffer(context, bo, + PSB_ENGINE_TA, + TTM_PL_FLAG_SYSTEM | + TTM_PL_FLAG_CACHED | + PSB_GPU_ACCESS_WRITE | + PSB_BO_FLAG_FEEDBACK, + TTM_PL_MASK_MEM & + ~(TTM_PL_FLAG_SYSTEM | + TTM_PL_FLAG_CACHED)); + if (unlikely(ret != 0)) + goto out_unref; + + page_no = offset >> PAGE_SHIFT; + if (unlikely(page_no >= bo->num_pages)) { + ret = -EINVAL; + DRM_ERROR("Illegal feedback buffer offset.\n"); + goto out_unref; + } + + if (unlikely(bo->ttm == NULL)) { + ret = -EINVAL; + DRM_ERROR("Vistest buffer without TTM.\n"); + goto out_unref; + } + + page = ttm_tt_get_page(bo->ttm, page_no); + if (unlikely(page == NULL)) { + ret = -ENOMEM; + goto out_unref; + } + + feedback->page = page; + feedback->offset = page_offset; + + /* + * Note: bo reference transferred.
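+ * The reference taken by ttm_buffer_object_lookup() above is handed + * over to feedback->bo on success; only the error path unrefs it.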
+ */ + + feedback->bo = bo; + return 0; + +out_unref: + ttm_bo_unref(&bo); + return ret; +} + +inline int psb_try_power_down_sgx(struct drm_device *dev) +{ + if(powermgmt_is_gfx_busy()){ + return 0; + } + + return powermgmt_suspend_islands(dev->pdev, PSB_GRAPHICS_ISLAND, false); +} + +void psb_init_ospm(struct drm_psb_private *dev_priv) +{ + static int init; + if (!init) { + dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA); + dev_priv->apm_base = dev_priv->apm_reg & 0xffff; + PSB_DEBUG_PM("apm_reg:%x\n", dev_priv->apm_reg); +#ifdef OSPM_STAT + dev_priv->graphics_state = PSB_PWR_STATE_D0; + dev_priv->gfx_last_mode_change = jiffies; + dev_priv->gfx_d0_time = 0; + dev_priv->gfx_d0i3_time = 0; + dev_priv->gfx_d3_time = 0; +#endif + init = 1; + } +} + +int psb_cmdbuf_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_cmdbuf_arg *arg = data; + int ret = 0; + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + struct ttm_buffer_object *cmd_buffer = NULL; + struct ttm_buffer_object *ta_buffer = NULL; + struct ttm_buffer_object *oom_buffer = NULL; + struct psb_ttm_fence_rep fence_arg; + struct drm_psb_scene user_scene; + struct psb_scene_pool *pool = NULL; + struct psb_scene *scene = NULL; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)file_priv->minor->dev->dev_private; + int engine; + struct psb_feedback_info feedback; + int po_correct; + struct psb_context *context; + unsigned num_buffers; + + num_buffers = PSB_NUM_VALIDATE_BUFFERS; + + ret = ttm_read_lock(&dev_priv->ttm_lock, true); + if (unlikely(ret != 0)) + return ret; + + if (arg->engine == PSB_ENGINE_VIDEO) + powermgmt_using_hw_begin(dev->pdev, PSB_VIDEO_DEC_ISLAND, true); + + if (arg->engine == LNC_ENGINE_ENCODE) + powermgmt_using_hw_begin(dev->pdev, PSB_VIDEO_ENC_ISLAND, true); + + if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA) || + (arg->engine == PSB_ENGINE_RASTERIZER)) + powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true); + + ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); + if (unlikely(ret != 0)) + goto out_err0; + + + context = &dev_priv->context; + context->used_buffers = 0; + context->fence_types = 0; + BUG_ON(!list_empty(&context->validate_list)); + BUG_ON(!list_empty(&context->kern_validate_list)); + + if (unlikely(context->buffers == NULL)) { + context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS * + sizeof(*context->buffers)); + if (unlikely(context->buffers == NULL)) { + ret = -ENOMEM; + goto out_err1; + } + } + + ret = psb_reference_buffers(file_priv, + arg->buffer_list, + context); + + if (unlikely(ret != 0)) + goto out_err1; + + context->val_seq = atomic_add_return(1, &dev_priv->val_seq); + + ret = ttm_eu_reserve_buffers(&context->validate_list, + context->val_seq); + if (unlikely(ret != 0)) { + goto out_err2; + } + + engine = (arg->engine == PSB_ENGINE_RASTERIZER) ? 
+ PSB_ENGINE_TA : arg->engine; + + ret = psb_validate_buffer_list(file_priv, engine, + context, &po_correct); + if (unlikely(ret != 0)) + goto out_err3; + + if (!po_correct) { + ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs, + arg->reloc_offset, + arg->reloc_handle, context, 0, 1); + if (unlikely(ret != 0)) + goto out_err3; + + } + + cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle); + if (unlikely(cmd_buffer == NULL)) { + ret = -EINVAL; + goto out_err4; + } + + switch (arg->engine) { + case PSB_ENGINE_2D: + ret = psb_cmdbuf_2d(file_priv, &context->validate_list, + context->fence_types, arg, cmd_buffer, + &fence_arg); + if (unlikely(ret != 0)) + goto out_err4; + break; + case PSB_ENGINE_VIDEO: + ret = psb_cmdbuf_video(file_priv, &context->validate_list, + context->fence_types, arg, + cmd_buffer, &fence_arg); + + if (unlikely(ret != 0)) + goto out_err4; + break; + case LNC_ENGINE_ENCODE: + ret = lnc_cmdbuf_video(file_priv, &context->validate_list, + context->fence_types, arg, + cmd_buffer, &fence_arg); + if (unlikely(ret != 0)) + goto out_err4; + break; + case PSB_ENGINE_RASTERIZER: + ret = psb_cmdbuf_raster(file_priv, context, + arg, cmd_buffer, &fence_arg); + if (unlikely(ret != 0)) + goto out_err4; + break; + case PSB_ENGINE_TA: + if (arg->ta_handle == arg->cmdbuf_handle) { + ta_buffer = ttm_bo_reference(cmd_buffer); + } else { + ta_buffer = + ttm_buffer_object_lookup(tfile, + arg->ta_handle); + if (!ta_buffer) { + ret = -EINVAL; + goto out_err4; + } + } + if (arg->oom_size != 0) { + if (arg->oom_handle == arg->cmdbuf_handle) { + oom_buffer = ttm_bo_reference(cmd_buffer); + } else { + oom_buffer = + ttm_buffer_object_lookup(tfile, + arg-> + oom_handle); + if (!oom_buffer) { + ret = -EINVAL; + goto out_err4; + } + } + } + + ret = copy_from_user(&user_scene, (void __user *) + ((unsigned long) arg->scene_arg), + sizeof(user_scene)); + if (ret) + goto out_err4; + + if (!user_scene.handle_valid) { + pool = psb_scene_pool_alloc(file_priv, 0, + user_scene.num_buffers, + user_scene.w, + user_scene.h); + if (!pool) { + ret = -ENOMEM; + goto out_err4; + } + + user_scene.handle = psb_scene_pool_handle(pool); + user_scene.handle_valid = 1; + ret = copy_to_user((void __user *) + ((unsigned long) arg-> + scene_arg), &user_scene, + sizeof(user_scene)); + + if (ret) + goto out_err4; + } else { + pool = + psb_scene_pool_lookup(file_priv, + user_scene.handle, 1); + if (!pool) { + ret = -EINVAL; + goto out_err4; + } + } + + ret = psb_validate_scene_pool(context, pool, + user_scene.w, + user_scene.h, + arg->ta_flags & + PSB_TA_FLAG_LASTPASS, &scene); + if (ret) + goto out_err4; + + memset(&feedback, 0, sizeof(feedback)); + if (arg->feedback_ops) { + ret = psb_feedback_buf(tfile, + context, + arg->feedback_ops, + arg->feedback_handle, + arg->feedback_offset, + arg->feedback_breakpoints, + arg->feedback_size, + &feedback); + if (ret) + goto out_err4; + } + ret = psb_cmdbuf_ta(file_priv, context, + arg, cmd_buffer, ta_buffer, + oom_buffer, scene, &feedback, + &fence_arg); + if (ret) + goto out_err4; + break; + default: + DRM_ERROR + ("Unimplemented command submission mechanism (%x).\n", + arg->engine); + ret = -EINVAL; + goto out_err4; + } + + if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) { + ret = copy_to_user((void __user *) + ((unsigned long) arg->fence_arg), + &fence_arg, sizeof(fence_arg)); + } + +out_err4: + if (scene) + psb_scene_unref(&scene); + if (pool) + psb_scene_pool_unref(&pool); + if (cmd_buffer) + ttm_bo_unref(&cmd_buffer); + if (ta_buffer) +
ttm_bo_unref(&ta_buffer); + if (oom_buffer) + ttm_bo_unref(&oom_buffer); +out_err3: + ret = psb_handle_copyback(dev, context, ret); +out_err2: + psb_unreference_buffers(context); +out_err1: + mutex_unlock(&dev_priv->cmdbuf_mutex); +out_err0: + ttm_read_unlock(&dev_priv->ttm_lock); + + if (arg->engine == PSB_ENGINE_VIDEO) + powermgmt_using_hw_end(PSB_VIDEO_DEC_ISLAND); + + if (arg->engine == LNC_ENGINE_ENCODE) + powermgmt_using_hw_end(PSB_VIDEO_ENC_ISLAND); + + if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA) + || (arg->engine == PSB_ENGINE_RASTERIZER)) + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + return ret; +} + +static int psb_do_init_pageflip(struct drm_device * dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + u32 pipe_status[2]; + int pipe, dspbase; + + if (!powermgmt_using_hw_begin(dev->pdev, PSB_DISPLAY_ISLAND, false)) + return -1; + + dev_priv->dri_page_flipping = 1; + dev_priv->current_page = 0; + for (pipe = 0; pipe < 2; pipe++){ + pipe_status[pipe] = REG_READ(pipe == 0 ? PIPEACONF : PIPEBCONF); + if (pipe_status[pipe] & PIPEACONF_ENABLE){ + dev_priv->pipe_active[pipe] = 1; + dev_priv->saved_stride[pipe] = REG_READ((pipe == 0) ? DSPASTRIDE : DSPBSTRIDE); + dspbase = (pipe == 0 ? DSPABASE : DSPBBASE); + if (IS_MRST(dev) && (pipe == 0)) + dspbase = MRST_DSPABASE; + if (IS_MRST(dev)) { + dev_priv->saved_start[pipe] = REG_READ(pipe == 0 ? DSPASURF : DSPBSURF); + dev_priv->saved_offset[pipe] = REG_READ(dspbase); + } else { + dev_priv->saved_start[pipe] = REG_READ(pipe == 0 ? DSPABASE : DSPBBASE); + dev_priv->saved_offset[pipe] = 0; + } + } + else + dev_priv->pipe_active[pipe] = 0; + } + + powermgmt_using_hw_end(PSB_DISPLAY_ISLAND); + + return 0; +} + +int psb_page_flip(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_pageflip_arg *arg = data; + int pipe; + + struct drm_psb_private *dev_priv = + (struct drm_psb_private *)file_priv->minor->dev->dev_private; + struct psb_scheduler *scheduler = &dev_priv->scheduler; + struct psb_task *task = NULL; + + if (!dev_priv->dri_page_flipping) + if (psb_do_init_pageflip(dev)) + return 0; + + task = kzalloc(sizeof(*task), GFP_KERNEL); + if (!task) + return -ENOMEM; + INIT_LIST_HEAD(&task->head); + INIT_LIST_HEAD(&task->buf.head); + task->task_type = psb_flip_task; + + spin_lock_irq(&scheduler->lock); + list_add_tail(&task->head, &scheduler->ta_queue); + /** + * From this point we may no longer dereference task, + * as the object it points to may be freed by another thread. + */ + + task = NULL; + spin_unlock_irq(&scheduler->lock); + for (pipe=0; pipe<2; pipe++) { + if (dev_priv->pipe_active[pipe] == 1) { + dev_priv->flip_start[pipe] = arg->flip_offset; + dev_priv->flip_offset[pipe] = dev_priv->saved_offset[pipe]; + dev_priv->flip_stride[pipe] = dev_priv->saved_stride[pipe]; + } + } + return 0; +} + +int psb_flip_set_base(struct drm_psb_private *dev_priv, int pipe) +{ + struct drm_device *dev = dev_priv->dev; + + unsigned long Start, Offset, Stride; + int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE); + int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); + int dspstride = (pipe == 0) ? 
DSPASTRIDE : DSPBSTRIDE; + + if (IS_MRST(dev) && (pipe == 0)) + dspbase = MRST_DSPABASE; + + Start = dev_priv->flip_start[pipe]; + Offset = dev_priv->flip_offset[pipe]; + Stride = dev_priv->flip_stride[pipe]; + + REG_WRITE(dspstride, Stride); + + DRM_DEBUG("Writing base: %08lX Offset: %08lX Stride: %08lX\n", Start, Offset, Stride); + if (IS_MRST(dev)) { + REG_WRITE(dspbase, Offset); + REG_READ(dspbase); + REG_WRITE(dspsurf, Start); + REG_READ(dspsurf); + } else { + REG_WRITE(dspbase, Start + Offset); + REG_READ(dspbase); + } + + if (dev_priv->dri_page_flipping == 1) + dev_priv->current_page = 1 - dev_priv->current_page; + + return 0; +} + diff --git a/drivers/gpu/drm/psb/psb_sgx.h b/drivers/gpu/drm/psb/psb_sgx.h new file mode 100644 index 0000000..9321b98 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_sgx.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2008, Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: + * Eric Anholt + * + **/ +#ifndef _PSB_SGX_H_ +#define _PSB_SGX_H_ + +extern int psb_submit_video_cmdbuf(struct drm_device *dev, + struct ttm_buffer_object *cmd_buffer, + unsigned long cmd_offset, + unsigned long cmd_size, + struct ttm_fence_object *fence); + +extern int psb_2d_wait_available(struct drm_psb_private *dev_priv, + unsigned size); +extern int drm_idle_check_interval; +extern int drm_psb_ospm; + +#endif diff --git a/drivers/gpu/drm/psb/psb_socket.c b/drivers/gpu/drm/psb/psb_socket.c new file mode 100644 index 0000000..4814e55 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_socket.c @@ -0,0 +1,340 @@ +/* + * kernel userspace event delivery + * + * Copyright (C) 2004 Red Hat, Inc. All rights reserved. + * Copyright (C) 2004 Novell, Inc. All rights reserved. + * Copyright (C) 2004 IBM, Inc. All rights reserved. + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Licensed under the GNU GPL v2. + * + * Authors: + * Robert Love + * Kay Sievers + * Arjan van de Ven + * Greg Kroah-Hartman + * James C.
diff --git a/drivers/gpu/drm/psb/psb_socket.c b/drivers/gpu/drm/psb/psb_socket.c new file mode 100644 index 0000000..4814e55 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_socket.c @@ -0,0 +1,340 @@ +/* + * kernel userspace event delivery + * + * Copyright (C) 2004 Red Hat, Inc. All rights reserved. + * Copyright (C) 2004 Novell, Inc. All rights reserved. + * Copyright (C) 2004 IBM, Inc. All rights reserved. + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Licensed under the GNU GPL v2. + * + * Authors: + * Robert Love + * Kay Sievers + * Arjan van de Ven + * Greg Kroah-Hartman + * James C. Gualario + * + */ + +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/socket.h> +#include <linux/skbuff.h> +#include <linux/netlink.h> +#include <net/sock.h> + +#define NETLINK_PSB_KOBJECT_UEVENT 31 + +u64 psb_uevent_seqnum; +char psb_uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; +static DEFINE_SPINLOCK(sequence_lock); +#if defined(CONFIG_NET) +static struct sock *uevent_sock; +#endif + +/* the strings here must match the enum in include/linux/kobject.h */ +static const char *psb_kobject_actions[] = { + [KOBJ_ADD] = "add", + [KOBJ_REMOVE] = "remove", + [KOBJ_CHANGE] = "change", + [KOBJ_MOVE] = "move", + [KOBJ_ONLINE] = "online", + [KOBJ_OFFLINE] = "offline", +}; + +/** + * psb_kobject_action_type - translate action string to numeric type + * + * @buf: buffer containing the action string, newline is ignored + * @count: length of buffer + * @type: pointer to the location to store the action type + * + * Returns 0 if the action string was recognized. + */ +int psb_kobject_action_type(const char *buf, size_t count, + enum kobject_action *type) +{ + enum kobject_action action; + int ret = -EINVAL; + + if (count && (buf[count-1] == '\n' || buf[count-1] == '\0')) + count--; + + if (!count) + goto out; + + for (action = 0; action < ARRAY_SIZE(psb_kobject_actions); action++) { + if (strncmp(psb_kobject_actions[action], buf, count) != 0) + continue; + if (psb_kobject_actions[action][count] != '\0') + continue; + *type = action; + ret = 0; + break; + } +out: + return ret; +}
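psb_kobject_action_type() accepts exactly one action name, with an optional trailing newline or NUL, which is what lets a sysfs-style write such as "change\n" match. A hedged usage sketch (the buffer and length are illustrative):

    /* Sketch: parse a user-supplied action string. */
    enum kobject_action type;
    if (psb_kobject_action_type("change\n", 7, &type) == 0) {
        /* type is now KOBJ_CHANGE; the strncmp plus the
         * [count] != '\0' check rejects both prefixes
         * ("chan") and extensions ("changed"). */
    }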
+ +/** + * psb_kobject_uevent_env - send an uevent with environmental data + * + * @action: action that is happening + * @kobj: struct kobject that the action is happening to + * @envp_ext: pointer to environmental data + * + * Returns 0 if psb_kobject_uevent_env() is completed with success or the + * corresponding error when it fails. + */ +int psb_kobject_uevent_env(struct kobject *kobj, enum kobject_action action, + char *envp_ext[]) +{ + struct kobj_uevent_env *env; + const char *action_string = psb_kobject_actions[action]; + const char *devpath = NULL; + const char *subsystem; + struct kobject *top_kobj; + struct kset *kset; + struct kset_uevent_ops *uevent_ops; + u64 seq; + int i = 0; + int retval = 0; + + pr_debug("kobject: '%s' (%p): %s\n", + kobject_name(kobj), kobj, __func__); + + /* search the kset we belong to */ + top_kobj = kobj; + while (!top_kobj->kset && top_kobj->parent) + top_kobj = top_kobj->parent; + + if (!top_kobj->kset) { + pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " + "without kset!\n", kobject_name(kobj), kobj, + __func__); + return -EINVAL; + } + + kset = top_kobj->kset; + uevent_ops = kset->uevent_ops; + + /* skip the event, if uevent_suppress is set */ + if (kobj->uevent_suppress) { + pr_debug("kobject: '%s' (%p): %s: uevent_suppress " + "caused the event to drop!\n", + kobject_name(kobj), kobj, __func__); + return 0; + } + /* skip the event, if the filter returns zero. */ + if (uevent_ops && uevent_ops->filter) + if (!uevent_ops->filter(kset, kobj)) { + pr_debug("kobject: '%s' (%p): %s: filter function " + "caused the event to drop!\n", + kobject_name(kobj), kobj, __func__); + return 0; + } + + /* originating subsystem */ + if (uevent_ops && uevent_ops->name) + subsystem = uevent_ops->name(kset, kobj); + else + subsystem = kobject_name(&kset->kobj); + if (!subsystem) { + pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " + "event to drop!\n", kobject_name(kobj), kobj, + __func__); + return 0; + } + + /* environment buffer */ + env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); + if (!env) + return -ENOMEM; + + /* complete object path */ + devpath = kobject_get_path(kobj, GFP_KERNEL); + if (!devpath) { + retval = -ENOENT; + goto exit; + } + + /* default keys */ + retval = add_uevent_var(env, "ACTION=%s", action_string); + if (retval) + goto exit; + retval = add_uevent_var(env, "DEVPATH=%s", devpath); + if (retval) + goto exit; + retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem); + if (retval) + goto exit; + + /* keys passed in from the caller */ + if (envp_ext) { + for (i = 0; envp_ext[i]; i++) { + retval = add_uevent_var(env, "%s", envp_ext[i]); + if (retval) + goto exit; + } + } + + /* let the kset specific function add its stuff */ + if (uevent_ops && uevent_ops->uevent) { + retval = uevent_ops->uevent(kset, kobj, env); + if (retval) { + pr_debug("kobject: '%s' (%p): %s: uevent() returned " + "%d\n", kobject_name(kobj), kobj, + __func__, retval); + goto exit; + } + } + + /* + * Mark "add" and "remove" events in the object to ensure proper + * events to userspace during automatic cleanup. If the object did + * send an "add" event, "remove" will automatically be generated by + * the core, if not already done by the caller. + */ + if (action == KOBJ_ADD) + kobj->state_add_uevent_sent = 1; + else if (action == KOBJ_REMOVE) + kobj->state_remove_uevent_sent = 1; + + /* we will send an event, so request a new sequence number */ + spin_lock(&sequence_lock); + seq = ++psb_uevent_seqnum; + spin_unlock(&sequence_lock); + retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq); + if (retval) + goto exit; + +#if defined(CONFIG_NET) + /* send netlink message */ + if (uevent_sock) { + struct sk_buff *skb; + size_t len; + + /* allocate message with the maximum possible size */ + len = strlen(action_string) + strlen(devpath) + 2; + skb = alloc_skb(len + env->buflen, GFP_KERNEL); + if (skb) { + char *scratch; + + /* add header */ + scratch = skb_put(skb, len); + sprintf(scratch, "%s@%s", action_string, devpath); + + /* copy keys to our continuous event payload buffer */ + for (i = 0; i < env->envp_idx; i++) { + len = strlen(env->envp[i]) + 1; + scratch = skb_put(skb, len); + strcpy(scratch, env->envp[i]); + } + + NETLINK_CB(skb).dst_group = 1; + retval = netlink_broadcast(uevent_sock, skb, 0, 1, + GFP_KERNEL); + /* ENOBUFS should be handled in userspace */ + if (retval == -ENOBUFS) + retval = 0; + } else + retval = -ENOMEM; + } +#endif + + /* call psb_uevent_helper, usually only enabled during early boot */ + if (psb_uevent_helper[0]) { + char *argv[3]; + + argv[0] = psb_uevent_helper; + argv[1] = (char *)subsystem; + argv[2] = NULL; + retval = add_uevent_var(env, "HOME=/"); + if (retval) + goto exit; + retval = add_uevent_var(env, + "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); + if (retval) + goto exit; + + retval = call_usermodehelper(argv[0], argv, + env->envp, UMH_WAIT_EXEC); + } + +exit: + kfree(devpath); + kfree(env); + return retval; +} +EXPORT_SYMBOL_GPL(psb_kobject_uevent_env);
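The driver itself reaches this path through psb_umevent_notify_change_gfxsock() further below, but the function can also carry caller-supplied environment strings. A hedged caller's-eye sketch (the kobject and the extra KEY=VALUE pair are illustrative, not taken from the driver):

    /* Sketch: emit a CHANGE event with one extra environment string. */
    char *envp[] = { "EXAMPLE_KEY=1", NULL };  /* hypothetical key */
    int err = psb_kobject_uevent_env(&some_obj->kobj, KOBJ_CHANGE, envp);
    if (err)
        pr_debug("uevent delivery failed: %d\n", err);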
+ +/** + * psb_kobject_uevent - notify userspace by sending an uevent + * + * @action: action that is happening + * @kobj: struct kobject that the action is happening to + * + * Returns 0 if psb_kobject_uevent() is completed with success or the + * corresponding error when it fails. + */ +int psb_kobject_uevent(struct kobject *kobj, enum kobject_action action) +{ + return psb_kobject_uevent_env(kobj, action, NULL); +} +EXPORT_SYMBOL_GPL(psb_kobject_uevent); + +/** + * psb_add_uevent_var - add key value string to the environment buffer + * @env: environment buffer structure + * @format: printf format for the key=value pair + * + * Returns 0 if environment variable was added successfully or -ENOMEM + * if no space was available. + */ +int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+{ + va_list args; + int len; + + if (env->envp_idx >= ARRAY_SIZE(env->envp)) { + WARN(1, KERN_ERR "psb_add_uevent_var: too many keys\n"); + return -ENOMEM; + } + + va_start(args, format); + len = vsnprintf(&env->buf[env->buflen], + sizeof(env->buf) - env->buflen, + format, args); + va_end(args); + + if (len >= (sizeof(env->buf) - env->buflen)) { + WARN(1, + KERN_ERR "psb_add_uevent_var: buffer size too small\n"); + return -ENOMEM; + } + + env->envp[env->envp_idx++] = &env->buf[env->buflen]; + env->buflen += len + 1; + return 0; +} +EXPORT_SYMBOL_GPL(psb_add_uevent_var); + +#if defined(CONFIG_NET) +static int __init psb_kobject_uevent_init(void) +{ + uevent_sock = netlink_kernel_create(&init_net, + NETLINK_PSB_KOBJECT_UEVENT, + 1, NULL, NULL, THIS_MODULE); + if (!uevent_sock) { + printk(KERN_ERR "psb_kobject_uevent: failed create socket!\n"); + return -ENODEV; + } + netlink_set_nonroot(NETLINK_PSB_KOBJECT_UEVENT, NL_NONROOT_RECV); + return 0; +} + +postcore_initcall(psb_kobject_uevent_init); +#endif diff --git a/drivers/gpu/drm/psb/psb_ttm_glue.c b/drivers/gpu/drm/psb/psb_ttm_glue.c new file mode 100644 index 0000000..cada0d9 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_ttm_glue.c @@ -0,0 +1,342 @@ +/************************************************************************** + * Copyright (c) 2008, Intel Corporation. + * All Rights Reserved. + * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + * develop this driver. 
+ * + **************************************************************************/ +/* + */ + +#include <drm/drmP.h> +#include "psb_drv.h" +#include "ttm/ttm_userobj_api.h" +#include "psb_powermgmt.h" + +static struct vm_operations_struct psb_ttm_vm_ops; + +int psb_open(struct inode *inode, struct file *filp) +{ + struct drm_file *file_priv; + struct drm_psb_private *dev_priv; + struct psb_fpriv *psb_fp; + int ret; + + ret = drm_open(inode, filp); + if (unlikely(ret)) + return ret; + + psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL); + if (unlikely(psb_fp == NULL)) { + ret = -ENOMEM; + goto out_err0; + } + + file_priv = (struct drm_file *) filp->private_data; + dev_priv = psb_priv(file_priv->minor->dev); + + psb_fp->tfile = ttm_object_file_init(dev_priv->tdev, + PSB_FILE_OBJECT_HASH_ORDER); + if (unlikely(psb_fp->tfile == NULL)) { + ret = -ENOMEM; + goto out_err1; + } + + file_priv->driver_priv = psb_fp; + + if (unlikely(dev_priv->bdev.dev_mapping == NULL)) + dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping; + + return 0; + +out_err1: + kfree(psb_fp); +out_err0: + (void) drm_release(inode, filp); + return ret; +} + +int psb_release(struct inode *inode, struct file *filp) +{ + struct drm_file *file_priv; + struct psb_fpriv *psb_fp; + struct drm_psb_private *dev_priv; + int ret; + + file_priv = (struct drm_file *) filp->private_data; + psb_fp = psb_fpriv(file_priv); + dev_priv = psb_priv(file_priv->minor->dev); + + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND|PSB_DISPLAY_ISLAND, true); + + ttm_object_file_release(&psb_fp->tfile); + kfree(psb_fp); + + if (dev_priv && dev_priv->xhw_file) + psb_xhw_init_takedown(dev_priv, file_priv, 1); + + ret = drm_release(inode, filp); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND|PSB_DISPLAY_ISLAND); + if (drm_psb_ospm && IS_MRST(dev_priv->dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 0); + + if (IS_MRST(dev_priv->dev)) + schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0); + if (IS_MRST(dev_priv->dev)) + schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0); + + return ret; +} + +int psb_fence_signaled_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_psb_private *dev_priv = psb_priv(dev); + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +int psb_fence_finish_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_psb_private *dev_priv = psb_priv(dev); + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +int psb_fence_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_psb_private *dev_priv = psb_priv(dev); + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return
ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data); +} + +int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_psb_private *dev_priv = psb_priv(dev); + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile, + &psb_priv(dev)->ttm_lock, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_psb_private *dev_priv = psb_priv(dev); + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +int psb_pl_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + int ret; + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +int psb_pl_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + int ret; + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +int psb_pl_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = psb_priv(dev); + int ret; + powermgmt_using_hw_begin(dev_priv->dev->pdev, PSB_GRAPHICS_ISLAND, true); + ret = ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile, + &dev_priv->bdev, &dev_priv->ttm_lock, data); + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + if (drm_psb_ospm && IS_MRST(dev)) + schedule_delayed_work(&dev_priv->scheduler.wq, 1); + return ret; +} + +/** + * psb_ttm_fault - Wrapper around the ttm fault method. + * + * @vma: The struct vm_area_struct as in the vm fault() method. + * @vmf: The struct vm_fault as in the vm fault() method. + * + * Since ttm_fault() will reserve buffers while faulting, + * we need to take the ttm read lock around it, as this driver + * relies on the ttm_lock in write mode to exclude all threads from + * reserving and thus validating buffers in aperture- and memory shortage + * situations. 
+ */ + +static int psb_ttm_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + struct drm_psb_private *dev_priv = + container_of(bo->bdev, struct drm_psb_private, bdev); + int ret; + + ret = ttm_read_lock(&dev_priv->ttm_lock, true); + if (unlikely(ret != 0)) + return VM_FAULT_NOPAGE; + + ret = dev_priv->ttm_vm_ops->fault(vma, vmf); + + ttm_read_unlock(&dev_priv->ttm_lock); + return ret; +} + + +int psb_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv; + struct drm_psb_private *dev_priv; + int ret; + + if (unlikely(vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET)) + return drm_mmap(filp, vma); + + file_priv = (struct drm_file *) filp->private_data; + dev_priv = psb_priv(file_priv->minor->dev); + + ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(dev_priv->ttm_vm_ops == NULL)) { + dev_priv->ttm_vm_ops = vma->vm_ops; + psb_ttm_vm_ops = *vma->vm_ops; + psb_ttm_vm_ops.fault = &psb_ttm_fault; + } + + vma->vm_ops = &psb_ttm_vm_ops; + + return 0; +} + +ssize_t psb_ttm_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct drm_file *file_priv = (struct drm_file *)filp->private_data; + struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev); + + return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1); +} + +ssize_t psb_ttm_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + struct drm_file *file_priv = (struct drm_file *)filp->private_data; + struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev); + + return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1); +} + +int psb_verify_access(struct ttm_buffer_object *bo, + struct file *filp) +{ + struct drm_file *file_priv = (struct drm_file *)filp->private_data; + + if (capable(CAP_SYS_ADMIN)) + return 0; + + if (unlikely(!file_priv->authenticated)) + return -EPERM; + + return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile); +} + +static int psb_ttm_mem_global_init(struct drm_global_reference *ref) +{ + return ttm_mem_global_init(ref->object); +} + +static void psb_ttm_mem_global_release(struct drm_global_reference *ref) +{ + ttm_mem_global_release(ref->object); +} + +int psb_ttm_global_init(struct drm_psb_private *dev_priv) +{ + struct drm_global_reference *global_ref; + int ret; + + global_ref = &dev_priv->mem_global_ref; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &psb_ttm_mem_global_init; + global_ref->release = &psb_ttm_mem_global_release; + + ret = drm_global_item_ref(global_ref); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed referencing a global TTM memory object.\n"); + return ret; + } + + return 0; +} + +void psb_ttm_global_release(struct drm_psb_private *dev_priv) +{ + drm_global_item_unref(&dev_priv->mem_global_ref); +} diff --git a/drivers/gpu/drm/psb/psb_umevents.c b/drivers/gpu/drm/psb/psb_umevents.c new file mode 100644 index 0000000..90b91c1 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_umevents.c @@ -0,0 +1,490 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * 
and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * James C. Gualario + * + */ +#include "psb_umevents.h" +/** + * define sysfs operations supported by umevent objects. + * + */ +static struct sysfs_ops umevent_obj_sysfs_ops = { + .show = psb_umevent_attr_show, + .store = psb_umevent_attr_store, +}; +/** + * define the data attributes we will expose through sysfs. + * + */ +static struct umevent_attribute data_0 = + __ATTR(data_0_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +static struct umevent_attribute data_1 = + __ATTR(data_1_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +static struct umevent_attribute data_2 = + __ATTR(data_2_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +static struct umevent_attribute data_3 = + __ATTR(data_3_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +static struct umevent_attribute data_4 = + __ATTR(data_4_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +static struct umevent_attribute data_5 = + __ATTR(data_5_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +static struct umevent_attribute data_6 = + __ATTR(data_6_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +static struct umevent_attribute data_7 = + __ATTR(data_7_val, 0666, psb_umevent_attr_show_imp, + psb_umevent_attr_store_imp); +/** + * define the structure used to seed our ktype. + * + */ +static struct attribute *umevent_obj_default_attrs[] = { + &data_0.attr, + &data_1.attr, + &data_2.attr, + &data_3.attr, + &data_4.attr, + &data_5.attr, + &data_6.attr, + &data_7.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; +/** + * specify the ktype for our kobjects. 
+ * + */ +static struct kobj_type umevent_obj_ktype = { + .sysfs_ops = &umevent_obj_sysfs_ops, + .release = psb_umevent_obj_release, + .default_attrs = umevent_obj_default_attrs, +}; +/** + * psb_umevent_attr_show - default kobject show function + * + * @kobj: kobject associated with the show operation + * @attr: attribute being requested + * @buf: pointer to the return buffer + * + */ +ssize_t psb_umevent_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct umevent_attribute *attribute; + struct umevent_obj *any_umevent_obj; + attribute = to_umevent_attr(attr); + any_umevent_obj = to_umevent_obj(kobj); + if (!attribute->show) + return -EIO; + + return attribute->show(any_umevent_obj, attribute, buf); +} +/** + * psb_umevent_attr_store - default kobject store function + * + * @kobj: kobject associated with the store operation + * @attr: attribute being requested + * @buf: input data to write to attribute + * @len: character count + * + */ +ssize_t psb_umevent_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct umevent_attribute *attribute; + struct umevent_obj *any_umevent_obj; + attribute = to_umevent_attr(attr); + any_umevent_obj = to_umevent_obj(kobj); + if (!attribute->store) + return -EIO; + + return attribute->store(any_umevent_obj, attribute, buf, len); +} +/** + * psb_umevent_obj_release - kobject release function + * + * @kobj: kobject to be released. + */ +void psb_umevent_obj_release(struct kobject *kobj) +{ + struct umevent_obj *any_umevent_obj; + any_umevent_obj = to_umevent_obj(kobj); + kfree(any_umevent_obj); +} +/** + * psb_umevent_attr_show_imp - attribute show implementation + * + * @any_umevent_obj: kobject managed data to read from + * @attr: attribute being requested + * @buf: pointer to the return buffer + * + */ +ssize_t psb_umevent_attr_show_imp(struct umevent_obj + *any_umevent_obj, + struct umevent_attribute *attr, + char *buf) +{ + int var; + + if (strcmp(attr->attr.name, "data_0_val") == 0) + var = any_umevent_obj->data_0_val; + else if (strcmp(attr->attr.name, "data_1_val") == 0) + var = any_umevent_obj->data_1_val; + else if (strcmp(attr->attr.name, "data_2_val") == 0) + var = any_umevent_obj->data_2_val; + else if (strcmp(attr->attr.name, "data_3_val") == 0) + var = any_umevent_obj->data_3_val; + else if (strcmp(attr->attr.name, "data_4_val") == 0) + var = any_umevent_obj->data_4_val; + else if (strcmp(attr->attr.name, "data_5_val") == 0) + var = any_umevent_obj->data_5_val; + else if (strcmp(attr->attr.name, "data_6_val") == 0) + var = any_umevent_obj->data_6_val; + else + var = any_umevent_obj->data_7_val; + + return sprintf(buf, "%d\n", var); +}
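Once an event object is registered through psb_create_umevent_obj() below, these attributes appear as ordinary sysfs files. A hedged userspace-side sketch of reading one data slot (the path components in angle brackets are placeholders; the real kset and object names are whatever psb_umevent_init() and psb_create_umevent_obj() were given):

    /* Sketch: read one data slot of a umevent object through sysfs. */
    #include <stdio.h>
    int main(void)
    {
        FILE *f = fopen("/sys/<kset-name>/<event-name>/data_0_val", "r");
        int val;
        if (f && fscanf(f, "%d", &val) == 1)
            printf("data_0_val = %d\n", val);
        if (f)
            fclose(f);
        return 0;
    }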
+/** + * psb_umevent_attr_store_imp - attribute store implementation + * + * @any_umevent_obj: kobject managed data to write to + * @attr: attribute being requested + * @buf: input data to write to attribute + * @count: character count + * + */ +ssize_t psb_umevent_attr_store_imp(struct umevent_obj + *any_umevent_obj, + struct umevent_attribute *attr, + const char *buf, size_t count) +{ + int var; + + sscanf(buf, "%d", &var); + if (strcmp(attr->attr.name, "data_0_val") == 0) + any_umevent_obj->data_0_val = var; + else if (strcmp(attr->attr.name, "data_1_val") == 0) + any_umevent_obj->data_1_val = var; + else if (strcmp(attr->attr.name, "data_2_val") == 0) + any_umevent_obj->data_2_val = var; + else if (strcmp(attr->attr.name, "data_3_val") == 0) + any_umevent_obj->data_3_val = var; + else if (strcmp(attr->attr.name, "data_4_val") == 0) + any_umevent_obj->data_4_val = var; + else if (strcmp(attr->attr.name, "data_5_val") == 0) + any_umevent_obj->data_5_val = var; + else if (strcmp(attr->attr.name, "data_6_val") == 0) + any_umevent_obj->data_6_val = var; + else + any_umevent_obj->data_7_val = var; + return count; +} +/** + * psb_create_umevent_obj - create and track new event objects + * + * @name: name to give to new sysfs / kobject entry + * @list: event object list to track the kobject in + */ +struct umevent_obj *psb_create_umevent_obj(const char *name, + struct umevent_list + *list) +{ + struct umevent_obj *new_umevent_obj; + int retval; + new_umevent_obj = kzalloc(sizeof(*new_umevent_obj), + GFP_KERNEL); + if (!new_umevent_obj) + return NULL; + + new_umevent_obj->kobj.kset = list->umevent_disp_pool; + retval = kobject_init_and_add(&new_umevent_obj->kobj, + &umevent_obj_ktype, NULL, + "%s", name); + if (retval) { + kobject_put(&new_umevent_obj->kobj); + return NULL; + } + psb_umevent_add_to_list(list, new_umevent_obj); + return new_umevent_obj; +} +EXPORT_SYMBOL(psb_create_umevent_obj); +/** + * psb_umevent_notify - inform user mode of a new device + * + * @notify_disp_obj: event object to perform notification for + * + */ +void psb_umevent_notify(struct umevent_obj *notify_disp_obj) +{ + kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD); +} +EXPORT_SYMBOL(psb_umevent_notify); +/** + * psb_umevent_notify_change - notify user mode of a change to a device + * + * @notify_disp_obj: event object to perform notification for + * + */ +void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj) +{ + kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE); +} +EXPORT_SYMBOL(psb_umevent_notify_change); +/** + * psb_umevent_notify_change_gfxsock - notify user mode of a change via the psb netlink socket + * + * @notify_disp_obj: event object to perform notification for + * + */ +void psb_umevent_notify_change_gfxsock(struct umevent_obj *notify_disp_obj) +{ + psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE); +} +EXPORT_SYMBOL(psb_umevent_notify_change_gfxsock); +/** + * psb_destroy_umevent_obj - decrement ref count on event so kernel can kill it + * + * @any_umevent_obj: event object to destroy + * + */ +void psb_destroy_umevent_obj(struct umevent_obj + *any_umevent_obj) +{ + kobject_put(&any_umevent_obj->kobj); +} +/** + * + * psb_umevent_init - init the event pool + * + * @parent_kobj: parent kobject to associate new kset with + * @new_umevent_list: event list to associate kset with + * @name: name to give to new sysfs entry + * + */ +int psb_umevent_init(struct kobject *parent_kobj, + struct umevent_list *new_umevent_list, + const char *name) +{ + psb_umevent_init_list(new_umevent_list); + new_umevent_list->umevent_disp_pool = kset_create_and_add(name, NULL, + parent_kobj); + if (!new_umevent_list->umevent_disp_pool) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(psb_umevent_init); +/** + * + * psb_umevent_cleanup - cleanup all event objects + * + * @kill_list: list of events to destroy + * + */ +void psb_umevent_cleanup(struct umevent_list *kill_list) +{ + psb_umevent_destroy_list(kill_list); +} +EXPORT_SYMBOL(psb_umevent_cleanup); +/** + * psb_umevent_add_to_list - add an event to the event list + * + * @list: list to add the event to + * @umevent_obj_to_add: event to add + * + */ +void psb_umevent_add_to_list(struct umevent_list *list, + struct umevent_obj *umevent_obj_to_add) +{ + unsigned long flags; + spin_lock_irqsave(&list->list_lock, flags); + list_add(&umevent_obj_to_add->head, &list->head); + spin_unlock_irqrestore(&list->list_lock,
flags); +} +/** + * psb_umevent_init_list - initialize event list + * + * @list: list to initialize + * + */ +void psb_umevent_init_list(struct umevent_list *list) +{ + spin_lock_init(&list->list_lock); + INIT_LIST_HEAD(&list->head); +} +/** + * psb_umevent_create_list - allocate an event list + * + */ +struct umevent_list *psb_umevent_create_list() +{ + struct umevent_list *new_umevent_list; + new_umevent_list = NULL; + new_umevent_list = kmalloc(sizeof(struct umevent_list), + GFP_ATOMIC); + return new_umevent_list; +} +EXPORT_SYMBOL(psb_umevent_create_list); +/** + * psb_umevent_destroy_list - destroy a list and clean up all mem + * + * @list: list to destroy and clean up after + * + */ +void psb_umevent_destroy_list(struct umevent_list *list) +{ + struct umevent_obj *umevent_obj_curr; + struct list_head *node; + struct list_head *node_kill; + int i; + i = 0; + node = NULL; + node_kill = NULL; + node = list->head.next; + while (node != (&list->head)) { + umevent_obj_curr = list_entry(node, + struct umevent_obj, + head); + node_kill = node; + node = umevent_obj_curr->head.next; + psb_destroy_umevent_obj(umevent_obj_curr); + umevent_obj_curr = NULL; + list_del(node_kill); + i++; + } + kset_unregister(list->umevent_disp_pool); + kfree(list); +} +/** + * psb_umevent_remove_from_list - remove an event from tracking list + * + * @list: list to remove the event from + * @disp_to_remove: name of event to remove. + * + */ +void psb_umevent_remove_from_list(struct umevent_list *list, + const char *disp_to_remove) +{ + struct umevent_obj *umevent_obj_curr = NULL; + struct list_head *node = NULL; + struct list_head *node_kill = NULL; + int i = 0; + int found_match = 0; + i = 0; + node = NULL; + node_kill = NULL; + node = list->head.next; + while (node != (&list->head)) { + umevent_obj_curr = list_entry(node, + struct umevent_obj, head); + if (strcmp(umevent_obj_curr->kobj.name, + disp_to_remove) == 0) { + found_match = 1; + break; + } + node = NULL; + node = umevent_obj_curr->head.next; + i++; + } + if (found_match == 1) { + node_kill = node; + node = umevent_obj_curr->head.next; + psb_destroy_umevent_obj(umevent_obj_curr); + umevent_obj_curr = NULL; + list_del(node_kill); + } +} +EXPORT_SYMBOL(psb_umevent_remove_from_list); +/** + * psb_umevent_find_obj - find an event in a tracking list + * + * @name: name of the event to find + * @list: list to find the event in + * + */ +struct umevent_obj *psb_umevent_find_obj(const char *name, + struct umevent_list *list) +{ + struct umevent_obj *umevent_obj_curr = NULL; + struct list_head *node = NULL; + struct list_head *node_find = NULL; + int i = 0; + int found_match = 0; + i = 0; + node = NULL; + node_find = NULL; + node = list->head.next; + while (node != (&list->head)) { + umevent_obj_curr = list_entry(node, + struct umevent_obj, head); + if (strcmp(umevent_obj_curr->kobj.name, + name) == 0) { + found_match = 1; + break; + } + node = NULL; + node = umevent_obj_curr->head.next; + i++; + } + if (found_match == 1) + return umevent_obj_curr; + + return NULL; +} +EXPORT_SYMBOL(psb_umevent_find_obj); +/** + * psb_umevent_debug_dump_list - debug list dump + * + * @list: list to dump + * + */ +void psb_umevent_debug_dump_list(struct umevent_list *list) +{ + struct umevent_obj *umevent_obj_curr; + unsigned long flags; + struct list_head *node; + int i; + spin_lock_irqsave(&list->list_lock, flags); + i = 0; + node = NULL; + node = list->head.next; + while (node != (&list->head)) { + umevent_obj_curr = list_entry(node, + struct umevent_obj, + head); + /*TBD: DUMP 
ANY REQUIRED VALUES WITH PRINTK*/ + node = NULL; + node = umevent_obj_curr->head.next; + i++; + } + spin_unlock_irqrestore(&list->list_lock, flags); +} diff --git a/drivers/gpu/drm/psb/psb_umevents.h b/drivers/gpu/drm/psb/psb_umevents.h new file mode 100644 index 0000000..05dbc8b --- /dev/null +++ b/drivers/gpu/drm/psb/psb_umevents.h @@ -0,0 +1,150 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * James C. Gualario + * + */ +#ifndef _PSB_UMEVENT_H_ +#define _PSB_UMEVENT_H_ +/** + * required includes + * + */ +#include +#include +#include +#include +#include +#include +#include +/** + * event structure managed by kobjects + * + */ +struct umevent_obj { + struct kobject kobj; + struct list_head head; + int data_0_val; + int data_1_val; + int data_2_val; + int data_3_val; + int data_4_val; + int data_5_val; + int data_6_val; + int data_7_val; +}; +/** + * event tracking list element + * + */ +struct umevent_list{ + struct list_head head; + struct kset *umevent_disp_pool; + spinlock_t list_lock; +}; +/** + * to go back and forth between kobjects and their main container + * + */ +#define to_umevent_obj(x) \ + container_of(x, struct umevent_obj, kobj) + +/** + * event attributes exposed via sysfs + * + */ +struct umevent_attribute { + struct attribute attr; + ssize_t (*show)(struct umevent_obj *any_umevent_obj, + struct umevent_attribute *attr, char *buf); + ssize_t (*store)(struct umevent_obj *any_umevent_obj, + struct umevent_attribute *attr, + const char *buf, size_t count); +}; +/** + * to go back and forth between the attribute passed to us by the OS + * and the umevent_attribute + * + */ +#define to_umevent_attr(x) \ + container_of(x, struct umevent_attribute, \ + attr) + +/** + * umevent function prototypes + * + */ +extern struct umevent_obj *psb_create_umevent_obj(const char *name, + struct umevent_list + *list); +extern ssize_t psb_umevent_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf); +extern ssize_t psb_umevent_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len); +extern ssize_t psb_umevent_attr_show_imp(struct umevent_obj + *any_umevent_obj, + struct umevent_attribute *attr, + char *buf); +extern ssize_t psb_umevent_attr_store_imp(struct umevent_obj + *any_umevent_obj, + struct umevent_attribute *attr, + const char *buf, size_t count); +extern void psb_umevent_cleanup(struct umevent_list 
*kill_list); +extern int psb_umevent_init(struct kobject *parent_kobj, + struct umevent_list *new_umevent_list, + const char *name); +extern void psb_umevent_init_list(struct umevent_list *list); +extern void psb_umevent_debug_dump_list(struct umevent_list *list); +extern void psb_umevent_add_to_list(struct umevent_list *list, + struct umevent_obj + *umevent_obj_to_add); +extern void psb_umevent_destroy_list(struct umevent_list *list); +extern struct umevent_list *psb_umevent_create_list(void); +extern void psb_umevent_notify(struct umevent_obj *notify_disp_obj); +extern void psb_umevent_obj_release(struct kobject *kobj); +extern void psb_umevent_remove_from_list(struct umevent_list *list, + const char *disp_to_remove); +extern void psb_umevent_workqueue_dispatch(int work_type, const char *name, + struct umevent_list *list); +extern void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj); +extern void psb_umevent_notify_change_gfxsock(struct umevent_obj + *notify_disp_obj); +extern struct umevent_obj *psb_umevent_find_obj(const char *name, + struct umevent_list + *list); +/** + * socket function prototypes + * + */ +extern int psb_kobject_uevent(struct kobject *kobj, + enum kobject_action action); +extern int psb_kobject_uevent_env(struct kobject *kobj, + enum kobject_action action, + char *envp[]); +int psb_add_uevent_var(struct kobj_uevent_env *env, + const char *format, ...) + __attribute__((format (printf, 2, 3))); +int psb_kobject_action_type(const char *buf, + size_t count, enum kobject_action *type); +#endif diff --git a/drivers/gpu/drm/psb/psb_xhw.c b/drivers/gpu/drm/psb/psb_xhw.c new file mode 100644 index 0000000..58ce493 --- /dev/null +++ b/drivers/gpu/drm/psb/psb_xhw.c @@ -0,0 +1,652 @@ +/************************************************************************** + *Copyright (c) 2007-2008, Intel Corporation. + *All Rights Reserved. + * + *This program is free software; you can redistribute it and/or modify it + *under the terms and conditions of the GNU General Public License, + *version 2, as published by the Free Software Foundation. + * + *This program is distributed in the hope it will be useful, but WITHOUT + *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + *more details. + * + *You should have received a copy of the GNU General Public License along with + *this program; if not, write to the Free Software Foundation, Inc., + *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to + *develop this driver. + * + **************************************************************************/ +/* + *Make calls into closed source X server code. 
+ */ + +#include <drm/drmP.h> +#include "psb_drv.h" +#include "ttm/ttm_userobj_api.h" +#include "psb_powermgmt.h" + +void +psb_xhw_clean_buf(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + list_del_init(&buf->head); + if (dev_priv->xhw_cur_buf == buf) + dev_priv->xhw_cur_buf = NULL; + atomic_set(&buf->done, 1); + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); +} + +static inline int psb_xhw_add(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + atomic_set(&buf->done, 0); + if (unlikely(!dev_priv->xhw_submit_ok)) { + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + DRM_ERROR("No Xpsb 3D extension available.\n"); + return -EINVAL; + } + if (!list_empty(&buf->head)) { + DRM_ERROR("Recursive list adding.\n"); + goto out; + } + list_add_tail(&buf->head, &dev_priv->xhw_in); + wake_up_interruptible(&dev_priv->xhw_queue); +out: + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + return 0; +} + +int psb_xhw_scene_info(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t w, + uint32_t h, + uint32_t *hw_cookie, + uint32_t *bo_size, + uint32_t *clear_p_start, + uint32_t *clear_num_pages) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + int ret; + + buf->copy_back = 1; + xa->op = PSB_XHW_SCENE_INFO; + xa->irq_op = 0; + xa->issue_irq = 0; + xa->arg.si.w = w; + xa->arg.si.h = h; + + ret = psb_xhw_add(dev_priv, buf); + if (ret) + return ret; + + (void) wait_event_timeout(dev_priv->xhw_caller_queue, + atomic_read(&buf->done), DRM_HZ); + + if (!atomic_read(&buf->done)) { + psb_xhw_clean_buf(dev_priv, buf); + return -EBUSY; + } + + if (!xa->ret) { + memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie)); + *bo_size = xa->arg.si.size; + *clear_p_start = xa->arg.si.clear_p_start; + *clear_num_pages = xa->arg.si.clear_num_pages; + } + return xa->ret; +} + +int psb_xhw_fire_raster(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t fire_flags) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + + buf->copy_back = 0; + xa->op = PSB_XHW_FIRE_RASTER; + xa->issue_irq = 0; + xa->arg.sb.fire_flags = 0; + + return psb_xhw_add(dev_priv, buf); +} + +int psb_xhw_vistest(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + + buf->copy_back = 1; + xa->op = PSB_XHW_VISTEST; + /* + *Could perhaps decrease latency somewhat by + *issuing an irq in this case. + */ + xa->issue_irq = 0; + xa->irq_op = PSB_UIRQ_VISTEST; + return psb_xhw_add(dev_priv, buf); +} + +int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t fire_flags, + uint32_t hw_context, + uint32_t *cookie, + uint32_t *oom_cmds, + uint32_t num_oom_cmds, + uint32_t offset, uint32_t engine, + uint32_t flags) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + + buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM); + xa->op = PSB_XHW_SCENE_BIND_FIRE; + xa->issue_irq = (buf->copy_back) ? 1 : 0; + if (unlikely(buf->copy_back)) + xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY; + else + xa->irq_op = 0; + xa->arg.sb.fire_flags = fire_flags; + xa->arg.sb.hw_context = hw_context; + xa->arg.sb.offset = offset; + xa->arg.sb.engine = engine; + xa->arg.sb.flags = flags; + xa->arg.sb.num_oom_cmds = num_oom_cmds; + memcpy(xa->cookie, cookie, sizeof(xa->cookie)); + if (num_oom_cmds) + memcpy(xa->arg.sb.oom_cmds, oom_cmds, + sizeof(uint32_t) * num_oom_cmds); + return psb_xhw_add(dev_priv, buf); +} + +int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + int ret; + + buf->copy_back = 1; + xa->op = PSB_XHW_RESET_DPM; + xa->issue_irq = 0; + xa->irq_op = 0; + + ret = psb_xhw_add(dev_priv, buf); + if (ret) + return ret; + + (void) wait_event_timeout(dev_priv->xhw_caller_queue, + atomic_read(&buf->done), 3 * DRM_HZ); + + if (!atomic_read(&buf->done)) { + psb_xhw_clean_buf(dev_priv, buf); + return -EBUSY; + } + + return xa->ret; +} + +int psb_xhw_check_lockup(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t *value) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + int ret; + + *value = 0; + + buf->copy_back = 1; + xa->op = PSB_XHW_CHECK_LOCKUP; + xa->issue_irq = 0; + xa->irq_op = 0; + + ret = psb_xhw_add(dev_priv, buf); + if (ret) + return ret; + + (void) wait_event_timeout(dev_priv->xhw_caller_queue, + atomic_read(&buf->done), DRM_HZ * 3); + + if (!atomic_read(&buf->done)) { + psb_xhw_clean_buf(dev_priv, buf); + return -EBUSY; + } + + if (!xa->ret) + *value = xa->arg.cl.value; + + return xa->ret; +} + +static int psb_xhw_terminate(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + unsigned long irq_flags; + + buf->copy_back = 0; + xa->op = PSB_XHW_TERMINATE; + xa->issue_irq = 0; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + dev_priv->xhw_submit_ok = 0; + atomic_set(&buf->done, 0); + if (!list_empty(&buf->head)) { + DRM_ERROR("Recursive list adding.\n"); + goto out; + } + list_add_tail(&buf->head, &dev_priv->xhw_in); +out: + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + wake_up_interruptible(&dev_priv->xhw_queue); + + (void) wait_event_timeout(dev_priv->xhw_caller_queue, + atomic_read(&buf->done), DRM_HZ / 10); + + if (!atomic_read(&buf->done)) { + DRM_ERROR("Xpsb terminate timeout.\n"); + psb_xhw_clean_buf(dev_priv, buf); + return -EBUSY; + } + + return 0; +} + +int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t pages, uint32_t * hw_cookie, + uint32_t * size, + uint32_t * ta_min_size) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + int ret; + + buf->copy_back = 1; + xa->op = PSB_XHW_TA_MEM_INFO; + xa->issue_irq = 0; + xa->irq_op = 0; + xa->arg.bi.pages = pages; + + ret = psb_xhw_add(dev_priv, buf); + if (ret) + return ret; + + (void) wait_event_timeout(dev_priv->xhw_caller_queue, + atomic_read(&buf->done), DRM_HZ); + + if (!atomic_read(&buf->done)) { + psb_xhw_clean_buf(dev_priv, buf); + return -EBUSY; + } + + if (!xa->ret) + memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie)); + + *size = xa->arg.bi.size; + *ta_min_size = xa->arg.bi.ta_min_size; + return xa->ret; +} + +int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t flags, + uint32_t param_offset, + uint32_t pt_offset, uint32_t *hw_cookie) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + int ret; + + buf->copy_back = 1; + xa->op = PSB_XHW_TA_MEM_LOAD; + xa->issue_irq = 0; + xa->irq_op = 0; + xa->arg.bl.flags = flags; 
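+ /* Editor's assumption: param_offset and pt_offset locate the parameter area and its page tables inside the TA memory buffer reported by psb_xhw_ta_mem_info() above; the hw cookie round-trips opaque Xpsb scene state. */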
+ xa->arg.bl.param_offset = param_offset; + xa->arg.bl.pt_offset = pt_offset; + memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie)); + + ret = psb_xhw_add(dev_priv, buf); + if (ret) + return ret; + + (void) wait_event_timeout(dev_priv->xhw_caller_queue, + atomic_read(&buf->done), 3 * DRM_HZ); + + if (!atomic_read(&buf->done)) { + psb_xhw_clean_buf(dev_priv, buf); + return -EBUSY; + } + + if (!xa->ret) + memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie)); + + return xa->ret; +} + +int psb_xhw_ta_oom(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t *cookie) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + + /* + *This calls the extensive closed source + *OOM handler, which resolves the condition and + *sends a reply telling the scheduler what to do + *with the task. + */ + + buf->copy_back = 1; + xa->op = PSB_XHW_OOM; + xa->issue_irq = 1; + xa->irq_op = PSB_UIRQ_OOM_REPLY; + memcpy(xa->cookie, cookie, sizeof(xa->cookie)); + + return psb_xhw_add(dev_priv, buf); +} + +void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, + uint32_t *cookie, + uint32_t *bca, uint32_t *rca, uint32_t *flags) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + + /* + *Get info about how to schedule an OOM task. + */ + + memcpy(cookie, xa->cookie, sizeof(xa->cookie)); + *bca = xa->arg.oom.bca; + *rca = xa->arg.oom.rca; + *flags = xa->arg.oom.flags; +} + +void psb_xhw_fire_reply(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf, uint32_t *cookie) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + + memcpy(cookie, xa->cookie, sizeof(xa->cookie)); +} + +int psb_xhw_resume(struct drm_psb_private *dev_priv, + struct psb_xhw_buf *buf) +{ + struct drm_psb_xhw_arg *xa = &buf->arg; + int ret; + /* + *For D0i3, force resume to complete + */ + buf->copy_back = 1; + xa->op = PSB_XHW_RESUME; + xa->issue_irq = 0; + xa->irq_op = 0; + ret = psb_xhw_add(dev_priv, buf); + if (ret) + return ret; + (void) wait_event_timeout(dev_priv->xhw_caller_queue, + atomic_read(&buf->done), 3 * DRM_HZ); + + if (!atomic_read(&buf->done)) { + psb_xhw_clean_buf(dev_priv, buf); + DRM_ERROR("Xpsb resume fail\n"); + return -EBUSY; + } + return ret; +} + +void psb_xhw_takedown(struct drm_psb_private *dev_priv) +{ +} + +int psb_xhw_init(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + unsigned long irq_flags; + + INIT_LIST_HEAD(&dev_priv->xhw_in); + spin_lock_init(&dev_priv->xhw_lock); + atomic_set(&dev_priv->xhw_client, 0); + init_waitqueue_head(&dev_priv->xhw_queue); + init_waitqueue_head(&dev_priv->xhw_caller_queue); + mutex_init(&dev_priv->xhw_mutex); + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + dev_priv->xhw_on = 0; + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + + return 0; +} + +static int psb_xhw_init_init(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_psb_xhw_init_arg *arg) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; + int ret; + bool is_iomem; + + if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) { + unsigned long irq_flags; + + dev_priv->xhw_bo = + ttm_buffer_object_lookup(tfile, arg->buffer_handle); + if (!dev_priv->xhw_bo) { + ret = -EINVAL; + goto out_err; + } + ret = ttm_bo_kmap(dev_priv->xhw_bo, 0, + dev_priv->xhw_bo->num_pages, + &dev_priv->xhw_kmap); + if (ret) { + DRM_ERROR("Failed mapping X server " + "communications buffer.\n"); + goto out_err0; + } + dev_priv->xhw = + 
ttm_kmap_obj_virtual(&dev_priv->xhw_kmap, &is_iomem); + if (is_iomem) { + DRM_ERROR("X server communications buffer " + "is in device memory.\n"); + ret = -EINVAL; + goto out_err1; + } + dev_priv->xhw_file = file_priv; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + dev_priv->xhw_on = 1; + dev_priv->xhw_submit_ok = 1; + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + return 0; + } else { + DRM_ERROR("Xhw is already initialized.\n"); + return -EBUSY; + } +out_err1: + dev_priv->xhw = NULL; + ttm_bo_kunmap(&dev_priv->xhw_kmap); +out_err0: + ttm_bo_unref(&dev_priv->xhw_bo); +out_err: + atomic_dec(&dev_priv->xhw_client); + return ret; +} + +static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv) +{ + struct psb_xhw_buf *cur_buf, *next; + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + dev_priv->xhw_submit_ok = 0; + + list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) { + list_del_init(&cur_buf->head); + if (cur_buf->copy_back) + cur_buf->arg.ret = -EINVAL; + atomic_set(&cur_buf->done, 1); + } + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + wake_up(&dev_priv->xhw_caller_queue); +} + +void psb_xhw_init_takedown(struct drm_psb_private *dev_priv, + struct drm_file *file_priv, int closing) +{ + + if (dev_priv->xhw_file == file_priv && + atomic_add_unless(&dev_priv->xhw_client, -1, 0)) { + + if (closing) + psb_xhw_queue_empty(dev_priv); + else { + struct psb_xhw_buf buf; + INIT_LIST_HEAD(&buf.head); + + psb_xhw_terminate(dev_priv, &buf); + psb_xhw_queue_empty(dev_priv); + } + + dev_priv->xhw = NULL; + ttm_bo_kunmap(&dev_priv->xhw_kmap); + ttm_bo_unref(&dev_priv->xhw_bo); + dev_priv->xhw_file = NULL; + } +} + +int psb_xhw_init_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_xhw_init_arg *arg = + (struct drm_psb_xhw_init_arg *) data; + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + int ret = 0; + powermgmt_using_hw_begin(dev->pdev, PSB_GRAPHICS_ISLAND, true); + switch (arg->operation) { + case PSB_XHW_INIT: + ret = psb_xhw_init_init(dev, file_priv, arg); + break; + case PSB_XHW_TAKEDOWN: + psb_xhw_init_takedown(dev_priv, file_priv, 0); + break; + } + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + return ret; +} + +static int psb_xhw_in_empty(struct drm_psb_private *dev_priv) +{ + int empty; + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + empty = list_empty(&dev_priv->xhw_in); + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + return empty; +} + +int psb_xhw_handler(struct drm_psb_private *dev_priv) +{ + unsigned long irq_flags; + struct drm_psb_xhw_arg *xa; + struct psb_xhw_buf *buf; + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + + if (!dev_priv->xhw_on) { + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + return -EINVAL; + } + + buf = dev_priv->xhw_cur_buf; + if (buf && buf->copy_back) { + xa = &buf->arg; + /* w/a for resume, save this memcpy for performance */ + if (xa->op != PSB_XHW_RESUME) + memcpy(xa, dev_priv->xhw, sizeof(*xa)); + dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op; + atomic_set(&buf->done, 1); + wake_up(&dev_priv->xhw_caller_queue); + } else + dev_priv->comm[PSB_COMM_USER_IRQ] = 0; + + dev_priv->xhw_cur_buf = NULL; + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + return 0; +}
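Every synchronous request in this file follows the same round trip: the caller queues a psb_xhw_buf on xhw_in, the X server's blocked psb_xhw_ioctl() below hands it to Xpsb through the shared communications buffer, and psb_xhw_handler() copies the reply back and wakes the caller. A hedged sketch condensed from the helpers above (PSB_XHW_EXAMPLE_OP and the function itself are hypothetical):

    /* Sketch: the canonical blocking round trip to the Xpsb server. */
    static int example_xhw_roundtrip(struct drm_psb_private *dev_priv,
                                     struct psb_xhw_buf *buf)
    {
        struct drm_psb_xhw_arg *xa = &buf->arg;
        int ret;

        buf->copy_back = 1;               /* ask psb_xhw_handler() to copy results back */
        xa->op = PSB_XHW_EXAMPLE_OP;      /* hypothetical operation code */
        xa->issue_irq = 0;
        xa->irq_op = 0;

        ret = psb_xhw_add(dev_priv, buf); /* queue on xhw_in, wake psb_xhw_ioctl() */
        if (ret)
            return ret;

        (void) wait_event_timeout(dev_priv->xhw_caller_queue,
                                  atomic_read(&buf->done), DRM_HZ);
        if (!atomic_read(&buf->done)) {   /* the X server never replied */
            psb_xhw_clean_buf(dev_priv, buf);
            return -EBUSY;
        }
        return xa->ret;                   /* status filled in by Xpsb */
    }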
+ +int psb_xhw_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_psb_private *dev_priv = + (struct drm_psb_private *) dev->dev_private; + unsigned long irq_flags; + struct drm_psb_xhw_arg *xa; + int ret; + struct list_head *list; + struct psb_xhw_buf *buf; + static int firsttime = 1; + + if (!dev_priv) + return -EINVAL; + + /* tricky fix for SGX HW access from user space when Xpsb is loaded */ + if (firsttime) { + firsttime = 0; + powermgmt_using_hw_end(PSB_GRAPHICS_ISLAND); + } + + if (mutex_lock_interruptible(&dev_priv->xhw_mutex)) + return -ERESTART; + + if (psb_forced_user_interrupt(dev_priv)) { + mutex_unlock(&dev_priv->xhw_mutex); + return -EINVAL; + } + + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + while (list_empty(&dev_priv->xhw_in)) { + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + ret = wait_event_interruptible_timeout(dev_priv->xhw_queue, + !psb_xhw_in_empty + (dev_priv), DRM_HZ); + if (ret == -ERESTARTSYS || ret == 0) { + mutex_unlock(&dev_priv->xhw_mutex); + return -ERESTART; + } + spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); + } + + list = dev_priv->xhw_in.next; + list_del_init(list); + + buf = list_entry(list, struct psb_xhw_buf, head); + xa = &buf->arg; + memcpy(dev_priv->xhw, xa, sizeof(*xa)); + + if (unlikely(buf->copy_back)) + dev_priv->xhw_cur_buf = buf; + else { + atomic_set(&buf->done, 1); + dev_priv->xhw_cur_buf = NULL; + } + + if (xa->op == PSB_XHW_TERMINATE) { + dev_priv->xhw_on = 0; + wake_up(&dev_priv->xhw_caller_queue); + } + spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); + + mutex_unlock(&dev_priv->xhw_mutex); + + return 0; +} diff --git a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c new file mode 100644 index 0000000..28fbe3b --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c @@ -0,0 +1,149 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + * Keith Packard.
+ */ + +#include "ttm/ttm_bo_driver.h" +#ifdef TTM_HAS_AGP +#include "ttm/ttm_placement_common.h" +#include +#include +#include + +struct ttm_agp_backend { + struct ttm_backend backend; + struct agp_memory *mem; + struct agp_bridge_data *bridge; +}; + +static int ttm_agp_populate(struct ttm_backend *backend, + unsigned long num_pages, struct page **pages, + struct page *dummy_read_page) +{ + struct ttm_agp_backend *agp_be = + container_of(backend, struct ttm_agp_backend, backend); + struct page **cur_page, **last_page = pages + num_pages; + struct agp_memory *mem; + + mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); + if (unlikely(mem == NULL)) + return -ENOMEM; + + mem->page_count = 0; + for (cur_page = pages; cur_page < last_page; ++cur_page) { + struct page *page = *cur_page; + if (!page) { + page = dummy_read_page; + } + mem->memory[mem->page_count++] = + phys_to_gart(page_to_phys(page)); + } + agp_be->mem = mem; + return 0; +} + +static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) +{ + struct ttm_agp_backend *agp_be = + container_of(backend, struct ttm_agp_backend, backend); + struct agp_memory *mem = agp_be->mem; + int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED); + int ret; + + mem->is_flushed = 1; + mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; + + ret = agp_bind_memory(mem, bo_mem->mm_node->start); + if (ret) + printk(KERN_ERR "AGP Bind memory failed.\n"); + + return ret; +} + +static int ttm_agp_unbind(struct ttm_backend *backend) +{ + struct ttm_agp_backend *agp_be = + container_of(backend, struct ttm_agp_backend, backend); + + if (agp_be->mem->is_bound) + return agp_unbind_memory(agp_be->mem); + else + return 0; +} + +static void ttm_agp_clear(struct ttm_backend *backend) +{ + struct ttm_agp_backend *agp_be = + container_of(backend, struct ttm_agp_backend, backend); + struct agp_memory *mem = agp_be->mem; + + if (mem) { + ttm_agp_unbind(backend); + agp_free_memory(mem); + } + agp_be->mem = NULL; +} + +static void ttm_agp_destroy(struct ttm_backend *backend) +{ + struct ttm_agp_backend *agp_be = + container_of(backend, struct ttm_agp_backend, backend); + + if (agp_be->mem) + ttm_agp_clear(backend); + kfree(agp_be); +} + +static struct ttm_backend_func ttm_agp_func = { + .populate = ttm_agp_populate, + .clear = ttm_agp_clear, + .bind = ttm_agp_bind, + .unbind = ttm_agp_unbind, + .destroy = ttm_agp_destroy, +}; + +struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, + struct agp_bridge_data *bridge) +{ + struct ttm_agp_backend *agp_be; + + agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL); + if (!agp_be) + return NULL; + + agp_be->mem = NULL; + agp_be->bridge = bridge; + agp_be->backend.func = &ttm_agp_func; + agp_be->backend.bdev = bdev; + return &agp_be->backend; +} + +#endif diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo.c b/drivers/gpu/drm/psb/ttm/ttm_bo.c new file mode 100644 index 0000000..7cdbd45 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_bo.c @@ -0,0 +1,1716 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_placement_common.h" +#include +#include +#include +#include +#include + +#define TTM_ASSERT_LOCKED(param) +#define TTM_DEBUG(fmt, arg...) +#define TTM_BO_HASH_ORDER 13 + +static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); +static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); +static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); + +static inline uint32_t ttm_bo_type_flags(unsigned type) +{ + return (1 << (type)); +} + +static void ttm_bo_release_list(struct kref *list_kref) +{ + struct ttm_buffer_object *bo = + container_of(list_kref, struct ttm_buffer_object, list_kref); + struct ttm_bo_device *bdev = bo->bdev; + + BUG_ON(atomic_read(&bo->list_kref.refcount)); + BUG_ON(atomic_read(&bo->kref.refcount)); + BUG_ON(atomic_read(&bo->cpu_writers)); + BUG_ON(bo->sync_obj != NULL); + BUG_ON(bo->mem.mm_node != NULL); + BUG_ON(!list_empty(&bo->lru)); + BUG_ON(!list_empty(&bo->ddestroy)); + + if (bo->ttm) + ttm_tt_destroy(bo->ttm); + if (bo->destroy) + bo->destroy(bo); + else { + ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false); + kfree(bo); + } +} + +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) +{ + + if (interruptible) { + int ret = 0; + + ret = wait_event_interruptible(bo->event_queue, + atomic_read(&bo->reserved) == 0); + if (unlikely(ret != 0)) + return -ERESTART; + } else { + wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); + } + return 0; +} + +static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man; + + BUG_ON(!atomic_read(&bo->reserved)); + + if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) { + + BUG_ON(!list_empty(&bo->lru)); + + man = &bdev->man[bo->mem.mem_type]; + list_add_tail(&bo->lru, &man->lru); + kref_get(&bo->list_kref); + + if (bo->ttm != NULL) { + list_add_tail(&bo->swap, &bdev->swap_lru); + kref_get(&bo->list_kref); + } + } +} + +/* + * Call with bdev->lru_lock and bdev->global->swap_lock held.. 
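 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * callers bracket the helper below with the LRU lock and then drop the
 * collected list references, exactly as ttm_bo_reserve() does further down:
 *
 *	spin_lock(&bdev->lru_lock);
 *	put_count = ttm_bo_del_from_lru(bo);
 *	spin_unlock(&bdev->lru_lock);
 *	while (put_count--)
 *		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 *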
+ */ + +static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) +{ + int put_count = 0; + + if (!list_empty(&bo->swap)) { + list_del_init(&bo->swap); + ++put_count; + } + if (!list_empty(&bo->lru)) { + list_del_init(&bo->lru); + ++put_count; + } + + /* + * TODO: Add a driver hook to delete from + * driver-specific LRU's here. + */ + + return put_count; +} + +int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait, bool use_sequence, uint32_t sequence) +{ + struct ttm_bo_device *bdev = bo->bdev; + int ret; + + while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { + if (use_sequence && bo->seq_valid && + (sequence - bo->val_seq < (1 << 31))) { + return -EAGAIN; + } + + if (no_wait) + return -EBUSY; + + spin_unlock(&bdev->lru_lock); + ret = ttm_bo_wait_unreserved(bo, interruptible); + spin_lock(&bdev->lru_lock); + + if (unlikely(ret)) + return ret; + } + + if (use_sequence) { + bo->val_seq = sequence; + bo->seq_valid = true; + } else { + bo->seq_valid = false; + } + + return 0; +} + +static void ttm_bo_ref_bug(struct kref *list_kref) +{ + BUG(); +} + +int ttm_bo_reserve(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait, bool use_sequence, uint32_t sequence) +{ + struct ttm_bo_device *bdev = bo->bdev; + int put_count = 0; + int ret; + + spin_lock(&bdev->lru_lock); + ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence, + sequence); + if (likely(ret == 0)) + put_count = ttm_bo_del_from_lru(bo); + spin_unlock(&bdev->lru_lock); + + while (put_count--) + kref_put(&bo->list_kref, ttm_bo_ref_bug); + + return ret; +} + +void ttm_bo_unreserve(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + + spin_lock(&bdev->lru_lock); + ttm_bo_add_to_lru(bo); + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + spin_unlock(&bdev->lru_lock); +} + +/* + * Call bo->mutex locked. + */ + +static int ttm_bo_add_ttm(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + int ret = 0; + uint32_t page_flags = 0; + + TTM_ASSERT_LOCKED(&bo->mutex); + bo->ttm = NULL; + + switch (bo->type) { + case ttm_bo_type_device: + case ttm_bo_type_kernel: + bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, + page_flags, bdev->dummy_read_page); + if (unlikely(bo->ttm == NULL)) + ret = -ENOMEM; + break; + case ttm_bo_type_user: + bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, + page_flags | TTM_PAGE_FLAG_USER, + bdev->dummy_read_page); + if (unlikely(bo->ttm == NULL)) { + ret = -ENOMEM; + break; + } + + ret = ttm_tt_set_user(bo->ttm, current, + bo->buffer_start, bo->num_pages); + if (unlikely(ret != 0)) + ttm_tt_destroy(bo->ttm); + break; + default: + printk(KERN_ERR "Illegal buffer object type\n"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, + bool evict, bool interruptible, bool no_wait) +{ + struct ttm_bo_device *bdev = bo->bdev; + bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); + bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); + struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; + struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; + int ret = 0; + + if (old_is_pci || new_is_pci || + ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0)) + ttm_bo_unmap_virtual(bo); + + /* + * Create and bind a ttm if required.
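 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * the reserve/unreserve pair defined above is the per-buffer lock callers
 * hold around placement changes; a hypothetical caller would do:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		... inspect or change bo placement safely ...
 *		ttm_bo_unreserve(bo);
 *	}
 *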
+ */ + + if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { + ret = ttm_bo_add_ttm(bo); + if (ret) + goto out_err; + + ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags); + if (ret) + return ret; + + if (mem->mem_type != TTM_PL_SYSTEM) { + ret = ttm_tt_bind(bo->ttm, mem); + if (ret) + goto out_err; + } + + if (bo->mem.mem_type == TTM_PL_SYSTEM) { + + struct ttm_mem_reg *old_mem = &bo->mem; + uint32_t save_flags = old_mem->flags; + uint32_t save_proposed_flags = old_mem->proposed_flags; + + *old_mem = *mem; + mem->mm_node = NULL; + old_mem->proposed_flags = save_proposed_flags; + ttm_flag_masked(&save_flags, mem->flags, + TTM_PL_MASK_MEMTYPE); + goto moved; + } + + } + + if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && + !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) + ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); + else if (bdev->driver->move) + ret = bdev->driver->move(bo, evict, interruptible, + no_wait, mem); + else + ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem); + + if (ret) + goto out_err; + + moved: + if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) { + ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags); + if (ret) + printk(KERN_ERR "Can not flush read caches\n"); + } + + ttm_flag_masked(&bo->priv_flags, + (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0, + TTM_BO_PRIV_FLAG_EVICTED); + + if (bo->mem.mm_node) + bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + + bdev->man[bo->mem.mem_type].gpu_offset; + + return 0; + + out_err: + new_man = &bdev->man[bo->mem.mem_type]; + if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { + ttm_tt_unbind(bo->ttm); + ttm_tt_destroy(bo->ttm); + bo->ttm = NULL; + } + + return ret; +} + +static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo, + bool allow_errors) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_driver *driver = bdev->driver; + + if (bo->sync_obj) { + if (bdev->nice_mode) { + unsigned long _end = jiffies + 3 * HZ; + int ret; + do { + ret = ttm_bo_wait(bo, false, false, false); + if (ret && allow_errors) + return ret; + + } while (ret && !time_after_eq(jiffies, _end)); + + if (bo->sync_obj) { + bdev->nice_mode = false; + printk(KERN_ERR "Detected probable GPU lockup. " + "Evicting buffer.\n"); + } + } + if (bo->sync_obj) { + driver->sync_obj_unref(&bo->sync_obj); + bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; + } + } + return 0; +} + +/** + * If bo idle, remove from delayed- and lru lists, and unref. + * If not idle, and already on delayed list, do nothing. + * If not idle, and not on delayed list, put on delayed list, + * up the list_kref and schedule a delayed list check. 
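 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * ttm_bo_handle_move_mem() above calls bdev->driver->move when the driver
 * provides one; a minimal hook (the mydrv_ name is hypothetical) can just
 * fall back to the generic copy:
 *
 *	static int mydrv_move(struct ttm_buffer_object *bo, bool evict,
 *			      bool interruptible, bool no_wait,
 *			      struct ttm_mem_reg *new_mem)
 *	{
 *		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *	}
 *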
+ */ + +static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_driver *driver = bdev->driver; + + mutex_lock(&bo->mutex); + + if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj, + bo->sync_obj_arg)) { + driver->sync_obj_unref(&bo->sync_obj); + bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; + } + + if (bo->sync_obj && remove_all) + (void)ttm_bo_expire_sync_obj(bo, false); + + if (!bo->sync_obj) { + int put_count; + + if (bo->ttm) + ttm_tt_unbind(bo->ttm); + spin_lock(&bdev->lru_lock); + if (!list_empty(&bo->ddestroy)) { + list_del_init(&bo->ddestroy); + kref_put(&bo->list_kref, ttm_bo_ref_bug); + } + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; + } + put_count = ttm_bo_del_from_lru(bo); + spin_unlock(&bdev->lru_lock); + mutex_unlock(&bo->mutex); + while (put_count--) + kref_put(&bo->list_kref, ttm_bo_release_list); + + return; + } + + spin_lock(&bdev->lru_lock); + if (list_empty(&bo->ddestroy)) { + spin_unlock(&bdev->lru_lock); + driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg); + spin_lock(&bdev->lru_lock); + if (list_empty(&bo->ddestroy)) { + kref_get(&bo->list_kref); + list_add_tail(&bo->ddestroy, &bdev->ddestroy); + } + spin_unlock(&bdev->lru_lock); + schedule_delayed_work(&bdev->wq, + ((HZ / 100) < 1) ? 1 : HZ / 100); + } else + spin_unlock(&bdev->lru_lock); + + mutex_unlock(&bo->mutex); + return; +} + +/** + * Traverse the delayed list, and call ttm_bo_cleanup_refs on all + * encountered buffers. + */ + +static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) +{ + struct ttm_buffer_object *entry, *nentry; + struct list_head *list, *next; + int ret; + + spin_lock(&bdev->lru_lock); + list_for_each_safe(list, next, &bdev->ddestroy) { + entry = list_entry(list, struct ttm_buffer_object, ddestroy); + nentry = NULL; + + /* + * Protect the next list entry from destruction while we + * unlock the lru_lock. + */ + + if (next != &bdev->ddestroy) { + nentry = list_entry(next, struct ttm_buffer_object, + ddestroy); + kref_get(&nentry->list_kref); + } + kref_get(&entry->list_kref); + + spin_unlock(&bdev->lru_lock); + ttm_bo_cleanup_refs(entry, remove_all); + kref_put(&entry->list_kref, ttm_bo_release_list); + spin_lock(&bdev->lru_lock); + + if (nentry) { + bool next_onlist = !list_empty(next); + kref_put(&nentry->list_kref, ttm_bo_release_list); + + /* + * Someone might have raced us and removed the + * next entry from the list. We don't bother restarting + * list traversal. + */ + + if (!next_onlist) + break; + } + } + ret = !list_empty(&bdev->ddestroy); + spin_unlock(&bdev->lru_lock); + + return ret; +} + +static void ttm_bo_delayed_workqueue(struct work_struct *work) +{ + struct ttm_bo_device *bdev = + container_of(work, struct ttm_bo_device, wq.work); + + if (ttm_bo_delayed_delete(bdev, false)) { + schedule_delayed_work(&bdev->wq, + ((HZ / 100) < 1) ? 
1 : HZ / 100); + } +} + +static void ttm_bo_release(struct kref *kref) +{ + struct ttm_buffer_object *bo = + container_of(kref, struct ttm_buffer_object, kref); + struct ttm_bo_device *bdev = bo->bdev; + + if (likely(bo->vm_node != NULL)) { + rb_erase(&bo->vm_rb, &bdev->addr_space_rb); + drm_mm_put_block(bo->vm_node); + } + write_unlock(&bdev->vm_lock); + ttm_bo_cleanup_refs(bo, false); + kref_put(&bo->list_kref, ttm_bo_release_list); + write_lock(&bdev->vm_lock); +} + +void ttm_bo_unref(struct ttm_buffer_object **p_bo) +{ + struct ttm_buffer_object *bo = *p_bo; + struct ttm_bo_device *bdev = bo->bdev; + + *p_bo = NULL; + write_lock(&bdev->vm_lock); + kref_put(&bo->kref, ttm_bo_release); + write_unlock(&bdev->vm_lock); +} + +static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, + bool interruptible, bool no_wait) +{ + int ret = 0; + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_reg evict_mem; + + if (bo->mem.mem_type != mem_type) + goto out; + + ret = ttm_bo_wait(bo, false, interruptible, no_wait); + if (ret && ret != -ERESTART) { + printk(KERN_ERR "Failed to expire sync object before " + "buffer eviction.\n"); + goto out; + } + + BUG_ON(!atomic_read(&bo->reserved)); + + evict_mem = bo->mem; + evict_mem.mm_node = NULL; + + evict_mem.proposed_flags = bdev->driver->evict_flags(bo); + BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags); + + ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait); + if (unlikely(ret != 0 && ret != -ERESTART)) { + evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM; + BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags); + ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait); + } + + if (ret) { + if (ret != -ERESTART) + printk(KERN_ERR "Failed to find memory space for " + "buffer 0x%p eviction.\n", bo); + goto out; + } + + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait); + if (ret) { + if (ret != -ERESTART) + printk(KERN_ERR "Buffer eviction failed\n"); + goto out; + } + + spin_lock(&bdev->lru_lock); + if (evict_mem.mm_node) { + drm_mm_put_block(evict_mem.mm_node); + evict_mem.mm_node = NULL; + } + spin_unlock(&bdev->lru_lock); + + ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED, + TTM_BO_PRIV_FLAG_EVICTED); + + out: + return ret; +} + +/** + * Repeatedly evict memory from the LRU for @mem_type until we create enough + * space, or we've evicted everything and there isn't enough space. 
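 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * ttm_bo_unref() above both drops the caller's reference and clears the
 * pointer, so the usual pattern is simply:
 *
 *	struct ttm_buffer_object *bo = ...;
 *
 *	... use bo ...
 *	ttm_bo_unref(&bo);	(bo is NULL afterwards; the final kref_put
 *				 may start delayed destruction)
 *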
+ */ +static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem, + uint32_t mem_type, + bool interruptible, bool no_wait) +{ + struct drm_mm_node *node; + struct ttm_buffer_object *entry; + struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + struct list_head *lru; + unsigned long num_pages = mem->num_pages; + int put_count = 0; + int ret; + + retry_pre_get: + ret = drm_mm_pre_get(&man->manager); + if (unlikely(ret != 0)) + return ret; + + spin_lock(&bdev->lru_lock); + do { + node = drm_mm_search_free(&man->manager, num_pages, + mem->page_alignment, 1); + if (node) + break; + + lru = &man->lru; + if (list_empty(lru)) + break; + + entry = list_first_entry(lru, struct ttm_buffer_object, lru); + kref_get(&entry->list_kref); + + ret = + ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0); + + if (likely(ret == 0)) + put_count = ttm_bo_del_from_lru(entry); + + spin_unlock(&bdev->lru_lock); + + if (unlikely(ret != 0)) + return ret; + + while (put_count--) + kref_put(&entry->list_kref, ttm_bo_ref_bug); + + mutex_lock(&entry->mutex); + ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait); + mutex_unlock(&entry->mutex); + + ttm_bo_unreserve(entry); + + kref_put(&entry->list_kref, ttm_bo_release_list); + if (ret) + return ret; + + spin_lock(&bdev->lru_lock); + } while (1); + + if (!node) { + spin_unlock(&bdev->lru_lock); + return -ENOMEM; + } + + node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); + if (unlikely(!node)) { + spin_unlock(&bdev->lru_lock); + goto retry_pre_get; + } + + spin_unlock(&bdev->lru_lock); + mem->mm_node = node; + mem->mem_type = mem_type; + return 0; +} + +static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, + bool disallow_fixed, + uint32_t mem_type, + uint32_t mask, uint32_t * res_mask) +{ + uint32_t cur_flags = ttm_bo_type_flags(mem_type); + + if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) + return false; + + if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) + return false; + + if ((mask & man->available_caching) == 0) + return false; + if (mask & man->default_caching) + cur_flags |= man->default_caching; + else if (mask & TTM_PL_FLAG_CACHED) + cur_flags |= TTM_PL_FLAG_CACHED; + else if (mask & TTM_PL_FLAG_WC) + cur_flags |= TTM_PL_FLAG_WC; + else + cur_flags |= TTM_PL_FLAG_UNCACHED; + + *res_mask = cur_flags; + return true; +} + +/** + * Creates space for memory region @mem according to its type. + * + * This function first searches for free space in compatible memory types in + * the priority order defined by the driver. If free space isn't found, then + * ttm_bo_mem_force_space is attempted in priority order to evict and find + * space. 
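 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * the "priority order defined by the driver" comes from tables in struct
 * ttm_bo_driver; assuming a TTM_PL_TT placement type from
 * ttm_placement_common.h, a driver preferring GART pages over system
 * memory would set:
 *
 *	static uint32_t mydrv_prios[] = { TTM_PL_TT, TTM_PL_SYSTEM };
 *
 *	driver.num_mem_type_prio = ARRAY_SIZE(mydrv_prios);
 *	driver.mem_type_prio = mydrv_prios;
 *	driver.num_mem_busy_prio = ARRAY_SIZE(mydrv_prios);
 *	driver.mem_busy_prio = mydrv_prios;
 *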
+ */ +int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, bool interruptible, bool no_wait) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man; + + uint32_t num_prios = bdev->driver->num_mem_type_prio; + const uint32_t *prios = bdev->driver->mem_type_prio; + uint32_t i; + uint32_t mem_type = TTM_PL_SYSTEM; + uint32_t cur_flags = 0; + bool type_found = false; + bool type_ok = false; + bool has_eagain = false; + struct drm_mm_node *node = NULL; + int ret; + + mem->mm_node = NULL; + for (i = 0; i < num_prios; ++i) { + mem_type = prios[i]; + man = &bdev->man[mem_type]; + + type_ok = ttm_bo_mt_compatible(man, + bo->type == ttm_bo_type_user, + mem_type, mem->proposed_flags, + &cur_flags); + + if (!type_ok) + continue; + + if (mem_type == TTM_PL_SYSTEM) + break; + + if (man->has_type && man->use_type) { + type_found = true; + do { + ret = drm_mm_pre_get(&man->manager); + if (unlikely(ret)) + return ret; + + spin_lock(&bdev->lru_lock); + node = drm_mm_search_free(&man->manager, + mem->num_pages, + mem->page_alignment, + 1); + if (unlikely(!node)) { + spin_unlock(&bdev->lru_lock); + break; + } + node = drm_mm_get_block_atomic(node, + mem->num_pages, + mem-> + page_alignment); + spin_unlock(&bdev->lru_lock); + } while (!node); + } + if (node) + break; + } + + if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { + mem->mm_node = node; + mem->mem_type = mem_type; + mem->flags = cur_flags; + return 0; + } + + if (!type_found) + return -EINVAL; + + num_prios = bdev->driver->num_mem_busy_prio; + prios = bdev->driver->mem_busy_prio; + + for (i = 0; i < num_prios; ++i) { + mem_type = prios[i]; + man = &bdev->man[mem_type]; + + if (!man->has_type) + continue; + + if (!ttm_bo_mt_compatible(man, + bo->type == ttm_bo_type_user, + mem_type, + mem->proposed_flags, &cur_flags)) + continue; + + ret = ttm_bo_mem_force_space(bdev, mem, mem_type, + interruptible, no_wait); + + if (ret == 0 && mem->mm_node) { + mem->flags = cur_flags; + return 0; + } + + if (ret == -ERESTART) + has_eagain = true; + } + + ret = (has_eagain) ? -ERESTART : -ENOMEM; + return ret; +} + +/* + * Call bo->mutex locked. + * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + */ + +static int ttm_bo_busy(struct ttm_buffer_object *bo) +{ + void *sync_obj = bo->sync_obj; + struct ttm_bo_driver *driver = bo->bdev->driver; + + if (sync_obj) { + if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) { + driver->sync_obj_unref(&bo->sync_obj); + bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; + return 0; + } + driver->sync_obj_flush(sync_obj, bo->sync_obj_arg); + if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) { + driver->sync_obj_unref(&bo->sync_obj); + bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; + return 0; + } + return 1; + } + return 0; +} + +int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) +{ + int ret = 0; + + if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) + return -EBUSY; + + ret = wait_event_interruptible(bo->event_queue, + atomic_read(&bo->cpu_writers) == 0); + + if (ret == -ERESTARTSYS) + ret = -ERESTART; + + return ret; +} + +/* + * bo->mutex locked. + * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags. + */ + +int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags, + bool interruptible, bool no_wait) +{ + struct ttm_bo_device *bdev = bo->bdev; + int ret = 0; + struct ttm_mem_reg mem; + + BUG_ON(!atomic_read(&bo->reserved)); + + /* + * FIXME: It's possible to pipeline buffer moves. 
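 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * ttm_bo_mem_space() above takes a ttm_mem_reg whose proposed_flags pick
 * the acceptable placements; ttm_bo_move_buffer() below is the canonical
 * caller and essentially does:
 *
 *	struct ttm_mem_reg mem;
 *
 *	mem.num_pages = bo->num_pages;
 *	mem.size = mem.num_pages << PAGE_SHIFT;
 *	mem.proposed_flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *	ret = ttm_bo_mem_space(bo, &mem, true, false);
 *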
+ * Have the driver move function wait for idle when necessary, + * instead of doing it here. + */ + + ttm_bo_busy(bo); + ret = ttm_bo_wait(bo, false, interruptible, no_wait); + if (ret) + return ret; + + mem.num_pages = bo->num_pages; + mem.size = mem.num_pages << PAGE_SHIFT; + mem.proposed_flags = new_mem_flags; + mem.page_alignment = bo->mem.page_alignment; + + /* + * Determine where to move the buffer. + */ + + ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait); + if (ret) + goto out_unlock; + + ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); + + out_unlock: + if (ret && mem.mm_node) { + spin_lock(&bdev->lru_lock); + drm_mm_put_block(mem.mm_node); + spin_unlock(&bdev->lru_lock); + } + return ret; +} + +static int ttm_bo_mem_compat(struct ttm_mem_reg *mem) +{ + if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0) + return 0; + if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0) + return 0; + + return 1; +} + +int ttm_buffer_object_validate(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait) +{ + int ret; + + BUG_ON(!atomic_read(&bo->reserved)); + bo->mem.proposed_flags = bo->proposed_flags; + + TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n", + (unsigned long)bo->mem.proposed_flags, + (unsigned long)bo->mem.flags); + + /* + * Check whether we need to move buffer. + */ + + if (!ttm_bo_mem_compat(&bo->mem)) { + ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags, + interruptible, no_wait); + if (ret) { + if (ret != -ERESTART) + printk(KERN_ERR "Failed moving buffer. " + "Proposed placement 0x%08x\n", + bo->mem.proposed_flags); + if (ret == -ENOMEM) + printk(KERN_ERR "Out of aperture space or " + "DRM memory quota.\n"); + return ret; + } + } + + /* + * We might need to add a TTM. 
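 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * a typical caller re-places a buffer by updating bo->proposed_flags under
 * reservation and then validating (TTM_PL_FLAG_TT is assumed to come from
 * ttm_placement_common.h):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		bo->proposed_flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED;
 *		ret = ttm_buffer_object_validate(bo, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 *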
+ */ + + if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { + ret = ttm_bo_add_ttm(bo); + if (ret) + return ret; + } + /* + * Validation has succeeded, move the access and other + * non-mapping-related flag bits from the proposed flags to + * the active flags + */ + + ttm_flag_masked(&bo->mem.flags, bo->proposed_flags, + ~TTM_PL_MASK_MEMTYPE); + + return 0; +} + +int +ttm_bo_check_placement(struct ttm_buffer_object *bo, + uint32_t set_flags, uint32_t clr_flags) +{ + uint32_t new_mask = set_flags | clr_flags; + + if ((bo->type == ttm_bo_type_user) && (clr_flags & TTM_PL_FLAG_CACHED)) { + printk(KERN_ERR + "User buffers require cache-coherent memory.\n"); + return -EINVAL; + } + + if (!capable(CAP_SYS_ADMIN)) { + if (new_mask & TTM_PL_FLAG_NO_EVICT) { + printk(KERN_ERR "Need to be root to modify" + " NO_EVICT status.\n"); + return -EINVAL; + } + + if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) && + (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) { + printk(KERN_ERR "Incompatible memory specification" + " for NO_EVICT buffer.\n"); + return -EINVAL; + } + } + return 0; +} + +int ttm_buffer_object_init(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + uint32_t flags, + uint32_t page_alignment, + unsigned long buffer_start, + bool interruptible, + struct file *persistant_swap_storage, + size_t acc_size, + void (*destroy) (struct ttm_buffer_object *)) +{ + int ret = 0; + unsigned long num_pages; + + size += buffer_start & ~PAGE_MASK; + num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (num_pages == 0) { + printk(KERN_ERR "Illegal buffer object size.\n"); + return -EINVAL; + } + bo->destroy = destroy; + + mutex_init(&bo->mutex); + mutex_lock(&bo->mutex); + kref_init(&bo->kref); + kref_init(&bo->list_kref); + atomic_set(&bo->cpu_writers, 0); + atomic_set(&bo->reserved, 1); + init_waitqueue_head(&bo->event_queue); + INIT_LIST_HEAD(&bo->lru); + INIT_LIST_HEAD(&bo->ddestroy); + INIT_LIST_HEAD(&bo->swap); + bo->bdev = bdev; + bo->type = type; + bo->num_pages = num_pages; + bo->mem.mem_type = TTM_PL_SYSTEM; + bo->mem.num_pages = bo->num_pages; + bo->mem.mm_node = NULL; + bo->mem.page_alignment = page_alignment; + bo->buffer_start = buffer_start & PAGE_MASK; + bo->priv_flags = 0; + bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); + bo->seq_valid = false; + bo->persistant_swap_storage = persistant_swap_storage; + bo->acc_size = acc_size; + + ret = ttm_bo_check_placement(bo, flags, 0ULL); + if (unlikely(ret != 0)) + goto out_err; + + /* + * If no caching attributes are set, accept any form of caching. + */ + + if ((flags & TTM_PL_MASK_CACHING) == 0) + flags |= TTM_PL_MASK_CACHING; + + bo->proposed_flags = flags; + bo->mem.proposed_flags = flags; + + /* + * For ttm_bo_type_device buffers, allocate + * address space from the device. 
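 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * ttm_bo_check_placement() above gates privileged flags; for example,
 * pinning a buffer resident is only accepted for CAP_SYS_ADMIN callers:
 *
 *	ret = ttm_bo_check_placement(bo, TTM_PL_FLAG_NO_EVICT, 0);
 *	if (ret == -EINVAL)
 *		... caller may not pin, or the flags clash ...
 *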
+ */ + + if (bo->type == ttm_bo_type_device) { + ret = ttm_bo_setup_vm(bo); + if (ret) + goto out_err; + } + + ret = ttm_buffer_object_validate(bo, interruptible, false); + if (ret) + goto out_err; + + mutex_unlock(&bo->mutex); + ttm_bo_unreserve(bo); + return 0; + + out_err: + mutex_unlock(&bo->mutex); + ttm_bo_unreserve(bo); + ttm_bo_unref(&bo); + + return ret; +} + +static inline size_t ttm_bo_size(struct ttm_bo_device *bdev, + unsigned long num_pages) +{ + size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) & + PAGE_MASK; + + return bdev->ttm_bo_size + 2 * page_array_size; +} + +int ttm_buffer_object_create(struct ttm_bo_device *bdev, + unsigned long size, + enum ttm_bo_type type, + uint32_t flags, + uint32_t page_alignment, + unsigned long buffer_start, + bool interruptible, + struct file *persistant_swap_storage, + struct ttm_buffer_object **p_bo) +{ + struct ttm_buffer_object *bo; + int ret; + struct ttm_mem_global *mem_glob = bdev->mem_glob; + + size_t acc_size = + ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); + ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false); + if (unlikely(ret != 0)) + return ret; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + + if (unlikely(bo == NULL)) { + ttm_mem_global_free(mem_glob, acc_size, false); + return -ENOMEM; + } + + ret = ttm_buffer_object_init(bdev, bo, size, type, flags, + page_alignment, buffer_start, + interruptible, + persistant_swap_storage, acc_size, NULL); + if (likely(ret == 0)) + *p_bo = bo; + + return ret; +} + +static int ttm_bo_leave_list(struct ttm_buffer_object *bo, + uint32_t mem_type, bool allow_errors) +{ + int ret; + + mutex_lock(&bo->mutex); + + ret = ttm_bo_expire_sync_obj(bo, allow_errors); + if (ret) + goto out; + + if (bo->mem.mem_type == mem_type) + ret = ttm_bo_evict(bo, mem_type, false, false); + + if (ret) { + if (allow_errors) { + goto out; + } else { + ret = 0; + printk(KERN_ERR "Cleanup eviction failed\n"); + } + } + + out: + mutex_unlock(&bo->mutex); + return ret; +} + +static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, + struct list_head *head, + unsigned mem_type, bool allow_errors) +{ + struct ttm_buffer_object *entry; + int ret; + int put_count; + + /* + * Can't use standard list traversal since we're unlocking. 
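 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * ttm_buffer_object_create() above is the convenience entry point; a
 * hypothetical kernel-internal allocation could look like:
 *
 *	struct ttm_buffer_object *bo = NULL;
 *	int ret;
 *
 *	ret = ttm_buffer_object_create(bdev, 4096, ttm_bo_type_kernel,
 *				       TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 *	if (ret == 0)
 *		... bo is validated and unreserved on return ...
 *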
+ */ + + spin_lock(&bdev->lru_lock); + + while (!list_empty(head)) { + entry = list_first_entry(head, struct ttm_buffer_object, lru); + kref_get(&entry->list_kref); + ret = ttm_bo_reserve_locked(entry, false, false, false, 0); + put_count = ttm_bo_del_from_lru(entry); + spin_unlock(&bdev->lru_lock); + while (put_count--) + kref_put(&entry->list_kref, ttm_bo_ref_bug); + BUG_ON(ret); + ret = ttm_bo_leave_list(entry, mem_type, allow_errors); + ttm_bo_unreserve(entry); + kref_put(&entry->list_kref, ttm_bo_release_list); + spin_lock(&bdev->lru_lock); + } + + spin_unlock(&bdev->lru_lock); + + return 0; +} + +int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + int ret = -EINVAL; + + if (mem_type >= TTM_NUM_MEM_TYPES) { + printk(KERN_ERR "Illegal memory type %d\n", mem_type); + return ret; + } + + if (!man->has_type) { + printk(KERN_ERR "Trying to take down uninitialized " + "memory manager type %u\n", mem_type); + return ret; + } + + man->use_type = false; + man->has_type = false; + + ret = 0; + if (mem_type > 0) { + ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); + + spin_lock(&bdev->lru_lock); + if (drm_mm_clean(&man->manager)) { + drm_mm_takedown(&man->manager); + } else { + ret = -EBUSY; + } + spin_unlock(&bdev->lru_lock); + } + + return ret; +} + +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + + if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { + printk(KERN_ERR "Illegal memory manager memory type %u.\n", + mem_type); + return -EINVAL; + } + + if (!man->has_type) { + printk(KERN_ERR "Memory type %u has not been initialized.\n", + mem_type); + return 0; + } + + return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); +} + +int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, + unsigned long p_offset, unsigned long p_size) +{ + int ret = -EINVAL; + struct ttm_mem_type_manager *man; + + if (type >= TTM_NUM_MEM_TYPES) { + printk(KERN_ERR "Illegal memory type %d\n", type); + return ret; + } + + man = &bdev->man[type]; + if (man->has_type) { + printk(KERN_ERR + "Memory manager already initialized for type %d\n", + type); + return ret; + } + + ret = bdev->driver->init_mem_type(bdev, type, man); + if (ret) + return ret; + + ret = 0; + if (type != TTM_PL_SYSTEM) { + if (!p_size) { + printk(KERN_ERR "Zero size memory manager type %d\n", + type); + return ret; + } + ret = drm_mm_init(&man->manager, p_offset, p_size); + if (ret) + return ret; + } + man->has_type = true; + man->use_type = true; + man->size = p_size; + + INIT_LIST_HEAD(&man->lru); + + return 0; +} + +int ttm_bo_device_release(struct ttm_bo_device *bdev) +{ + int ret = 0; + unsigned i = TTM_NUM_MEM_TYPES; + struct ttm_mem_type_manager *man; + + while (i--) { + man = &bdev->man[i]; + if (man->has_type) { + man->use_type = false; + if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { + ret = -EBUSY; + printk(KERN_ERR "DRM memory manager type %d " + "is not clean.\n", i); + } + man->has_type = false; + } + } + + if (!cancel_delayed_work(&bdev->wq)) + flush_scheduled_work(); + + while (ttm_bo_delayed_delete(bdev, true)) ; + + spin_lock(&bdev->lru_lock); + if (list_empty(&bdev->ddestroy)) + TTM_DEBUG("Delayed destroy list was clean\n"); + + if (list_empty(&bdev->man[0].lru)) + TTM_DEBUG("Swap list was clean\n"); + spin_unlock(&bdev->lru_lock); + + ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink); + BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); 
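	/*
	 * Editor's note (illustrative sketch, not part of the original
	 * patch): ttm_bo_init_mm() above is how a driver registers a managed
	 * aperture at load time, e.g. (type and size are hypothetical):
	 *
	 *	ret = ttm_bo_init_mm(bdev, TTM_PL_TT, 0, aperture_pages);
	 *
	 * The system type (TTM_PL_SYSTEM) is set up by ttm_bo_device_init()
	 * below and must not be re-initialized by the driver.
	 */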
+ write_lock(&bdev->vm_lock); + drm_mm_takedown(&bdev->addr_space_mm); + write_unlock(&bdev->vm_lock); + + __free_page(bdev->dummy_read_page); + return ret; +} + +/* + * This function is intended to be called on drm driver load. + * If you decide to call it from firstopen, you must protect the call + * from a potentially racing ttm_bo_driver_finish in lastclose. + * (This may happen on X server restart). + */ + +int ttm_bo_device_init(struct ttm_bo_device *bdev, + struct ttm_mem_global *mem_glob, + struct ttm_bo_driver *driver, uint64_t file_page_offset) +{ + int ret = -EINVAL; + + bdev->dummy_read_page = NULL; + rwlock_init(&bdev->vm_lock); + spin_lock_init(&bdev->lru_lock); + + bdev->driver = driver; + bdev->mem_glob = mem_glob; + + memset(bdev->man, 0, sizeof(bdev->man)); + + bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); + if (unlikely(bdev->dummy_read_page == NULL)) { + ret = -ENOMEM; + goto out_err0; + } + + /* + * Initialize the system memory buffer type. + * Other types need to be driver / IOCTL initialized. + */ + ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); + if (unlikely(ret != 0)) + goto out_err1; + + bdev->addr_space_rb = RB_ROOT; + ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); + if (unlikely(ret != 0)) + goto out_err2; + + INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); + bdev->nice_mode = true; + INIT_LIST_HEAD(&bdev->ddestroy); + INIT_LIST_HEAD(&bdev->swap_lru); + bdev->dev_mapping = NULL; + ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); + ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); + if (unlikely(ret != 0)) { + printk(KERN_ERR "Could not register buffer object swapout.\n"); + goto out_err2; + } + return 0; + out_err2: + ttm_bo_clean_mm(bdev, 0); + out_err1: + __free_page(bdev->dummy_read_page); + out_err0: + return ret; +} + +/* + * buffer object vm functions. + */ + +bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + + if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) { + if (mem->mem_type == TTM_PL_SYSTEM) + return false; + + if (man->flags & TTM_MEMTYPE_FLAG_CMA) + return false; + + if (mem->flags & TTM_PL_FLAG_CACHED) + return false; + } + return true; +} + +int ttm_bo_pci_offset(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem, + unsigned long *bus_base, + unsigned long *bus_offset, unsigned long *bus_size) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + + *bus_size = 0; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + + if (ttm_mem_reg_is_pci(bdev, mem)) { + *bus_offset = mem->mm_node->start << PAGE_SHIFT; + *bus_size = mem->num_pages << PAGE_SHIFT; + *bus_base = man->io_offset; + } + + return 0; +} + +/** + * \c Kill all user-space virtual mappings of this buffer object. + * + * \param bo The buffer object. + * + * Call bo->mutex locked. 
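 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * per the load-time comment above, the expected bring-up order is roughly
 * as follows (DRM_FILE_PAGE_OFFSET, TTM_PL_TT and the TT size are
 * driver-specific assumptions):
 *
 *	ret = ttm_bo_device_init(&dev->bdev, mem_glob, &mydrv_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET);
 *	...
 *	ret = ttm_bo_init_mm(&dev->bdev, TTM_PL_TT, 0, gtt_pages);
 *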
+ */ + +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + loff_t offset = (loff_t) bo->addr_space_offset; + loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; + + if (!bdev->dev_mapping) + return; + + unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); +} + +static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct rb_node **cur = &bdev->addr_space_rb.rb_node; + struct rb_node *parent = NULL; + struct ttm_buffer_object *cur_bo; + unsigned long offset = bo->vm_node->start; + unsigned long cur_offset; + + while (*cur) { + parent = *cur; + cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb); + cur_offset = cur_bo->vm_node->start; + if (offset < cur_offset) + cur = &parent->rb_left; + else if (offset > cur_offset) + cur = &parent->rb_right; + else + BUG(); + } + + rb_link_node(&bo->vm_rb, parent, cur); + rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb); +} + +/** + * ttm_bo_setup_vm: + * + * @bo: the buffer to allocate address space for + * + * Allocate address space in the drm device so that applications + * can mmap the buffer and access the contents. This only + * applies to ttm_bo_type_device objects as others are not + * placed in the drm device address space. + */ + +static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + int ret; + + retry_pre_get: + ret = drm_mm_pre_get(&bdev->addr_space_mm); + if (unlikely(ret != 0)) + return ret; + + write_lock(&bdev->vm_lock); + bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, + bo->mem.num_pages, 0, 0); + + if (unlikely(bo->vm_node == NULL)) { + ret = -ENOMEM; + goto out_unlock; + } + + bo->vm_node = drm_mm_get_block_atomic(bo->vm_node, + bo->mem.num_pages, 0); + + if (unlikely(bo->vm_node == NULL)) { + write_unlock(&bdev->vm_lock); + goto retry_pre_get; + } + + ttm_bo_vm_insert_rb(bo); + write_unlock(&bdev->vm_lock); + bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; + + return 0; + out_unlock: + write_unlock(&bdev->vm_lock); + return ret; +} + +int ttm_bo_wait(struct ttm_buffer_object *bo, + bool lazy, bool interruptible, bool no_wait) +{ + struct ttm_bo_driver *driver = bo->bdev->driver; + void *sync_obj; + void *sync_obj_arg; + int ret = 0; + + while (bo->sync_obj) { + if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) { + driver->sync_obj_unref(&bo->sync_obj); + bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; + goto out; + } + if (no_wait) { + ret = -EBUSY; + goto out; + } + sync_obj = driver->sync_obj_ref(bo->sync_obj); + sync_obj_arg = bo->sync_obj_arg; + mutex_unlock(&bo->mutex); + ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, + lazy, interruptible); + + mutex_lock(&bo->mutex); + if (unlikely(ret != 0)) { + driver->sync_obj_unref(&sync_obj); + return ret; + } + + if (bo->sync_obj == sync_obj) { + driver->sync_obj_unref(&bo->sync_obj); + bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; + } + driver->sync_obj_unref(&sync_obj); + } + out: + return 0; +} + +void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo) +{ + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); +} + +int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible, + bool no_wait) +{ + int ret; + + while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { + if (no_wait) + return -EBUSY; + else if (interruptible) { + ret = wait_event_interruptible + (bo->event_queue, atomic_read(&bo->reserved) == 0); + if (unlikely(ret != 
0)) + return -ERESTART; + } else { + wait_event(bo->event_queue, + atomic_read(&bo->reserved) == 0); + } + } + return 0; +} + +int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) +{ + int ret = 0; + + /* + * Using ttm_bo_reserve instead of ttm_bo_block_reservation + * makes sure the lru lists are updated. + */ + + ret = ttm_bo_reserve(bo, true, no_wait, false, 0); + if (unlikely(ret != 0)) + return ret; + mutex_lock(&bo->mutex); + ret = ttm_bo_wait(bo, false, true, no_wait); + if (unlikely(ret != 0)) + goto out_err0; + atomic_inc(&bo->cpu_writers); + out_err0: + mutex_unlock(&bo->mutex); + ttm_bo_unreserve(bo); + return ret; +} + +void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) +{ + if (atomic_dec_and_test(&bo->cpu_writers)) + wake_up_all(&bo->event_queue); +} + +/** + * A buffer object shrink method that tries to swap out the first + * buffer object on the bo_global::swap_lru list. + */ + +static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) +{ + struct ttm_bo_device *bdev = + container_of(shrink, struct ttm_bo_device, shrink); + struct ttm_buffer_object *bo; + int ret = -EBUSY; + int put_count; + uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); + + spin_lock(&bdev->lru_lock); + while (ret == -EBUSY) { + if (unlikely(list_empty(&bdev->swap_lru))) { + spin_unlock(&bdev->lru_lock); + return -EBUSY; + } + + bo = list_first_entry(&bdev->swap_lru, + struct ttm_buffer_object, swap); + kref_get(&bo->list_kref); + + /** + * Reserve buffer. Since we unlock while sleeping, we need + * to re-check that nobody removed us from the swap-list while + * we slept. + */ + + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + if (unlikely(ret == -EBUSY)) { + spin_unlock(&bdev->lru_lock); + ttm_bo_wait_unreserved(bo, false); + kref_put(&bo->list_kref, ttm_bo_release_list); + spin_lock(&bdev->lru_lock); + } + } + + BUG_ON(ret != 0); + put_count = ttm_bo_del_from_lru(bo); + spin_unlock(&bdev->lru_lock); + + while (put_count--) + kref_put(&bo->list_kref, ttm_bo_ref_bug); + + /** + * Wait for GPU, then move to system cached. + */ + + mutex_lock(&bo->mutex); + ret = ttm_bo_wait(bo, false, false, false); + if (unlikely(ret != 0)) + goto out; + + if ((bo->mem.flags & swap_placement) != swap_placement) { + struct ttm_mem_reg evict_mem; + + evict_mem = bo->mem; + evict_mem.mm_node = NULL; + evict_mem.proposed_flags = + TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; + evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; + evict_mem.mem_type = TTM_PL_SYSTEM; + + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false); + if (unlikely(ret != 0)) + goto out; + } + + ttm_bo_unmap_virtual(bo); + + /** + * Swap out. Buffer will be swapped in again as soon as + * anyone tries to access a ttm page. + */ + + ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); + out: + mutex_unlock(&bo->mutex); + + /** + * + * Unreserve without putting on LRU to avoid swapping out an + * already swapped buffer. 
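 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * the synccpu pair above brackets CPU access so that command submission
 * blocks while the CPU owns the buffer:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret == 0) {
 *		... CPU reads/writes of the buffer contents ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 *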
+ */ + + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + kref_put(&bo->list_kref, ttm_bo_release_list); + return ret; +} + +void ttm_bo_swapout_all(struct ttm_bo_device *bdev) +{ + while (ttm_bo_swapout(&bdev->shrink) == 0) ; +} diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h new file mode 100644 index 0000000..faf7475 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h @@ -0,0 +1,578 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _TTM_BO_API_H_ +#define _TTM_BO_API_H_ + +#include +#include +#include +#include +#include +#include +#include + +struct ttm_bo_device; + +struct drm_mm_node; + +/** + * struct ttm_mem_reg + * + * @mm_node: Memory manager node. + * @size: Requested size of memory region. + * @num_pages: Actual size of memory region in pages. + * @page_alignment: Page alignment. + * @flags: Placement flags. + * @proposed_flags: Proposed placement flags. + * + * Structure indicating the placement and space resources used by a + * buffer object. + */ + +struct ttm_mem_reg { + struct drm_mm_node *mm_node; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t flags; + uint32_t proposed_flags; +}; + +/** + * enum ttm_bo_type + * + * @ttm_bo_type_device: These are 'normal' buffers that can + * be mmapped by user space. Each of these bos occupy a slot in the + * device address space, that can be used for normal vm operations. + * + * @ttm_bo_type_user: These are user-space memory areas that are made + * available to the GPU by mapping the buffer pages into the GPU aperture + * space. These buffers cannot be mmaped from the device address space. + * + * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, + * but they cannot be accessed from user-space. For kernel-only use. + */ + +enum ttm_bo_type { + ttm_bo_type_device, + ttm_bo_type_user, + ttm_bo_type_kernel +}; + +struct ttm_tt; + +/** + * struct ttm_buffer_object + * + * @bdev: Pointer to the buffer object device structure. 
+ * @kref: Reference count of this buffer object. When this refcount reaches + * zero, the object is put on the delayed delete list. + * @list_kref: List reference count of this buffer object. This member is + * used to avoid destruction while the buffer object is still on a list. + * LRU lists may keep one refcount, the delayed delete list keeps one, and + * kref != 0 keeps one. When this refcount reaches zero, + * the object is destroyed. + * @proposed_flags: Proposed placement for the buffer. Changed only by the + * creator prior to validation as opposed to bo->mem.proposed_flags which is + * changed by the implementation prior to a buffer move if it wants to outsmart + * the buffer creator / user. This latter happens, for example, at eviction. + * @buffer_start: The virtual user-space start address of ttm_bo_type_user + * buffers. + * @type: The bo type. + * @offset: The current GPU offset, which can have different meanings + * depending on the memory type. For SYSTEM type memory, it should be 0. + * @mem: structure describing current placement. + * @val_seq: Sequence of the validation holding the @reserved lock. + * Used to avoid starvation when many processes compete to validate the + * buffer. This member is protected by the bo_device::lru_lock. + * @seq_valid: The value of @val_seq is valid. This value is protected by + * the bo_device::lru_lock. + * @lru: List head for the lru list. + * @ddestroy: List head for the delayed destroy list. + * @swap: List head for swap LRU list. + * @persistant_swap_storage: Usually the swap storage is deleted for buffers + * pinned in physical memory. If this behaviour is not desired, this member + * holds a pointer to a persistant shmem object. + * @destroy: Destruction function. If NULL, kfree is used. + * @sync_obj_arg: Opaque argument to synchronization object function. + * @sync_obj: Pointer to a synchronization object. + * @priv_flags: Flags describing buffer object internal state. + * @event_queue: Queue for processes waiting on buffer object status change. + * @mutex: Lock protecting all members with the exception of constant members + * and list heads. We should really use a spinlock here. + * @num_pages: Actual number of pages. + * @ttm: TTM structure holding system pages. + * @vm_rb: Rb-tree node for fast address space lookup. + * @vm_node: Address space manager node. + * @addr_space_offset: Address space offset. + * @cpu_writers: For synchronization. Number of cpu writers. + * @reserved: Deadlock-free lock used for synchronization state transitions. + * @acc_size: Accounted size for this object. + * + * Base class for TTM buffer object, that deals with data placement and CPU + * mappings. GPU mappings are really up to the driver, but for simpler GPUs + * the driver can usually use the placement offset @offset directly as the + * GPU virtual address. For drivers implementing multiple + * GPU memory manager contexts, the driver should manage the address space + * in these contexts separately and use these objects to get the correct + * placement and caching for these GPU maps. This makes it possible to use + * these objects for even quite elaborate memory management schemes. + * The destroy member and the API visibility of this object make it possible + * to derive driver-specific types.
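 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * deriving a driver type works by embedding the base object and handing a
 * matching destroy callback to ttm_buffer_object_init() (the mydrv_ names
 * are hypothetical):
 *
 *	struct mydrv_bo {
 *		struct ttm_buffer_object base;
 *		struct list_head validate_list;
 *	};
 *
 *	static void mydrv_bo_destroy(struct ttm_buffer_object *bo)
 *	{
 *		kfree(container_of(bo, struct mydrv_bo, base));
 *	}
 *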
+ */ + +struct ttm_buffer_object { + struct ttm_bo_device *bdev; + struct kref kref; + struct kref list_kref; + + /* + * If there is a possibility that the usage variable is zero, + * then dev->struct_mutex should be locked before incrementing it. + */ + + uint32_t proposed_flags; + unsigned long buffer_start; + enum ttm_bo_type type; + unsigned long offset; + struct ttm_mem_reg mem; + uint32_t val_seq; + bool seq_valid; + + struct list_head lru; + struct list_head ddestroy; + struct list_head swap; + + struct file *persistant_swap_storage; + + void (*destroy) (struct ttm_buffer_object *); + + void *sync_obj_arg; + void *sync_obj; + + uint32_t priv_flags; + wait_queue_head_t event_queue; + struct mutex mutex; + unsigned long num_pages; + + struct ttm_tt *ttm; + struct rb_node vm_rb; + struct drm_mm_node *vm_node; + uint64_t addr_space_offset; + + atomic_t cpu_writers; + atomic_t reserved; + + size_t acc_size; +}; + +/** + * struct ttm_bo_kmap_obj + * + * @virtual: The current kernel virtual address. + * @page: The page when kmap'ing a single page. + * @bo_kmap_type: Type of bo_kmap. + * + * Object describing a kernel mapping. Since a TTM bo may be located + * in various memory types with various caching policies, the + * mapping can either be an ioremap, a vmap, a kmap or part of a + * premapped region. + */ + +struct ttm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + ttm_bo_map_iomap, + ttm_bo_map_vmap, + ttm_bo_map_kmap, + ttm_bo_map_premapped, + } bo_kmap_type; +}; + +/** + * ttm_bo_reference - reference a struct ttm_buffer_object + * + * @bo: The buffer object. + * + * Returns a refcounted pointer to a buffer object. + */ + +static inline struct ttm_buffer_object *ttm_bo_reference(struct + ttm_buffer_object *bo) +{ + kref_get(&bo->kref); + return bo; +} + +/** + * ttm_bo_wait - wait for buffer idle. + * + * @bo: The buffer object. + * @interruptible: Use interruptible wait. + * @no_wait: Return immediately if buffer is busy. + * + * This function must be called with the bo::mutex held, and makes + * sure any previous rendering to the buffer is completed. + * Note: It might be necessary to block validations before the + * wait by reserving the buffer. + * Returns -EBUSY if no_wait is true and the buffer is busy. + * Returns -ERESTART if interrupted by a signal. + */ +extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, + bool interruptible, bool no_wait); +/** + * ttm_buffer_object_validate + * + * @bo: The buffer object. + * @interruptible: Sleep interruptible if sleeping. + * @no_wait: Return immediately if the buffer is busy. + * + * Changes placement and caching policy of the buffer object + * according to bo::proposed_flags. + * Returns + * -EINVAL on invalid proposed_flags. + * -ENOMEM on out-of-memory condition. + * -EBUSY if no_wait is true and buffer busy. + * -ERESTART if interrupted by a signal. + */ +extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait); +/** + * ttm_bo_unref + * + * @bo: The buffer object. + * + * Unreference and clear a pointer to a buffer object. + */ +extern void ttm_bo_unref(struct ttm_buffer_object **bo); + +/** + * ttm_bo_synccpu_write_grab + * + * @bo: The buffer object: + * @no_wait: Return immediately if buffer is busy. + * + * Synchronizes a buffer object for CPU RW access. This means + * blocking command submission that affects the buffer and + * waiting for buffer idle. This lock is recursive. + * Returns + * -EBUSY if the buffer is busy and no_wait is true. 
+ * -ERESTART if interrupted by a signal. + */ + +extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); +/** + * ttm_bo_synccpu_write_release: + * + * @bo: The buffer object. + * + * Releases a synccpu lock. + */ +extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); + +/** + * ttm_buffer_object_init + * + * @bdev: Pointer to a ttm_bo_device struct. + * @bo: Pointer to a ttm_buffer_object to be initialized. + * @size: Requested size of buffer object. + * @type: Requested type of buffer object. + * @flags: Initial placement flags. + * @page_alignment: Data alignment in pages. + * @buffer_start: Virtual address of user space data backing a + * user buffer object. + * @interruptible: If needing to sleep to wait for GPU resources, + * sleep interruptible. + * @persistant_swap_storage: Usually the swap storage is deleted for buffers + * pinned in physical memory. If this behaviour is not desired, this member + * holds a pointer to a persistant shmem object. Typically, this would + * point to the shmem object backing a GEM object if TTM is used to back a + * GEM user interface. + * @acc_size: Accounted size for this object. + * @destroy: Destroy function. Use NULL for kfree(). + * + * This function initializes a pre-allocated struct ttm_buffer_object. + * As this object may be part of a larger structure, this function, + * together with the @destroy function, + * enables driver-specific objects derived from a ttm_buffer_object. + * On successful return, the object kref and list_kref are set to 1. + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid placement flags. + * -ERESTART: Interrupted by signal while sleeping waiting for resources. + */ + +extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + uint32_t flags, + uint32_t page_alignment, + unsigned long buffer_start, + bool interruptible, + struct file *persistant_swap_storage, + size_t acc_size, + void (*destroy) (struct ttm_buffer_object *)); +/** + * ttm_buffer_object_create + * + * @bdev: Pointer to a ttm_bo_device struct. + * @size: Requested size of buffer object. + * @type: Requested type of buffer object. + * @flags: Initial placement flags. + * @page_alignment: Data alignment in pages. + * @buffer_start: Virtual address of user space data backing a + * user buffer object. + * @interruptible: If needing to sleep while waiting for GPU resources, + * sleep interruptible. + * @persistant_swap_storage: Usually the swap storage is deleted for buffers + * pinned in physical memory. If this behaviour is not desired, this member + * holds a pointer to a persistant shmem object. Typically, this would + * point to the shmem object backing a GEM object if TTM is used to back a + * GEM user interface. + * @p_bo: On successful completion *p_bo points to the created object. + * + * This function allocates a ttm_buffer_object, and then calls + * ttm_buffer_object_init on that object. + * The destroy function is set to kfree(). + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid placement flags. + * -ERESTART: Interrupted by signal while waiting for resources.
+ */ + +extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, + unsigned long size, + enum ttm_bo_type type, + uint32_t flags, + uint32_t page_alignment, + unsigned long buffer_start, + bool interruptible, + struct file *persistant_swap_storage, + struct ttm_buffer_object **p_bo); + +/** + * ttm_bo_check_placement + * + * @bo: the buffer object. + * @set_flags: placement flags to set. + * @clr_flags: placement flags to clear. + * + * Performs minimal validity checking on an intended change of + * placement flags. + * Returns + * -EINVAL: Intended change is invalid or not allowed. + */ + +extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, + uint32_t set_flags, uint32_t clr_flags); + +/** + * ttm_bo_init_mm + * + * @bdev: Pointer to a ttm_bo_device struct. + * @type: The memory type. + * @p_offset: offset for managed area in pages. + * @p_size: size of managed area in pages. + * + * Initialize a manager for a given memory type. + * Note: if part of driver firstopen, it must be protected from a + * potentially racing lastclose. + * Returns: + * -EINVAL: invalid size or memory type. + * -ENOMEM: Not enough memory. + * May also return driver-specified errors. + */ + +extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, + unsigned long p_offset, unsigned long p_size); +/** + * ttm_bo_clean_mm + * + * @bdev: Pointer to a ttm_bo_device struct. + * @mem_type: The memory type. + * + * Take down a manager for a given memory type after first walking + * the LRU list to evict any buffers left alive. + * + * Normally, this function is part of lastclose() or unload(), and at that + * point there shouldn't be any buffers left created by user-space, since + * they should have been removed by the file descriptor release() method. + * However, before this function is run, make sure to signal all sync objects, + * and verify that the delayed delete queue is empty. The driver must also + * make sure that there are no NO_EVICT buffers present in this memory type + * when the call is made. + * + * If this function is part of a VT switch, the caller must make sure that + * there are no applications currently validating buffers before this + * function is called. The caller can do that by first taking the + * struct ttm_bo_device::ttm_lock in write mode. + * + * Returns: + * -EINVAL: invalid or uninitialized memory type. + * -EBUSY: There are still buffers left in this memory type. + */ + +extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); + +/** + * ttm_bo_evict_mm + * + * @bdev: Pointer to a ttm_bo_device struct. + * @mem_type: The memory type. + * + * Evicts all buffers on the lru list of the memory type. + * This is normally part of a VT switch or an + * out-of-memory-space-due-to-fragmentation handler. + * The caller must make sure that there are no other processes + * currently validating buffers, and can do that by taking the + * struct ttm_bo_device::ttm_lock in write mode. + * + * Returns: + * -EINVAL: Invalid or uninitialized memory type. + * -ERESTART: The call was interrupted by a signal while waiting to + * evict a buffer. + */ + +extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); + +/** + * ttm_kmap_obj_virtual + * + * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap. + * @is_iomem: Pointer to an integer that on return indicates 1 if the + * virtual map is io memory, 0 if normal memory. + * + * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
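 *
 * Editor's note (illustrative sketch, not part of the original patch):
 * the usual pairing with ttm_bo_kmap()/ttm_bo_kunmap(), declared further
 * below, looks like:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access virt, honouring is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 *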
+ * If *is_iomem is 1 on return, the virtual address points to an io memory area
+ * that should strictly be accessed by the iowriteXX() and similar functions.
+ */
+
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+					 bool *is_iomem)
+{
+	*is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
+		     map->bo_kmap_type == ttm_bo_map_premapped);
+	return map->virtual;
+}
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+
+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+		       unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
+
+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space. The address space will
+ * have the same size as the bo, and start at offset 0.
+ *
+ * This function is intended to be called by the fbdev mmap method
+ * if the fbdev address space is to be backed by a bo.
+ */
+
+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
+			  struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_mmap - mmap out of the ttm device address space.
+ *
+ * @filp: filp as input from the mmap method.
+ * @vma: vma as input from the mmap method.
+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ *
+ * This function is intended to be called by the device mmap method,
+ * if the device address space is to be backed by the bo manager.
+ */
+
+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+		       struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_io
+ *
+ * @bdev: Pointer to the struct ttm_bo_device.
+ * @filp: Pointer to the struct file attempting to read / write.
+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
+ * @rbuf: User-space pointer to address of buffer to read into. NULL on write.
+ * @count: Number of bytes to read / write.
+ * @f_pos: Pointer to current file position.
+ * @write: 1 for write, 0 for read.
+ *
+ * This function implements read / write into ttm buffer objects, and is
+ * intended to be called from the fops::read and fops::write methods.
+ * Returns:
+ * See man(2) write, man(2) read. In particular, the function may return
+ * -EINTR if interrupted by a signal.
+ */
+
+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+			 const char __user * wbuf, char __user * rbuf,
+			 size_t count, loff_t * f_pos, bool write);
+
+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
new file mode 100644
index 0000000..f7efb45
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
@@ -0,0 +1,859 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+#ifndef _TTM_BO_DRIVER_H_
+#define _TTM_BO_DRIVER_H_
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_memory.h"
+#include <drm/drm_mm.h>	/* struct drm_mm; header name restored here is a best guess */
+#include "linux/workqueue.h"
+#include "linux/fs.h"
+#include "linux/spinlock.h"
+
+struct ttm_backend;
+
+struct ttm_backend_func {
+	/**
+	 * struct ttm_backend_func member populate
+	 *
+	 * @backend: Pointer to a struct ttm_backend.
+	 * @num_pages: Number of pages to populate.
+	 * @pages: Array of pointers to ttm pages.
+	 * @dummy_read_page: Page to be used instead of NULL pages in the
+	 * array @pages.
+	 *
+	 * Populate the backend with ttm pages. Depending on the backend,
+	 * it may or may not copy the @pages array.
+	 */
+	int (*populate) (struct ttm_backend *backend,
+			 unsigned long num_pages, struct page **pages,
+			 struct page *dummy_read_page);
+	/**
+	 * struct ttm_backend_func member clear
+	 *
+	 * @backend: Pointer to a struct ttm_backend.
+	 *
+	 * This is an "unpopulate" function. Release all resources
+	 * allocated with populate.
+	 */
+	void (*clear) (struct ttm_backend *backend);
+
+	/**
+	 * struct ttm_backend_func member bind
+	 *
+	 * @backend: Pointer to a struct ttm_backend.
+	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+	 * memory type and location for binding.
+	 *
+	 * Bind the backend pages into the aperture in the location
+	 * indicated by @bo_mem. This function should be able to handle
+	 * differences between aperture- and system page sizes.
+	 */
+	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
+
+	/**
+	 * struct ttm_backend_func member unbind
+	 *
+	 * @backend: Pointer to a struct ttm_backend.
+	 *
+	 * Unbind previously bound backend pages. This function should be
+	 * able to handle differences between aperture- and system page sizes.
+	 */
+	int (*unbind) (struct ttm_backend *backend);
+
+	/**
+	 * struct ttm_backend_func member destroy
+	 *
+	 * @backend: Pointer to a struct ttm_backend.
+	 *
+	 * Destroy the backend.
+	 */
+	void (*destroy) (struct ttm_backend *backend);
+};
+
+/**
+ * struct ttm_backend
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @flags: For driver use.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
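+ *
+ * A driver typically embeds this struct in a larger, driver-private
+ * backend structure and recovers that structure with container_of()
+ * in its ttm_backend_func hooks. A sketch (the type and member names
+ * are illustrative only, not part of this interface):
+ *
+ *	struct my_backend {
+ *		struct ttm_backend base;
+ *		unsigned long private_state;
+ *	};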
+ *
+ */
+
+struct ttm_backend {
+	struct ttm_bo_device *bdev;
+	uint32_t flags;
+	struct ttm_backend_func *func;
+};
+
+#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
+#define TTM_PAGE_FLAG_USER (1 << 1)
+#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
+
+enum ttm_caching_state {
+	tt_uncached,
+	tt_wc,
+	tt_cached
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
+ * pointer.
+ * @pages: Array of pages backing the data.
+ * @first_himem_page: Himem pages are put last in the page array, which
+ * enables us to run caching attribute changes on only the first part
+ * of the page array containing lomem pages. This is the index of the
+ * first himem page.
+ * @last_lomem_page: Index of the last lomem page in the page array.
+ * @page_flags: Flags for this page array, TTM_PAGE_FLAG_XX.
+ * @num_pages: Number of pages in the page array.
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @be: Pointer to the ttm backend.
+ * @tsk: The task for user ttm.
+ * @start: virtual address for user ttm.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+
+struct ttm_tt {
+	struct page *dummy_read_page;
+	struct page **pages;
+	long first_himem_page;
+	long last_lomem_page;
+	uint32_t page_flags;
+	unsigned long num_pages;
+	struct ttm_bo_device *bdev;
+	struct ttm_backend *be;
+	struct task_struct *tsk;
+	unsigned long start;
+	struct file *swap_storage;
+	enum ttm_caching_state caching_state;
+	enum {
+		tt_bound,
+		tt_unbound,
+		tt_unpopulated,
+	} state;
+};
+
+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0)	/* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1)	/* Memory mappable */
+#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)	/* Fixed memory needs ioremap
+						   before kernel access. */
+#define TTM_MEMTYPE_FLAG_CMA (1 << 3)	/* Can't map aperture */
+
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @io_offset: The io_offset of the first managed page of IO memory or
+ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
+ * memory, this should be set to 0.
+ * @io_size: The size of a managed IO region (fixed memory or aperture).
+ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
+ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
+ * @io_addr should be set to NULL.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @manager: The range manager used for this memory type. FIXME: If the aperture
+ * has a page size different from the underlying system, the granularity
+ * of this manager should take care of this. But the range allocating code
+ * in ttm_bo.c needs to be modified for this.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+struct ttm_mem_type_manager {
+
+	/*
+	 * No protection. Constant from start.
+	 */
+
+	bool has_type;
+	bool use_type;
+	uint32_t flags;
+	unsigned long gpu_offset;
+	unsigned long io_offset;
+	unsigned long io_size;
+	void *io_addr;
+	uint64_t size;
+	uint32_t available_caching;
+	uint32_t default_caching;
+
+	/*
+	 * Protected by the bdev->lru_lock.
+	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
+	 * Plays ill with list removal, though.
+	 */
+
+	struct drm_mm manager;
+	struct list_head lru;
+};
+
+/**
+ * struct ttm_bo_driver
+ *
+ * @mem_type_prio: Priority array of memory types to place a buffer object in
+ * if it fits without evicting buffers from any of these memory types.
+ * @mem_busy_prio: Priority array of memory types to place a buffer object in
+ * if it needs to evict buffers to make room.
+ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
+ * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
+ * has been evicted.
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
+ * structure.
+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
+ * @move: Callback for a driver to hook in accelerated functions to move
+ * a buffer. If set to NULL, a potentially slow memcpy() move is used.
+ * @sync_obj_signaled: See ttm_fence_api.h
+ * @sync_obj_wait: See ttm_fence_api.h
+ * @sync_obj_flush: See ttm_fence_api.h
+ * @sync_obj_unref: See ttm_fence_api.h
+ * @sync_obj_ref: See ttm_fence_api.h
+ */
+
+struct ttm_bo_driver {
+	const uint32_t *mem_type_prio;
+	const uint32_t *mem_busy_prio;
+	uint32_t num_mem_type_prio;
+	uint32_t num_mem_busy_prio;
+
+	/**
+	 * struct ttm_bo_driver member create_ttm_backend_entry
+	 *
+	 * @bdev: The buffer object device.
+	 *
+	 * Create a driver-specific struct ttm_backend.
+	 */
+
+	struct ttm_backend *(*create_ttm_backend_entry)
+	 (struct ttm_bo_device *bdev);
+
+	/**
+	 * struct ttm_bo_driver member invalidate_caches
+	 *
+	 * @bdev: the buffer object device.
+	 * @flags: new placement of the rebound buffer object.
+	 *
+	 * A previously evicted buffer has been rebound in a
+	 * potentially new location. Tell the driver that it might
+	 * consider invalidating read (texture) caches on the next command
+	 * submission as a consequence.
+	 */
+
+	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
+	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
+			      struct ttm_mem_type_manager *man);
+	/**
+	 * struct ttm_bo_driver member evict_flags:
+	 *
+	 * @bo: the buffer object to be evicted
+	 *
+	 * Return the bo flags for a buffer which is not mapped to the hardware.
+	 * These will be placed in proposed_flags so that when the move is
+	 * finished, they'll end up in bo->mem.flags
+	 */
+
+	uint32_t (*evict_flags) (struct ttm_buffer_object *bo);
+	/**
+	 * struct ttm_bo_driver member move:
+	 *
+	 * @bo: the buffer to move
+	 * @evict: whether this move is evicting the buffer from
+	 * the graphics address space
+	 * @interruptible: Use interruptible sleeps if possible when sleeping.
+	 * @no_wait: whether this should give up and return -EBUSY
+	 * if this move would require sleeping
+	 * @new_mem: the new memory region receiving the buffer
+	 *
+	 * Move a buffer between two memory regions.
+	 */
+	int (*move) (struct ttm_buffer_object *bo,
+		     bool evict, bool interruptible,
+		     bool no_wait, struct ttm_mem_reg *new_mem);
+
+	/**
+	 * struct ttm_bo_driver_member verify_access
+	 *
+	 * @bo: Pointer to a buffer object.
+	 * @filp: Pointer to a struct file trying to access the object.
+	 *
+	 * Called from the map / write / read methods to verify that the
+	 * caller is permitted to access the buffer object.
+	 * This member may be set to NULL, which will refuse this kind of
+	 * access for all buffer objects.
+	 * This function should return 0 if access is granted, -EPERM otherwise.
+	 */
+	int (*verify_access) (struct ttm_buffer_object *bo,
+			      struct file *filp);
+
+	/**
+	 * In case a driver writer dislikes the TTM fence objects,
+	 * the driver writer can replace those with sync objects of
+	 * his / her own. If it turns out that no driver writer is
+	 * using these, I suggest we remove these hooks and plug in
+	 * fences directly. The bo driver needs the following functionality:
+	 * See the corresponding functions in the fence object API
+	 * documentation.
+	 */
+
+	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
+	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+			      bool lazy, bool interruptible);
+	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+	void (*sync_obj_unref) (void **sync_obj);
+	void *(*sync_obj_ref) (void *sync_obj);
+};
+
+#define TTM_NUM_MEM_TYPES 11
+
+#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0)	/* Buffer object is evicted. */
+#define TTM_BO_PRIV_FLAG_MOVING (1 << 1)	/* Buffer object is moving and needs
+						   idling before CPU mapping */
+/**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
+ * @count: Current number of buffer objects.
+ * @pages: Current number of pinned pages.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
+ * used by a buffer object. This is excluding page arrays and backing pages.
+ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
+ * @man: An array of mem_type_managers.
+ * @addr_space_mm: Range manager for the device address space.
+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
+ * ddestroy lists.
+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
+ * If a GPU lockup has been detected, this is forced to 0.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ *
+ */
+
+struct ttm_bo_device {
+
+	/*
+	 * Constant after bo device init / atomic.
+	 */
+
+	struct ttm_mem_global *mem_glob;
+	struct ttm_bo_driver *driver;
+	struct page *dummy_read_page;
+	struct ttm_mem_shrink shrink;
+
+	size_t ttm_bo_extra_size;
+	size_t ttm_bo_size;
+
+	rwlock_t vm_lock;
+	/*
+	 * Protected by the vm lock.
+	 */
+	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+	struct rb_root addr_space_rb;
+	struct drm_mm addr_space_mm;
+
+	/*
+	 * Might want to change this to one lock per manager.
+	 */
+	spinlock_t lru_lock;
+	/*
+	 * Protected by the lru lock.
+	 */
+	struct list_head ddestroy;
+	struct list_head swap_lru;
+
+	/*
+	 * Protected by load / firstopen / lastclose / unload sync.
+	 */
+
+	bool nice_mode;
+	struct address_space *dev_mapping;
+
+	/*
+	 * Internal protection.
+	 */
+
+	struct delayed_work wq;
+};
+
+/**
+ * ttm_flag_masked
+ *
+ * @old: Pointer to the result and original value.
+ * @new: New value of bits.
+ * @mask: Mask of bits to change.
+ *
+ * Convenience function to change a number of bits identified by a mask.
+ */
+
+static inline uint32_t
+ttm_flag_masked(uint32_t * old, uint32_t new, uint32_t mask)
+{
+	*old ^= (*old ^ new) & mask;
+	return *old;
+}
+
+/**
+ * ttm_tt_create
+ *
+ * @bdev: pointer to a struct ttm_bo_device.
+ * @size: Size of the data that needs backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
+				    unsigned long size,
+				    uint32_t page_flags,
+				    struct page *dummy_read_page);
+
+/**
+ * ttm_tt_set_user:
+ *
+ * @ttm: The struct ttm_tt to populate.
+ * @tsk: A struct task_struct for which @start is a valid user-space address.
+ * @start: A valid user-space address.
+ * @num_pages: Size in pages of the user memory area.
+ *
+ * Populate a struct ttm_tt with a user-space memory area after first pinning
+ * the pages backing it.
+ * Returns:
+ * !0: Error.
+ */
+
+extern int ttm_tt_set_user(struct ttm_tt *ttm,
+			   struct task_struct *tsk,
+			   unsigned long start, unsigned long num_pages);
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy a struct ttm_tt.
+ */
+extern void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+extern void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_get_page:
+ *
+ * @ttm: The struct ttm_tt.
+ * @index: Index of the desired page.
+ *
+ * Return a pointer to the struct page backing @ttm at page
+ * index @index. If the page is unpopulated, one will be allocated to
+ * populate that index.
+ *
+ * Returns:
+ * NULL on OOM.
+ */
+extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+
+/**
+ * ttm_tt_cache_flush:
+ *
+ * @pages: An array of pointers to struct pages to flush.
+ * @num_pages: Number of pages to flush.
+ *
+ * Flush the data of the indicated pages from the cpu caches.
+ * This is used when changing caching attributes of the pages from
+ * cache-coherent.
+ */
+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or write-combined,
+ * all CPU caches will first be flushed to make sure the data of the pages
+ * hit RAM. This function may be very costly as it involves global TLB
+ * and cache flushes and potential page splitting / combining.
+ */
+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+extern int ttm_tt_swapout(struct ttm_tt *ttm,
+			  struct file *persistant_swap_storage);
+
+/*
+ * ttm_bo.c
+ */
+
+/**
+ * ttm_mem_reg_is_pci
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ *
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
+ */
+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+			       struct ttm_mem_reg *mem);
+
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
+ * up.
+ * @interruptible: Sleep interruptible when sleeping.
+ * @no_wait: Don't sleep waiting for space to become available.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTART: An interruptible sleep was interrupted by a signal.
+ */
+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *mem,
+			    bool interruptible, bool no_wait);
+/**
+ * ttm_bo_wait_cpu
+ *
+ * @bo: Pointer to a struct ttm_buffer_object.
+ * @no_wait: Don't sleep while waiting.
+ *
+ * Wait until a buffer object is no longer sync'ed for CPU access.
+ * Returns:
+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
+ * -ERESTART: An interruptible sleep was interrupted by a signal.
+ */
+
+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
+
+/**
+ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
+ *
+ * @bdev: Pointer to the struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ * @bus_base: On return the base of the PCI region.
+ * @bus_offset: On return the byte offset into the PCI region.
+ * @bus_size: On return the byte size of the buffer object or zero if
+ * the buffer object memory is not accessible through a PCI region.
+ *
+ * Returns:
+ * -EINVAL if the buffer object is currently not mappable.
+ * 0 otherwise.
+ */
+
+extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
+			     struct ttm_mem_reg *mem,
+			     unsigned long *bus_base,
+			     unsigned long *bus_offset,
+			     unsigned long *bus_size);
+
+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_device_init
+ *
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @mem_glob: A pointer to an initialized struct ttm_mem_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+ *
+ * Initializes a struct ttm_bo_device.
+ * Returns:
+ * !0: Failure.
+ */
+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
+			      struct ttm_mem_global *mem_glob,
+			      struct ttm_bo_driver *driver,
+			      uint64_t file_page_offset);
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation) and removes it from the lru lists, while taking
+ * a number of measures to prevent deadlocks.
+ *
+ * Deadlocks may occur when two processes try to reserve multiple buffers in
+ * different order, either by will or as a result of a buffer being evicted
+ * to make room for a buffer already reserved. (Buffers are reserved before
+ * they are evicted). The following algorithm prevents such deadlocks from
+ * occurring:
+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
+ * reservation they are removed from the lru list. This stops a reserved buffer
+ * from being evicted. However the lru spinlock is released between the time
+ * a buffer is selected for eviction and the time it is reserved.
+ * Therefore a check is made when a buffer is reserved for eviction, that it
+ * is still the first buffer in the lru list, before it is removed from the
+ * list. @check_lru == 1 forces this check. If it fails, the function returns
+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
+ * the procedure.
+ * 2) Processes attempting to reserve multiple buffers other than for eviction
+ * (typically execbuf) should first obtain a unique 32-bit
+ * validation sequence number,
+ * and call this function with @use_sequence == 1 and @sequence == the unique
+ * sequence number. If upon call of this function, the buffer object is already
+ * reserved, the validation sequence is checked against the validation
+ * sequence of the process currently reserving the buffer,
+ * and if the current validation sequence is greater than that of the process
+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
+ * waiting for the buffer to become unreserved, after which it retries
+ * reserving.
+ * The caller should, when receiving an -EAGAIN error,
+ * release all its buffer reservations, wait for @bo to become unreserved, and
+ * then rerun the validation with the same validation sequence. This procedure
+ * will always guarantee that the process with the lowest validation sequence
+ * will eventually succeed, preventing both deadlocks and starvation.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock. Release all buffer
+ * reservations, wait for @bo to become unreserved and try again
+ * (only if use_sequence == 1).
+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
+			  bool interruptible,
+			  bool no_wait, bool use_sequence, uint32_t sequence);
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_wait_unreserved
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Wait for a struct ttm_buffer_object to become unreserved.
+ * This is typically used in the execbuf code to relax cpu-usage during
+ * a potential deadlock condition backoff.
+ */
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+				  bool interruptible);
+
+/**
+ * ttm_bo_block_reservation
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Use interruptible sleep when waiting. + * @no_wait: Don't sleep, but rather return -EBUSY. + * + * Block reservation for validation by simply reserving the buffer. This is intended + * for single buffer use only without eviction, and thus needs no deadlock protection. + * + * Returns: + * -EBUSY: If no_wait == 1 and the buffer is already reserved. + * -ERESTART: If interruptible == 1 and the process received a signal while sleeping. + */ +extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait); + +/** + * ttm_bo_unblock_reservation + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Unblocks reservation leaving lru lists untouched. + */ +extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo); + +/* + * ttm_bo_util.c + */ + +/** + * ttm_bo_move_ttm + * + * @bo: A pointer to a struct ttm_buffer_object. + * @evict: 1: This is an eviction. Don't try to pipeline. + * @no_wait: Never sleep, but rather return with -EBUSY. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Optimized move function for a buffer object with both old and + * new placement backed by a TTM. The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + bool evict, bool no_wait, struct ttm_mem_reg *new_mem); + +/** + * ttm_bo_move_memcpy + * + * @bo: A pointer to a struct ttm_buffer_object. + * @evict: 1: This is an eviction. Don't try to pipeline. + * @no_wait: Never sleep, but rather return with -EBUSY. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Fallback move function for a mappable buffer object in mappable memory. + * The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + bool evict, + bool no_wait, struct ttm_mem_reg *new_mem); + +/** + * ttm_bo_free_old_node + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Utility function to free an old placement after a successful move. + */ +extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); + +/** + * ttm_bo_move_accel_cleanup. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @sync_obj: A sync object that signals when moving is complete. + * @sync_obj_arg: An argument to pass to the sync object idle / wait + * functions. + * @evict: This is an evict move. Don't return until the buffer is idle. + * @no_wait: Never sleep, but rather return with -EBUSY. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Accelerated move function to be called when an accelerated move + * has been scheduled. The function will create a new temporary buffer object + * representing the old placement, and put the sync object on both buffer + * objects. After that the newly created buffer object is unref'd to be + * destroyed when the move is complete. This will help pipeline + * buffer moves. 
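+ *
+ * A driver move() implementation might use it roughly as follows
+ * (sketch only; my_accel_copy() and my_fence_arg are illustrative
+ * names, not part of this API):
+ *
+ *	fence = my_accel_copy(bo, new_mem);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	return ttm_bo_move_accel_cleanup(bo, fence, my_fence_arg,
+ *					 evict, no_wait, new_mem);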
+ */
+
+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+				     void *sync_obj,
+				     void *sync_obj_arg,
+				     bool evict, bool no_wait,
+				     struct ttm_mem_reg *new_mem);
+/**
+ * ttm_io_prot
+ *
+ * @caching_flags: Placement caching flags, TTM_PL_FLAG_XX.
+ * @tmp: Page protection flag for a normal, cached mapping.
+ *
+ * Utility function that returns the pgprot_t that should be used for
+ * setting up a PTE with the caching model indicated by @caching_flags.
+ */
+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+
+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#define TTM_HAS_AGP
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_backend_init
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @bridge: The agp bridge this device is sitting on.
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+						struct agp_bridge_data *bridge);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
new file mode 100644
index 0000000..6c92310
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
@@ -0,0 +1,536 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström
+ */
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement_common.h"
+#include "ttm/ttm_pat_compat.h"
+#include <linux/io.h>	/* these four header names are reconstructed; */
+#include <linux/highmem.h>	/* the originals were lost in extraction */
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	if (old_mem->mm_node) {
+		spin_lock(&bo->bdev->lru_lock);
+		drm_mm_put_block(old_mem->mm_node);
+		spin_unlock(&bo->bdev->lru_lock);
+	}
+	old_mem->mm_node = NULL;
+}
+
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+{
+	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_proposed_flags = old_mem->proposed_flags;
+	int ret;
+
+	if (old_mem->mem_type != TTM_PL_SYSTEM) {
+		ttm_tt_unbind(ttm);
+		ttm_bo_free_old_node(bo);
+		ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
+				TTM_PL_MASK_MEM);
+		old_mem->mem_type = TTM_PL_SYSTEM;
+		save_flags = old_mem->flags;
+	}
+
+	ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (new_mem->mem_type != TTM_PL_SYSTEM) {
+		ret = ttm_tt_bind(ttm, new_mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->proposed_flags = save_proposed_flags;
+	ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
+	return 0;
+}
+
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+			void **virtual)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	unsigned long bus_offset;
+	unsigned long bus_size;
+	unsigned long bus_base;
+	int ret;
+	void *addr;
+
+	*virtual = NULL;
+	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
+	if (ret || bus_size == 0)
+		return ret;
+
+	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+	else {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+		if (mem->flags & TTM_PL_FLAG_WC)
+			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+		else
+			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+#else
+		addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+#endif
+		if (!addr)
+			return -ENOMEM;
+	}
+	*virtual = addr;
+	return 0;
+}
+
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+			 void *virtual)
+{
+	struct ttm_mem_type_manager *man;
+
+	man = &bdev->man[mem->mem_type];
+
+	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+		iounmap(virtual);
+}
+
+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+	uint32_t *dstP =
+	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+	uint32_t *srcP =
+	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+	int i;
+	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+		iowrite32(ioread32(srcP++), dstP++);
+	return 0;
+}
+
+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+				unsigned long page)
+{
+	struct page *d = ttm_tt_get_page(ttm, page);
+	void *dst;
+
+	if (!d)
+		return -ENOMEM;
+
+	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+	dst = kmap(d);
+	if (!dst)
+		return -ENOMEM;
+
+	memcpy_fromio(dst, src, PAGE_SIZE);
+	kunmap(d);
+	return 0;
+}
+
+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+				unsigned long page)
+{
+	struct page *s = ttm_tt_get_page(ttm, page);
+	void *src;
+
+	if (!s)
+		return -ENOMEM;
+
+	dst = (void *)((unsigned
long)dst + (page << PAGE_SHIFT)); + src = kmap(s); + if (!src) + return -ENOMEM; + + memcpy_toio(dst, src, PAGE_SIZE); + kunmap(s); + return 0; +} + +int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + bool evict, bool no_wait, struct ttm_mem_reg *new_mem) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; + struct ttm_tt *ttm = bo->ttm; + struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_mem_reg old_copy = *old_mem; + void *old_iomap; + void *new_iomap; + int ret; + uint32_t save_flags = old_mem->flags; + uint32_t save_proposed_flags = old_mem->proposed_flags; + unsigned long i; + unsigned long page; + unsigned long add = 0; + int dir; + + ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); + if (ret) + return ret; + ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap); + if (ret) + goto out; + + if (old_iomap == NULL && new_iomap == NULL) + goto out2; + if (old_iomap == NULL && ttm == NULL) + goto out2; + + add = 0; + dir = 1; + + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { + dir = -1; + add = new_mem->num_pages - 1; + } + + for (i = 0; i < new_mem->num_pages; ++i) { + page = i * dir + add; + if (old_iomap == NULL) + ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); + else if (new_iomap == NULL) + ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); + else + ret = ttm_copy_io_page(new_iomap, old_iomap, page); + if (ret) + goto out1; + } + mb(); + out2: + ttm_bo_free_old_node(bo); + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->proposed_flags = save_proposed_flags; + ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE); + + if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { + ttm_tt_unbind(ttm); + ttm_tt_destroy(ttm); + bo->ttm = NULL; + } + + out1: + ttm_mem_reg_iounmap(bdev, new_mem, new_iomap); + out: + ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); + return ret; +} + +/** + * ttm_buffer_object_transfer + * + * @bo: A pointer to a struct ttm_buffer_object. + * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object, + * holding the data of @bo with the old placement. + * + * This is a utility function that may be called after an accelerated move + * has been scheduled. A new buffer object is created as a placeholder for + * the old data while it's being copied. When that buffer object is idle, + * it can be destroyed, releasing the space of the old placement. + * Returns: + * !0: Failure. 
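+ *
+ * On success the new object is returned with its kref and list_kref
+ * initialized to one, i.e. with a single reference owned by the caller.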
+ */
+
+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+				      struct ttm_buffer_object **new_obj)
+{
+	struct ttm_buffer_object *fbo;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+
+	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+	if (!fbo)
+		return -ENOMEM;
+
+	*fbo = *bo;
+	mutex_init(&fbo->mutex);
+	mutex_lock(&fbo->mutex);
+
+	init_waitqueue_head(&fbo->event_queue);
+	INIT_LIST_HEAD(&fbo->ddestroy);
+	INIT_LIST_HEAD(&fbo->lru);
+
+	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	if (fbo->mem.mm_node)
+		fbo->mem.mm_node->private = (void *)fbo;
+	kref_init(&fbo->list_kref);
+	kref_init(&fbo->kref);
+
+	mutex_unlock(&fbo->mutex);
+
+	*new_obj = fbo;
+	return 0;
+}
+
+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+{
+#if defined(__i386__) || defined(__x86_64__)
+	if (caching_flags & TTM_PL_FLAG_WC) {
+		tmp = pgprot_ttm_x86_wc(tmp);
+	} else if (boot_cpu_data.x86 > 3 &&
+		   (caching_flags & TTM_PL_FLAG_UNCACHED)) {
+		tmp = pgprot_noncached(tmp);
+	}
+#elif defined(__powerpc__)
+	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
+		pgprot_val(tmp) |= _PAGE_NO_CACHE;
+		if (caching_flags & TTM_PL_FLAG_UNCACHED)
+			pgprot_val(tmp) |= _PAGE_GUARDED;
+	}
+#endif
+#if defined(__ia64__)
+	if (caching_flags & TTM_PL_FLAG_WC)
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__)
+	if (!(caching_flags & TTM_PL_FLAG_CACHED))
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+			  unsigned long bus_base,
+			  unsigned long bus_offset,
+			  unsigned long bus_size,
+			  struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+		map->bo_kmap_type = ttm_bo_map_premapped;
+		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+	} else {
+		map->bo_kmap_type = ttm_bo_map_iomap;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+		if (mem->flags & TTM_PL_FLAG_WC)
+			map->virtual = ioremap_wc(bus_base + bus_offset,
+						  bus_size);
+		else
+			map->virtual = ioremap_nocache(bus_base + bus_offset,
+						       bus_size);
+#else
+		map->virtual = ioremap_nocache(bus_base + bus_offset,
+					       bus_size);
+#endif
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+			   unsigned long start_page,
+			   unsigned long num_pages,
+			   struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	pgprot_t prot;
+	struct ttm_tt *ttm = bo->ttm;
+	struct page *d;
+	bool do_kmap = false;
+	int i;
+
+	BUG_ON(!ttm);
+	if (num_pages == 1) {
+		map->page = ttm_tt_get_page(ttm, start_page);
+		do_kmap = (!PageHighMem(map->page) ||
+			   (mem->flags & TTM_PL_FLAG_CACHED));
+	}
+
+	if (do_kmap) {
+		/*
+		 * We're mapping a single page, and the desired
+		 * page protection is consistent with the bo.
+		 */
+		map->bo_kmap_type = ttm_bo_map_kmap;
+		map->virtual = kmap(map->page);
+	} else {
+		/*
+		 * Populate the part we're mapping.
+		 */
+		for (i = start_page; i < start_page + num_pages; ++i) {
+			d = ttm_tt_get_page(ttm, i);
+			if (!d)
+				return -ENOMEM;
+		}
+
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+		 */
+		prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
+			PAGE_KERNEL :
+			ttm_io_prot(mem->flags, PAGE_KERNEL);
+		map->bo_kmap_type = ttm_bo_map_vmap;
+		map->virtual = vmap(ttm->pages + start_page, num_pages,
+				    0, prot);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+		unsigned long start_page, unsigned long num_pages,
+		struct ttm_bo_kmap_obj *map)
+{
+	int ret;
+	unsigned long bus_base;
+	unsigned long bus_offset;
+	unsigned long bus_size;
+
+	BUG_ON(!list_empty(&bo->swap));
+	map->virtual = NULL;
+	if (num_pages > bo->num_pages)
+		return -EINVAL;
+	if (start_page > bo->num_pages)
+		return -EINVAL;
+#if 0
+	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+#endif
+	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
+				&bus_offset, &bus_size);
+	if (ret)
+		return ret;
+	if (bus_size == 0) {
+		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+	} else {
+		bus_offset += start_page << PAGE_SHIFT;
+		bus_size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+	}
+}
+
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+	if (!map->virtual)
+		return;
+	switch (map->bo_kmap_type) {
+	case ttm_bo_map_iomap:
+		iounmap(map->virtual);
+		break;
+	case ttm_bo_map_vmap:
+		vunmap(map->virtual);
+		break;
+	case ttm_bo_map_kmap:
+		kunmap(map->page);
+		break;
+	case ttm_bo_map_premapped:
+		break;
+	default:
+		BUG();
+	}
+	map->virtual = NULL;
+	map->page = NULL;
+}
+
+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
+		    unsigned long dst_offset,
+		    unsigned long *pfn, pgprot_t *prot)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_bo_device *bdev = bo->bdev;
+	unsigned long bus_offset;
+	unsigned long bus_size;
+	unsigned long bus_base;
+	int ret;
+
+	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
+				&bus_size);
+	if (ret)
+		return -EINVAL;
+
+	if (bus_size != 0)
+		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
+	else if (!bo->ttm)
+		return -EINVAL;
+	else
+		*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
+						   dst_offset >> PAGE_SHIFT));
+
+	*prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
+	    PAGE_KERNEL : ttm_io_prot(mem->flags, PAGE_KERNEL);
+	return 0;
+}
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+			      void *sync_obj,
+			      void *sync_obj_arg,
+			      bool evict, bool no_wait,
+			      struct ttm_mem_reg *new_mem)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_proposed_flags = old_mem->proposed_flags;
+	struct ttm_buffer_object *old_obj;
+
+	if (bo->sync_obj)
+		driver->sync_obj_unref(&bo->sync_obj);
+	bo->sync_obj = driver->sync_obj_ref(sync_obj);
+	bo->sync_obj_arg = sync_obj_arg;
+	if (evict) {
+		ret = ttm_bo_wait(bo, false, false, false);
+		if (ret)
+			return ret;
+		ttm_bo_free_old_node(bo);
+		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+		    (bo->ttm != NULL)) {
+			ttm_tt_unbind(bo->ttm);
+			ttm_tt_destroy(bo->ttm);
+			bo->ttm = NULL;
+		}
+	} else {
+
+		/* This should help pipeline ordinary buffer moves.
+		 *
+		 * Hang old buffer memory on a new buffer object,
+		 * and leave it to be released when the GPU
+		 * operation has completed.
+		 */
+		ret = ttm_buffer_object_transfer(bo, &old_obj);
+		if (ret)
+			return ret;
+		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+			old_obj->ttm = NULL;
+		else
+			bo->ttm = NULL;
+		bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
+		ttm_bo_unreserve(old_obj);
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->proposed_flags = save_proposed_flags;
+	ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
+	return 0;
+}
diff --git a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
new file mode 100644
index 0000000..4d950fc
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
@@ -0,0 +1,596 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement_common.h"
+#include <linux/mm.h>	/* these four header names are reconstructed; */
+#include <linux/version.h>	/* the originals were lost in extraction */
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
+#error "TTM doesn't build on kernel versions below 2.6.25."
+#endif + +#define TTM_BO_VM_NUM_PREFAULT 16 + +static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev, + unsigned long page_start, + unsigned long num_pages) +{ + struct rb_node *cur = bdev->addr_space_rb.rb_node; + unsigned long cur_offset; + struct ttm_buffer_object *bo; + struct ttm_buffer_object *best_bo = NULL; + + while (likely(cur != NULL)) { + bo = rb_entry(cur, struct ttm_buffer_object, vm_rb); + cur_offset = bo->vm_node->start; + if (page_start >= cur_offset) { + cur = cur->rb_right; + best_bo = bo; + if (page_start == cur_offset) + break; + } else + cur = cur->rb_left; + } + + if (unlikely(best_bo == NULL)) + return NULL; + + if (unlikely((best_bo->vm_node->start + best_bo->num_pages) < + (page_start + num_pages))) + return NULL; + + return best_bo; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + struct ttm_bo_device *bdev = bo->bdev; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long page_offset; + unsigned long page_last; + unsigned long pfn; + struct ttm_tt *ttm = NULL; + struct page *page; + int ret; + int i; + bool is_iomem; + unsigned long address = (unsigned long)vmf->virtual_address; + int retval = VM_FAULT_NOPAGE; + + ret = ttm_bo_reserve(bo, true, false, false, 0); + if (unlikely(ret != 0)) + return VM_FAULT_NOPAGE; + + mutex_lock(&bo->mutex); + + /* + * Wait for buffer data in transit, due to a pipelined + * move. + */ + + if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) { + ret = ttm_bo_wait(bo, false, true, false); + if (unlikely(ret != 0)) { + retval = (ret != -ERESTART) ? + VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; + goto out_unlock; + } + } + + ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, + &bus_size); + if (unlikely(ret != 0)) { + retval = VM_FAULT_SIGBUS; + goto out_unlock; + } + + is_iomem = (bus_size != 0); + + page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + + bo->vm_node->start - vma->vm_pgoff; + page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + + bo->vm_node->start - vma->vm_pgoff; + + if (unlikely(page_offset >= bo->num_pages)) { + retval = VM_FAULT_SIGBUS; + goto out_unlock; + } + + /* + * Strictly, we're not allowed to modify vma->vm_page_prot here, + * since the mmap_sem is only held in read mode. However, we + * modify only the caching bits of vma->vm_page_prot and + * consider those bits protected by + * the bo->mutex, as we should be the only writers. + * There shouldn't really be any readers of these bits except + * within vm_insert_mixed()? fork? + * + * TODO: Add a list of vmas to the bo, and change the + * vma->vm_page_prot when the object changes caching policy, with + * the correct locks held. + */ + + if (is_iomem) { + vma->vm_page_prot = ttm_io_prot(bo->mem.flags, + vma->vm_page_prot); + } else { + ttm = bo->ttm; + vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ? + vm_get_page_prot(vma->vm_flags) : + ttm_io_prot(bo->mem.flags, vma->vm_page_prot); + } + + /* + * Speculatively prefault a number of pages. Only error on + * first page. 
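+	 * For io memory the pfn is computed directly from the PCI
+	 * window, as ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset,
+	 * while system memory pages are translated with page_to_pfn()
+	 * in the loop below.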
+ */ + + for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { + + if (is_iomem) + pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + + page_offset; + else { + page = ttm_tt_get_page(ttm, page_offset); + if (unlikely(!page && i == 0)) { + retval = VM_FAULT_OOM; + goto out_unlock; + } else if (unlikely(!page)) { + break; + } + pfn = page_to_pfn(page); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + ret = vm_insert_mixed(vma, address, pfn); +#else + ret = vm_insert_pfn(vma, address, pfn); +#endif + /* + * Somebody beat us to this PTE or prefaulting to + * an already populated PTE, or prefaulting error. + */ + + if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0))) + break; + else if (unlikely(ret != 0)) { + retval = + (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; + goto out_unlock; + + } + + address += PAGE_SIZE; + if (unlikely(++page_offset >= page_last)) + break; + } + + out_unlock: + mutex_unlock(&bo->mutex); + ttm_bo_unreserve(bo); + return retval; +} + +#else + +static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma, + unsigned long address) +{ + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + struct ttm_bo_device *bdev = bo->bdev; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long page_offset; + unsigned long page_last; + unsigned long pfn; + struct ttm_tt *ttm = NULL; + struct page *page; + int ret; + int i; + bool is_iomem; + unsigned long retval = NOPFN_REFAULT; + + ret = ttm_bo_reserve(bo, true, false, false, 0); + if (unlikely(ret != 0)) + return NOPFN_REFAULT; + + mutex_lock(&bo->mutex); + + /* + * Wait for buffer data in transit, due to a pipelined + * move. + */ + + if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) { + ret = ttm_bo_wait(bo, false, true, false); + if (unlikely(ret != 0)) { + retval = (ret != -ERESTART) ? + NOPFN_SIGBUS : NOPFN_REFAULT; + goto out_unlock; + } + } + + ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, + &bus_size); + if (unlikely(ret != 0)) { + printk(KERN_ERR "Attempted buffer object access " + "of unmappable object.\n"); + retval = NOPFN_SIGBUS; + goto out_unlock; + } + + is_iomem = (bus_size != 0); + + page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + + bo->vm_node->start - vma->vm_pgoff; + + page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + + bo->vm_node->start - vma->vm_pgoff; + + if (unlikely(page_offset >= bo->num_pages)) { + printk(KERN_ERR "Attempted buffer object access " + "outside object.\n"); + retval = NOPFN_SIGBUS; + goto out_unlock; + } + + /* + * Strictly, we're not allowed to modify vma->vm_page_prot here, + * since the mmap_sem is only held in read mode. However, we + * modify only the caching bits of vma->vm_page_prot and + * consider those bits protected by + * the bo->mutex, as we should be the only writers. + * There shouldn't really be any readers of these bits except + * within vm_insert_mixed()? fork? + * + * TODO: Add a list of vmas to the bo, and change the + * vma->vm_page_prot when the object changes caching policy, with + * the correct locks held. + */ + + if (is_iomem) { + vma->vm_page_prot = ttm_io_prot(bo->mem.flags, + vma->vm_page_prot); + } else { + ttm = bo->ttm; + vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ? + vm_get_page_prot(vma->vm_flags) : + ttm_io_prot(bo->mem.flags, vma->vm_page_prot); + } + + /* + * Speculatively prefault a number of pages. Only error on + * first page. 
+	 */
+
+	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+
+		if (is_iomem)
+			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
+			    page_offset;
+		else {
+			page = ttm_tt_get_page(ttm, page_offset);
+			if (unlikely(!page && i == 0)) {
+				retval = NOPFN_OOM;
+				goto out_unlock;
+			} else if (unlikely(!page)) {
+				break;
+			}
+			pfn = page_to_pfn(page);
+		}
+
+		ret = vm_insert_pfn(vma, address, pfn);
+
+		/*
+		 * Somebody beat us to this PTE or prefaulting to
+		 * an already populated PTE, or prefaulting error.
+		 */
+
+		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+			break;
+		else if (unlikely(ret != 0)) {
+			retval =
+			    (ret == -ENOMEM) ? NOPFN_OOM : NOPFN_SIGBUS;
+			goto out_unlock;
+		}
+
+		address += PAGE_SIZE;
+		if (unlikely(++page_offset >= page_last))
+			break;
+	}
+
+      out_unlock:
+	mutex_unlock(&bo->mutex);
+	ttm_bo_unreserve(bo);
+	return retval;
+}
+#endif
+
+static void ttm_bo_vm_open(struct vm_area_struct *vma)
+{
+	struct ttm_buffer_object *bo =
+	    (struct ttm_buffer_object *)vma->vm_private_data;
+
+	(void)ttm_bo_reference(bo);
+}
+
+static void ttm_bo_vm_close(struct vm_area_struct *vma)
+{
+	struct ttm_buffer_object *bo =
+	    (struct ttm_buffer_object *)vma->vm_private_data;
+
+	ttm_bo_unref(&bo);
+	vma->vm_private_data = NULL;
+}
+
+static struct vm_operations_struct ttm_bo_vm_ops = {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+	.fault = ttm_bo_vm_fault,
+#else
+	.nopfn = ttm_bo_vm_nopfn,
+#endif
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close
+};
+
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+		struct ttm_bo_device *bdev)
+{
+	struct ttm_bo_driver *driver;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	read_lock(&bdev->vm_lock);
+	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
+				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+	if (likely(bo != NULL))
+		ttm_bo_reference(bo);
+	read_unlock(&bdev->vm_lock);
+
+	if (unlikely(bo == NULL)) {
+		printk(KERN_ERR "Could not find buffer object to map.\n");
+		return -EINVAL;
+	}
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+	ret = driver->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	vma->vm_ops = &ttm_bo_vm_ops;
+
+	/*
+	 * Note: We're transferring the bo reference to
+	 * vma->vm_private_data here.
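+	 * The reference is balanced by ttm_bo_vm_open() taking another
+	 * reference when a vma is copied and by ttm_bo_vm_close()
+	 * dropping one when a vma is torn down.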
+	 */
+
+	vma->vm_private_data = bo;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+#else
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+#endif
+	return 0;
+out_unref:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+	if (vma->vm_pgoff != 0)
+		return -EACCES;
+
+	vma->vm_ops = &ttm_bo_vm_ops;
+	vma->vm_private_data = ttm_bo_reference(bo);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+#else
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+#endif
+	return 0;
+}
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+		  const char __user *wbuf, char __user *rbuf, size_t count,
+		  loff_t *f_pos, bool write)
+{
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_driver *driver;
+	struct ttm_bo_kmap_obj map;
+	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+	unsigned long kmap_offset;
+	unsigned long kmap_end;
+	unsigned long kmap_num;
+	size_t io_size;
+	unsigned int page_offset;
+	char *virtual;
+	int ret;
+	bool no_wait = false;
+	bool dummy;
+
+	read_lock(&bdev->vm_lock);
+	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
+	if (likely(bo != NULL))
+		ttm_bo_reference(bo);
+	read_unlock(&bdev->vm_lock);
+
+	if (unlikely(bo == NULL))
+		return -EFAULT;
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+
+	ret = driver->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	kmap_offset = dev_offset - bo->vm_node->start;
+	if (unlikely(kmap_offset >= bo->num_pages)) {
+		ret = -EFBIG;
+		goto out_unref;
+	}
+
+	page_offset = *f_pos & ~PAGE_MASK;
+	io_size = bo->num_pages - kmap_offset;
+	io_size = (io_size << PAGE_SHIFT) - page_offset;
+	if (count < io_size)
+		io_size = count;
+
+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+	kmap_num = kmap_end - kmap_offset + 1;
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ERESTART:
+		ret = -EINTR;
+		goto out_unref;
+	case -EBUSY:
+		ret = -EAGAIN;
+		goto out_unref;
+	default:
+		goto out_unref;
+	}
+
+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	virtual += page_offset;
+
+	if (write)
+		ret = copy_from_user(virtual, wbuf, io_size);
+	else
+		ret = copy_to_user(rbuf, virtual, io_size);
+
+	ttm_bo_kunmap(&map);
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+
+	if (unlikely(ret != 0))
+		return -EFBIG;
+
+	*f_pos += io_size;
+
+	return io_size;
+out_unref:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+			char __user *rbuf, size_t count, loff_t *f_pos,
+			bool write)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_end;
+	unsigned long kmap_num;
+	size_t io_size;
+	unsigned int page_offset;
+	char *virtual;
+	int ret;
+	bool no_wait = false;
+	bool dummy;
+
+	kmap_offset = (*f_pos >> PAGE_SHIFT);
+	if (unlikely(kmap_offset >= bo->num_pages))
+		return -EFBIG;
+
+	page_offset = *f_pos & ~PAGE_MASK;
+	io_size = bo->num_pages - kmap_offset;
+	io_size = (io_size << PAGE_SHIFT) - page_offset;
+	if (count < io_size)
+		io_size = count;
+
+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+	kmap_num = kmap_end - kmap_offset + 1;
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) { + case 0: + break; + case -ERESTART: + return -EINTR; + case -EBUSY: + return -EAGAIN; + default: + return ret; + } + + ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); + if (unlikely(ret != 0)) + return ret; + + virtual = ttm_kmap_obj_virtual(&map, &dummy); + virtual += page_offset; + + if (write) + ret = copy_from_user(virtual, wbuf, io_size); + else + ret = copy_to_user(rbuf, virtual, io_size); + + ttm_bo_kunmap(&map); + ttm_bo_unreserve(bo); + ttm_bo_unref(&bo); + + if (unlikely(ret != 0)) + return ret; + + *f_pos += io_size; + + return io_size; +} diff --git a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c new file mode 100644 index 0000000..4a34c18 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c @@ -0,0 +1,115 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "ttm/ttm_execbuf_util.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_placement_common.h" +#include +#include + +void ttm_eu_backoff_reservation(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + if (!entry->reserved) + continue; + + entry->reserved = false; + ttm_bo_unreserve(bo); + } +} + +/* + * Reserve buffers for validation. + * + * If a buffer in the list is marked for CPU access, we back off and + * wait for that buffer to become free for GPU access. + * + * If a buffer is reserved for another validation, the validator with + * the highest validation sequence backs off and waits for that buffer + * to become unreserved. This prevents deadlocks when validating multiple + * buffers in different orders. 
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+	struct ttm_validate_buffer *entry;
+	int ret;
+
+retry:
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		entry->reserved = false;
+		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+		if (ret != 0) {
+			ttm_eu_backoff_reservation(list);
+			if (ret == -EAGAIN) {
+				ret = ttm_bo_wait_unreserved(bo, true);
+				if (unlikely(ret != 0))
+					return ret;
+				goto retry;
+			} else
+				return ret;
+		}
+
+		entry->reserved = true;
+		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			ttm_eu_backoff_reservation(list);
+			ret = ttm_bo_wait_cpu(bo, false);
+			if (ret)
+				return ret;
+			goto retry;
+		}
+	}
+	return 0;
+}
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		struct ttm_bo_driver *driver = bo->bdev->driver;
+		void *old_sync_obj;
+
+		mutex_lock(&bo->mutex);
+		old_sync_obj = bo->sync_obj;
+		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		bo->sync_obj_arg = entry->new_sync_obj_arg;
+		mutex_unlock(&bo->mutex);
+		ttm_bo_unreserve(bo);
+		entry->reserved = false;
+		if (old_sync_obj)
+			driver->sync_obj_unref(&old_sync_obj);
+	}
+}
diff --git a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
new file mode 100644
index 0000000..6577f63
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
@@ -0,0 +1,110 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_fence_api.h"
+#include <linux/list.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head: list head for thread-private list.
+ * @bo: refcounted buffer object pointer.
+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used when
+ * adding a new sync object.
+ * @reserved: Indicates whether @bo has been reserved for validation.
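+ *
+ * A typical submission pass builds a thread-private list of these
+ * structs, reserves, submits and fences. A minimal sketch ("my_bo",
+ * "my_seq" and "my_fence" are hypothetical driver objects):
+ *
+ *	struct ttm_validate_buffer val;
+ *	struct list_head list;
+ *	int ret;
+ *
+ *	INIT_LIST_HEAD(&list);
+ *	val.bo = my_bo;
+ *	val.new_sync_obj_arg = NULL;
+ *	list_add_tail(&val.head, &list);
+ *
+ *	ret = ttm_eu_reserve_buffers(&list, my_seq);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	(build and submit the command stream here)
+ *	ttm_eu_fence_buffer_objects(&list, my_fence);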
+ */ + +struct ttm_validate_buffer { + struct list_head head; + struct ttm_buffer_object *bo; + void *new_sync_obj_arg; + bool reserved; +}; + +/** + * function ttm_eu_backoff_reservation + * + * @list: thread private list of ttm_validate_buffer structs. + * + * Undoes all buffer validation reservations for bos pointed to by + * the list entries. + */ + +extern void ttm_eu_backoff_reservation(struct list_head *list); + +/** + * function ttm_eu_reserve_buffers + * + * @list: thread private list of ttm_validate_buffer structs. + * @val_seq: A unique sequence number. + * + * Tries to reserve bos pointed to by the list entries for validation. + * If the function returns 0, all buffers are marked as "unfenced", + * taken off the lru lists and are not synced for write CPU usage. + * + * If the function detects a deadlock due to multiple threads trying to + * reserve the same buffers in reverse order, all threads except one will + * back off and retry. This function may sleep while waiting for + * CPU write reservations to be cleared, and for other threads to + * unreserve their buffers. + * + * This function may return -ERESTART or -EAGAIN if the calling process + * receives a signal while waiting. In that case, no buffers on the list + * will be reserved upon return. + * + * Buffers reserved by this function should be unreserved by + * a call to either ttm_eu_backoff_reservation() or + * ttm_eu_fence_buffer_objects() when command submission is complete or + * has failed. + */ + +extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); + +/** + * function ttm_eu_fence_buffer_objects. + * + * @list: thread private list of ttm_validate_buffer structs. + * @sync_obj: The new sync object for the buffers. + * + * This function should be called when command submission is complete, and + * it will add a new sync object to bos pointed to by entries on @list. + * It also unreserves all buffers, putting them on lru lists. + * + */ + +extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj); + +#endif diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence.c b/drivers/gpu/drm/psb/ttm/ttm_fence.c new file mode 100644 index 0000000..115e7b7 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_fence.c @@ -0,0 +1,607 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#include "ttm/ttm_fence_api.h"
+#include "ttm/ttm_fence_driver.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include <drm/drmP.h>
+
+/*
+ * Simple implementation for now.
+ */
+
+static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
+{
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+
+	printk(KERN_ERR "GPU lockup detected on engine %u "
+	       "fence type 0x%08x\n",
+	       (unsigned int)fence->fence_class, (unsigned int)mask);
+	/*
+	 * Give engines some time to idle?
+	 */
+
+	write_lock(&fc->lock);
+	ttm_fence_handler(fence->fdev, fence->fence_class,
+			  fence->sequence, mask, -EBUSY);
+	write_unlock(&fc->lock);
+}
+
+/*
+ * Convenience function to be called by fence::wait methods that
+ * need polling.
+ */
+
+int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
+			   bool interruptible, uint32_t mask)
+{
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
+	uint32_t count = 0;
+	int ret;
+	unsigned long end_jiffies = fence->timeout_jiffies;
+
+	DECLARE_WAITQUEUE(entry, current);
+	add_wait_queue(&fc->fence_queue, &entry);
+
+	ret = 0;
+
+	for (;;) {
+		__set_current_state((interruptible) ?
+				    TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		if (ttm_fence_object_signaled(fence, mask))
+			break;
+		if (time_after_eq(jiffies, end_jiffies)) {
+			if (driver->lockup)
+				driver->lockup(fence, mask);
+			else
+				ttm_fence_lockup(fence, mask);
+			continue;
+		}
+		if (lazy)
+			schedule_timeout(1);
+		else if ((++count & 0x0F) == 0) {
+			__set_current_state(TASK_RUNNING);
+			schedule();
+			__set_current_state((interruptible) ?
+					    TASK_INTERRUPTIBLE :
+					    TASK_UNINTERRUPTIBLE);
+		}
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTART;
+			break;
+		}
+	}
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&fc->fence_queue, &entry);
+	return ret;
+}
+
+/*
+ * Typically called by the IRQ handler.
+ */
+
+void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
+		       uint32_t sequence, uint32_t type, uint32_t error)
+{
+	int wake = 0;
+	uint32_t diff;
+	uint32_t relevant_type;
+	uint32_t new_type;
+	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
+	const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
+	struct list_head *head;
+	struct ttm_fence_object *fence, *next;
+	bool found = false;
+
+	if (list_empty(&fc->ring))
+		return;
+
+	list_for_each_entry(fence, &fc->ring, ring) {
+		diff = (sequence - fence->sequence) & fc->sequence_mask;
+		if (diff > fc->wrap_diff) {
+			found = true;
+			break;
+		}
+	}
+
+	fc->waiting_types &= ~type;
+	head = (found) ?
&fence->ring : &fc->ring; + + list_for_each_entry_safe_reverse(fence, next, head, ring) { + if (&fence->ring == &fc->ring) + break; + + DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n", + (unsigned long)fence, fence->sequence, + fence->fence_type); + + if (error) { + fence->info.error = error; + fence->info.signaled_types = fence->fence_type; + list_del_init(&fence->ring); + wake = 1; + break; + } + + relevant_type = type & fence->fence_type; + new_type = (fence->info.signaled_types | relevant_type) ^ + fence->info.signaled_types; + + if (new_type) { + fence->info.signaled_types |= new_type; + DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", + (unsigned long)fence, + fence->info.signaled_types); + + if (unlikely(driver->signaled)) + driver->signaled(fence); + + if (driver->needed_flush) + fc->pending_flush |= + driver->needed_flush(fence); + + if (new_type & fence->waiting_types) + wake = 1; + } + + fc->waiting_types |= + fence->waiting_types & ~fence->info.signaled_types; + + if (!(fence->fence_type & ~fence->info.signaled_types)) { + DRM_DEBUG("Fence completely signaled 0x%08lx\n", + (unsigned long)fence); + list_del_init(&fence->ring); + } + } + + /* + * Reinstate lost waiting types. + */ + + if ((fc->waiting_types & type) != type) { + head = head->prev; + list_for_each_entry(fence, head, ring) { + if (&fence->ring == &fc->ring) + break; + diff = + (fc->highest_waiting_sequence - + fence->sequence) & fc->sequence_mask; + if (diff > fc->wrap_diff) + break; + + fc->waiting_types |= + fence->waiting_types & ~fence->info.signaled_types; + } + } + + if (wake) + wake_up_all(&fc->fence_queue); +} + +static void ttm_fence_unring(struct ttm_fence_object *fence) +{ + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); + unsigned long irq_flags; + + write_lock_irqsave(&fc->lock, irq_flags); + list_del_init(&fence->ring); + write_unlock_irqrestore(&fc->lock, irq_flags); +} + +bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask) +{ + unsigned long flags; + bool signaled; + const struct ttm_fence_driver *driver = ttm_fence_driver(fence); + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); + + mask &= fence->fence_type; + read_lock_irqsave(&fc->lock, flags); + signaled = (mask & fence->info.signaled_types) == mask; + read_unlock_irqrestore(&fc->lock, flags); + if (!signaled && driver->poll) { + write_lock_irqsave(&fc->lock, flags); + driver->poll(fence->fdev, fence->fence_class, mask); + signaled = (mask & fence->info.signaled_types) == mask; + write_unlock_irqrestore(&fc->lock, flags); + } + return signaled; +} + +int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type) +{ + const struct ttm_fence_driver *driver = ttm_fence_driver(fence); + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); + unsigned long irq_flags; + uint32_t saved_pending_flush; + uint32_t diff; + bool call_flush; + + if (type & ~fence->fence_type) { + DRM_ERROR("Flush trying to extend fence type, " + "0x%x, 0x%x\n", type, fence->fence_type); + return -EINVAL; + } + + write_lock_irqsave(&fc->lock, irq_flags); + fence->waiting_types |= type; + fc->waiting_types |= fence->waiting_types; + diff = (fence->sequence - fc->highest_waiting_sequence) & + fc->sequence_mask; + + if (diff < fc->wrap_diff) + fc->highest_waiting_sequence = fence->sequence; + + /* + * fence->waiting_types has changed. Determine whether + * we need to initiate some kind of flush as a result of this. 
+ */
+
+	saved_pending_flush = fc->pending_flush;
+	if (driver->needed_flush)
+		fc->pending_flush |= driver->needed_flush(fence);
+
+	if (driver->poll)
+		driver->poll(fence->fdev, fence->fence_class,
+			     fence->waiting_types);
+
+	call_flush = (fc->pending_flush != 0);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+
+	if (call_flush && driver->flush)
+		driver->flush(fence->fdev, fence->fence_class);
+
+	return 0;
+}
+
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+void ttm_fence_flush_old(struct ttm_fence_device *fdev,
+			 uint32_t fence_class, uint32_t sequence)
+{
+	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
+	struct ttm_fence_object *fence;
+	unsigned long irq_flags;
+	const struct ttm_fence_driver *driver = fdev->driver;
+	bool call_flush;
+
+	uint32_t diff;
+
+	write_lock_irqsave(&fc->lock, irq_flags);
+
+	list_for_each_entry_reverse(fence, &fc->ring, ring) {
+		diff = (sequence - fence->sequence) & fc->sequence_mask;
+		if (diff <= fc->flush_diff)
+			break;
+
+		fence->waiting_types = fence->fence_type;
+		fc->waiting_types |= fence->fence_type;
+
+		if (driver->needed_flush)
+			fc->pending_flush |= driver->needed_flush(fence);
+	}
+
+	if (driver->poll)
+		driver->poll(fdev, fence_class, fc->waiting_types);
+
+	call_flush = (fc->pending_flush != 0);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+
+	if (call_flush && driver->flush)
+		driver->flush(fdev, fence_class);
+
+	/*
+	 * FIXME: Should we implement a wait here for really old fences?
+	 */
+
+}
+
+int ttm_fence_object_wait(struct ttm_fence_object *fence,
+			  bool lazy, bool interruptible, uint32_t mask)
+{
+	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	int ret = 0;
+	unsigned long timeout;
+	unsigned long cur_jiffies;
+	unsigned long to_jiffies;
+
+	if (mask & ~fence->fence_type) {
+		DRM_ERROR("Wait trying to extend fence type"
+			  " 0x%08x 0x%08x\n", mask, fence->fence_type);
+		BUG();
+		return -EINVAL;
+	}
+
+	if (driver->wait)
+		return driver->wait(fence, lazy, interruptible, mask);
+
+	ttm_fence_object_flush(fence, mask);
+retry:
+	if (!driver->has_irq ||
+	    driver->has_irq(fence->fdev, fence->fence_class, mask)) {
+
+		cur_jiffies = jiffies;
+		to_jiffies = fence->timeout_jiffies;
+
+		timeout = (time_after(to_jiffies, cur_jiffies)) ?
+ to_jiffies - cur_jiffies : 1; + + if (interruptible) + ret = wait_event_interruptible_timeout + (fc->fence_queue, + ttm_fence_object_signaled(fence, mask), timeout); + else + ret = wait_event_timeout + (fc->fence_queue, + ttm_fence_object_signaled(fence, mask), timeout); + + if (unlikely(ret == -ERESTARTSYS)) + return -ERESTART; + + if (unlikely(ret == 0)) { + if (driver->lockup) + driver->lockup(fence, mask); + else + ttm_fence_lockup(fence, mask); + goto retry; + } + + return 0; + } + + return ttm_fence_wait_polling(fence, lazy, interruptible, mask); +} + +int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags, + uint32_t fence_class, uint32_t type) +{ + const struct ttm_fence_driver *driver = ttm_fence_driver(fence); + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); + unsigned long flags; + uint32_t sequence; + unsigned long timeout; + int ret; + + ttm_fence_unring(fence); + ret = driver->emit(fence->fdev, + fence_class, fence_flags, &sequence, &timeout); + if (ret) + return ret; + + write_lock_irqsave(&fc->lock, flags); + fence->fence_class = fence_class; + fence->fence_type = type; + fence->waiting_types = 0; + fence->info.signaled_types = 0; + fence->info.error = 0; + fence->sequence = sequence; + fence->timeout_jiffies = timeout; + if (list_empty(&fc->ring)) + fc->highest_waiting_sequence = sequence - 1; + list_add_tail(&fence->ring, &fc->ring); + fc->latest_queued_sequence = sequence; + write_unlock_irqrestore(&fc->lock, flags); + return 0; +} + +int ttm_fence_object_init(struct ttm_fence_device *fdev, + uint32_t fence_class, + uint32_t type, + uint32_t create_flags, + void (*destroy) (struct ttm_fence_object *), + struct ttm_fence_object *fence) +{ + int ret = 0; + + kref_init(&fence->kref); + fence->fence_class = fence_class; + fence->fence_type = type; + fence->info.signaled_types = 0; + fence->waiting_types = 0; + fence->sequence = 0; + fence->info.error = 0; + fence->fdev = fdev; + fence->destroy = destroy; + INIT_LIST_HEAD(&fence->ring); + atomic_inc(&fdev->count); + + if (create_flags & TTM_FENCE_FLAG_EMIT) { + ret = ttm_fence_object_emit(fence, create_flags, + fence->fence_class, type); + } + + return ret; +} + +int ttm_fence_object_create(struct ttm_fence_device *fdev, + uint32_t fence_class, + uint32_t type, + uint32_t create_flags, + struct ttm_fence_object **c_fence) +{ + struct ttm_fence_object *fence; + int ret; + + ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false); + if (unlikely(ret != 0)) { + printk(KERN_ERR "Out of memory creating fence object\n"); + return ret; + } + + fence = kmalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) { + printk(KERN_ERR "Out of memory creating fence object\n"); + ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false); + return -ENOMEM; + } + + ret = ttm_fence_object_init(fdev, fence_class, type, + create_flags, NULL, fence); + if (ret) { + ttm_fence_object_unref(&fence); + return ret; + } + *c_fence = fence; + + return 0; +} + +static void ttm_fence_object_destroy(struct kref *kref) +{ + struct ttm_fence_object *fence = + container_of(kref, struct ttm_fence_object, kref); + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); + unsigned long irq_flags; + + write_lock_irqsave(&fc->lock, irq_flags); + list_del_init(&fence->ring); + write_unlock_irqrestore(&fc->lock, irq_flags); + + atomic_dec(&fence->fdev->count); + if (fence->destroy) + fence->destroy(fence); + else { + ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false); + kfree(fence); + } +} + 
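+
+/*
+ * A minimal sketch of the object lifecycle implemented above, assuming
+ * a hypothetical device "my_fdev" with one fence class:
+ *
+ *	struct ttm_fence_object *fence;
+ *	int ret;
+ *
+ *	ret = ttm_fence_object_create(my_fdev, 0, TTM_FENCE_TYPE_EXE,
+ *				      TTM_FENCE_FLAG_EMIT, &fence);
+ *	if (ret == 0) {
+ *		ret = ttm_fence_object_wait(fence, true, true,
+ *					    TTM_FENCE_TYPE_EXE);
+ *		ttm_fence_object_unref(&fence);
+ *	}
+ *
+ * TTM_FENCE_FLAG_EMIT makes the create call emit a sequence into the
+ * command stream right away; the wait then blocks until the EXE type
+ * signals, falling back to ttm_fence_wait_polling() or the driver's
+ * lockup handler as appropriate.
+ */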
+void ttm_fence_device_release(struct ttm_fence_device *fdev) +{ + kfree(fdev->fence_class); +} + +int +ttm_fence_device_init(int num_classes, + struct ttm_mem_global *mem_glob, + struct ttm_fence_device *fdev, + const struct ttm_fence_class_init *init, + bool replicate_init, const struct ttm_fence_driver *driver) +{ + struct ttm_fence_class_manager *fc; + const struct ttm_fence_class_init *fci; + int i; + + fdev->mem_glob = mem_glob; + fdev->fence_class = kzalloc(num_classes * + sizeof(*fdev->fence_class), GFP_KERNEL); + + if (unlikely(!fdev->fence_class)) + return -ENOMEM; + + fdev->num_classes = num_classes; + atomic_set(&fdev->count, 0); + fdev->driver = driver; + + for (i = 0; i < fdev->num_classes; ++i) { + fc = &fdev->fence_class[i]; + fci = &init[(replicate_init) ? 0 : i]; + + fc->wrap_diff = fci->wrap_diff; + fc->flush_diff = fci->flush_diff; + fc->sequence_mask = fci->sequence_mask; + + rwlock_init(&fc->lock); + INIT_LIST_HEAD(&fc->ring); + init_waitqueue_head(&fc->fence_queue); + } + + return 0; +} + +struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence) +{ + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); + struct ttm_fence_info tmp; + unsigned long irq_flags; + + read_lock_irqsave(&fc->lock, irq_flags); + tmp = fence->info; + read_unlock_irqrestore(&fc->lock, irq_flags); + + return tmp; +} + +void ttm_fence_object_unref(struct ttm_fence_object **p_fence) +{ + struct ttm_fence_object *fence = *p_fence; + + *p_fence = NULL; + (void)kref_put(&fence->kref, &ttm_fence_object_destroy); +} + +/* + * Placement / BO sync object glue. + */ + +bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg) +{ + struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; + uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; + + return ttm_fence_object_signaled(fence, fence_types); +} + +int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg, + bool lazy, bool interruptible) +{ + struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; + uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; + + return ttm_fence_object_wait(fence, lazy, interruptible, fence_types); +} + +int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg) +{ + struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; + uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; + + return ttm_fence_object_flush(fence, fence_types); +} + +void ttm_fence_sync_obj_unref(void **sync_obj) +{ + ttm_fence_object_unref((struct ttm_fence_object **)sync_obj); +} + +void *ttm_fence_sync_obj_ref(void *sync_obj) +{ + return (void *) + ttm_fence_object_ref((struct ttm_fence_object *)sync_obj); +} diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h new file mode 100644 index 0000000..2a4e12b --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h @@ -0,0 +1,277 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+#ifndef _TTM_FENCE_API_H_
+#define _TTM_FENCE_API_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#define TTM_FENCE_FLAG_EMIT (1 << 0)
+#define TTM_FENCE_TYPE_EXE (1 << 0)
+
+struct ttm_fence_device;
+
+/**
+ * struct ttm_fence_info
+ *
+ * @fence_class: The fence class.
+ * @fence_type: Bitfield indicating types for this fence.
+ * @signaled_types: Bitfield indicating which types are signaled.
+ * @error: Last error reported from the device.
+ *
+ * Used as output from ttm_fence_get_info().
+ */
+
+struct ttm_fence_info {
+	uint32_t signaled_types;
+	uint32_t error;
+};
+
+/**
+ * struct ttm_fence_object
+ *
+ * @fdev: Pointer to the fence device struct.
+ * @kref: Holds the reference count of this fence object.
+ * @ring: List head used for the circular list of not-completely
+ * signaled fences.
+ * @info: Data for fast retrieval using the ttm_fence_get_info()
+ * function.
+ * @timeout_jiffies: Absolute jiffies value indicating when this fence
+ * object times out and, if waited on, calls ttm_fence_lockup
+ * to check for and resolve a GPU lockup.
+ * @sequence: Fence sequence number.
+ * @waiting_types: Types currently waited on.
+ * @destroy: Called to free the fence object, when its refcount has
+ * reached zero. If NULL, kfree is used.
+ *
+ * This struct is provided in the driver interface so that drivers can
+ * derive from it and create their own fence implementation. All members
+ * are private to the fence implementation and the fence driver callbacks.
+ * Otherwise a driver may access the derived object using container_of().
+ */
+
+struct ttm_fence_object {
+	struct ttm_fence_device *fdev;
+	struct kref kref;
+	uint32_t fence_class;
+	uint32_t fence_type;
+
+	/*
+	 * The below fields are protected by the fence class
+	 * manager spinlock.
+	 */
+
+	struct list_head ring;
+	struct ttm_fence_info info;
+	unsigned long timeout_jiffies;
+	uint32_t sequence;
+	uint32_t waiting_types;
+	void (*destroy) (struct ttm_fence_object *);
+};
+
+/**
+ * ttm_fence_object_init
+ *
+ * @fdev: Pointer to a struct ttm_fence_device.
+ * @fence_class: Fence class for this fence.
+ * @type: Fence type for this fence.
+ * @create_flags: Flags indicating various actions at init time. At this point
+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
+ * the command stream.
+ * @destroy: Destroy function. If NULL, kfree() is used.
+ * @fence: The struct ttm_fence_object to initialize.
+ *
+ * Initialize a pre-allocated fence object. This function, together with the
+ * destroy function, makes it possible to derive driver-specific fence objects.
+ */
+
+extern int
+ttm_fence_object_init(struct ttm_fence_device *fdev,
+		      uint32_t fence_class,
+		      uint32_t type,
+		      uint32_t create_flags,
+		      void (*destroy) (struct ttm_fence_object * fence),
+		      struct ttm_fence_object *fence);
+
+/**
+ * ttm_fence_object_create
+ *
+ * @fdev: Pointer to a struct ttm_fence_device.
+ * @fence_class: Fence class for this fence.
+ * @type: Fence type for this fence.
+ * @create_flags: Flags indicating various actions at init time. At this point
+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
+ * the command stream.
+ * @c_fence: On successful termination, *(@c_fence) will point to the created
+ * fence object.
+ *
+ * Create and initialize a struct ttm_fence_object. The destroy function will
+ * be set to kfree().
+ */
+
+extern int
+ttm_fence_object_create(struct ttm_fence_device *fdev,
+			uint32_t fence_class,
+			uint32_t type,
+			uint32_t create_flags,
+			struct ttm_fence_object **c_fence);
+
+/**
+ * ttm_fence_object_wait
+ *
+ * @fence: The fence object to wait on.
+ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
+ * @interruptible: Sleep interruptible when waiting.
+ * @type_mask: Wait for the given type_mask to signal.
+ *
+ * Wait for a fence to signal the given type_mask. The function will
+ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
+ *
+ * Returns
+ * -ERESTART if interrupted by a signal.
+ * May return driver-specific error codes if timed out.
+ */
+
+extern int
+ttm_fence_object_wait(struct ttm_fence_object *fence,
+		      bool lazy, bool interruptible, uint32_t type_mask);
+
+/**
+ * ttm_fence_object_flush
+ *
+ * @fence: The fence object to flush.
+ * @flush_mask: Fence types to flush.
+ *
+ * Make sure that the given fence eventually signals the
+ * types indicated by @flush_mask. Note that this may or may not
+ * map to a CPU or GPU flush.
+ */
+
+extern int
+ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
+
+/**
+ * ttm_fence_get_info
+ *
+ * @fence: The fence object.
+ *
+ * Copy the info block from the fence while holding relevant locks.
+ */
+
+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
+
+/**
+ * ttm_fence_object_ref
+ *
+ * @fence: The fence object.
+ *
+ * Return a ref-counted pointer to the fence object indicated by @fence.
+ */
+
+static inline struct ttm_fence_object *
+ttm_fence_object_ref(struct ttm_fence_object *fence)
+{
+	kref_get(&fence->kref);
+	return fence;
+}
+
+/**
+ * ttm_fence_object_unref
+ *
+ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
+ *
+ * Unreference the fence object pointed to by *(@p_fence), clearing
+ * *(@p_fence).
+ */
+
+extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
+
+/**
+ * ttm_fence_object_signaled
+ *
+ * @fence: Pointer to the struct ttm_fence_object.
+ * @mask: Type mask to check whether signaled.
+ *
+ * This function checks (without waiting) whether the fence object
+ * pointed to by @fence has signaled the types indicated by @mask,
+ * and returns 1 if true, 0 if false. This function does NOT perform
+ * an implicit fence flush.
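+ *
+ * Since no flush is performed, a non-blocking poll loop usually pairs
+ * it with an explicit flush, along the lines of this sketch:
+ *
+ *	ttm_fence_object_flush(fence, TTM_FENCE_TYPE_EXE);
+ *	if (ttm_fence_object_signaled(fence, TTM_FENCE_TYPE_EXE))
+ *		(the buffers protected by this fence may be reused)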
+ */
+
+extern bool
+ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
+
+/**
+ * ttm_fence_class
+ *
+ * @fence: Pointer to the struct ttm_fence_object.
+ *
+ * Convenience function that returns the fence class of a struct
+ * ttm_fence_object.
+ */
+
+static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
+{
+	return fence->fence_class;
+}
+
+/**
+ * ttm_fence_types
+ *
+ * @fence: Pointer to the struct ttm_fence_object.
+ *
+ * Convenience function that returns the fence types of a struct
+ * ttm_fence_object.
+ */
+
+static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
+{
+	return fence->fence_type;
+}
+
+/*
+ * The functions below are wrappers to the above functions, with
+ * similar names but with sync_obj omitted. These wrappers are intended
+ * to be plugged directly into the buffer object driver's sync object
+ * API, if the driver chooses to use ttm_fence_objects as buffer object
+ * sync objects. In the prototypes below, a sync_obj is cast to a
+ * struct ttm_fence_object, whereas a sync_arg is cast to a uint32_t
+ * representing a fence_type argument.
+ */
+
+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
+extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
+				   bool lazy, bool interruptible);
+extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
+extern void ttm_fence_sync_obj_unref(void **sync_obj);
+extern void *ttm_fence_sync_obj_ref(void *sync_obj);
+
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
new file mode 100644
index 0000000..2eca494
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
@@ -0,0 +1,309 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+#ifndef _TTM_FENCE_DRIVER_H_
+#define _TTM_FENCE_DRIVER_H_
+
+#include
+#include
+#include
+#include "ttm_fence_api.h"
+#include "ttm_memory.h"
+
+/** @file ttm_fence_driver.h
+ *
+ * Definitions needed for a driver implementing the
+ * ttm_fence subsystem.
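+ *
+ * A driver provides its callbacks in a struct ttm_fence_driver and
+ * hands them to ttm_fence_device_init(). A minimal sketch with one
+ * fence class; the "my_" callbacks and the diff values are
+ * hypothetical and must match the hardware's sequence width:
+ *
+ *	static const struct ttm_fence_class_init my_class_init = {
+ *		.wrap_diff = (1 << 30),
+ *		.flush_diff = (1 << 29),
+ *		.sequence_mask = 0xFFFFFFFF
+ *	};
+ *
+ *	static const struct ttm_fence_driver my_fence_driver = {
+ *		.has_irq = my_fence_has_irq,
+ *		.emit = my_fence_emit,
+ *		.poll = my_fence_poll
+ *	};
+ *
+ *	ret = ttm_fence_device_init(1, mem_glob, &my_fdev,
+ *				    &my_class_init, true,
+ *				    &my_fence_driver);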
+ */
+
+/**
+ * struct ttm_fence_class_manager:
+ *
+ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
+ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
+ * @flush_diff: Sequence difference to trigger fence flush.
+ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
+ * seqa as old and needing a flush.
+ * @sequence_mask: Mask of valid bits in a fence sequence.
+ * @lock: Lock protecting this struct as well as fence objects
+ * associated with this struct.
+ * @ring: Circular sequence-ordered list of fence objects.
+ * @pending_flush: Fence types currently needing a flush.
+ * @waiting_types: Fence types that are currently waited for.
+ * @fence_queue: Queue of waiters on fences belonging to this fence class.
+ * @highest_waiting_sequence: Sequence number of the waited-for fence
+ * with the highest sequence number.
+ * @latest_queued_sequence: Sequence number of the fence latest queued
+ * on the ring.
+ */
+
+struct ttm_fence_class_manager {
+
+	/*
+	 * Unprotected constant members.
+	 */
+
+	uint32_t wrap_diff;
+	uint32_t flush_diff;
+	uint32_t sequence_mask;
+
+	/*
+	 * The rwlock protects this structure as well as
+	 * the data in all fence objects belonging to this
+	 * class. This should be OK as most fence objects are
+	 * only read from once they're created.
+	 */
+
+	rwlock_t lock;
+	struct list_head ring;
+	uint32_t pending_flush;
+	uint32_t waiting_types;
+	wait_queue_head_t fence_queue;
+	uint32_t highest_waiting_sequence;
+	uint32_t latest_queued_sequence;
+};
+
+/**
+ * struct ttm_fence_device
+ *
+ * @fence_class: Array of fence class managers.
+ * @num_classes: Array dimension of @fence_class.
+ * @count: Current number of fence objects for statistics.
+ * @driver: Driver struct.
+ *
+ * Provided in the driver interface so that the driver can derive
+ * from this struct for its driver_private, and accordingly
+ * access the driver_private from the fence driver callbacks.
+ *
+ * All members except "count" are initialized at creation and
+ * never touched after that. No protection needed.
+ *
+ * This struct is private to the fence implementation and to the fence
+ * driver callbacks, and may otherwise be used by drivers only to
+ * obtain the derived device_private object using container_of().
+ */
+
+struct ttm_fence_device {
+	struct ttm_mem_global *mem_glob;
+	struct ttm_fence_class_manager *fence_class;
+	uint32_t num_classes;
+	atomic_t count;
+	const struct ttm_fence_driver *driver;
+};
+
+/**
+ * struct ttm_fence_class_init
+ *
+ * @wrap_diff: Fence sequence number wrap indicator. If
+ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
+ * considered to be older than sequence2.
+ * @flush_diff: Fence sequence number flush indicator.
+ * If a non-completely-signaled fence has a fence sequence number
+ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
+ * the fence is considered too old and it will be flushed upon the
+ * next call of ttm_fence_flush_old(), to make sure no fences with
+ * stale sequence numbers remain unsignaled. @flush_diff should
+ * be sufficiently less than @wrap_diff.
+ * @sequence_mask: Mask with valid bits of the fence sequence
+ * number set to 1.
+ *
+ * This struct is used as input to ttm_fence_device_init.
+ */
+
+struct ttm_fence_class_init {
+	uint32_t wrap_diff;
+	uint32_t flush_diff;
+	uint32_t sequence_mask;
+};
+
+/**
+ * struct ttm_fence_driver
+ *
+ * @has_irq: Called by a potential waiter.
Should return 1 if a + * fence object with indicated parameters is expected to signal + * automatically, and 0 if the fence implementation needs to + * repeatedly call @poll to make it signal. + * @emit: Make sure a fence with the given parameters is + * present in the indicated command stream. Return its sequence number + * in "breadcrumb". + * @poll: Check and report sequences of the given "fence_class" + * that have signaled "types" + * @flush: Make sure that the types indicated by the bitfield + * ttm_fence_class_manager::pending_flush will eventually + * signal. These bits have been put together using the + * result from the needed_flush function described below. + * @needed_flush: Given the fence_class and fence_types indicated by + * "fence", and the last received fence sequence of this + * fence class, indicate what types need a fence flush to + * signal. Return as a bitfield. + * @wait: Set to non-NULL if the driver wants to override the fence + * wait implementation. Return 0 on success, -EBUSY on failure, + * and -ERESTART if interruptible and a signal is pending. + * @signaled: Driver callback that is called whenever a + * ttm_fence_object::signaled_types has changed status. + * This function is called from atomic context, + * with the ttm_fence_class_manager::lock held in write mode. + * @lockup: Driver callback that is called whenever a wait has exceeded + * the lifetime of a fence object. + * If there is a GPU lockup, + * this function should, if possible, reset the GPU, + * call the ttm_fence_handler with an error status, and + * return. If no lockup was detected, simply extend the + * fence timeout_jiffies and return. The driver might + * want to protect the lockup check with a mutex and cache a + * non-locked-up status for a while to avoid an excessive + * amount of lockup checks from every waiting thread. + */ + +struct ttm_fence_driver { + bool (*has_irq) (struct ttm_fence_device * fdev, + uint32_t fence_class, uint32_t flags); + int (*emit) (struct ttm_fence_device * fdev, + uint32_t fence_class, + uint32_t flags, + uint32_t * breadcrumb, unsigned long *timeout_jiffies); + void (*flush) (struct ttm_fence_device * fdev, uint32_t fence_class); + void (*poll) (struct ttm_fence_device * fdev, + uint32_t fence_class, uint32_t types); + uint32_t(*needed_flush) + (struct ttm_fence_object * fence); + int (*wait) (struct ttm_fence_object * fence, bool lazy, + bool interruptible, uint32_t mask); + void (*signaled) (struct ttm_fence_object * fence); + void (*lockup) (struct ttm_fence_object * fence, uint32_t fence_types); +}; + +/** + * function ttm_fence_device_init + * + * @num_classes: Number of fence classes for this fence implementation. + * @mem_global: Pointer to the global memory accounting info. + * @fdev: Pointer to an uninitialised struct ttm_fence_device. + * @init: Array of initialization info for each fence class. + * @replicate_init: Use the first @init initialization info for all classes. + * @driver: Driver callbacks. + * + * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if + * out-of-memory. Otherwise returns 0. + */ +extern int +ttm_fence_device_init(int num_classes, + struct ttm_mem_global *mem_glob, + struct ttm_fence_device *fdev, + const struct ttm_fence_class_init *init, + bool replicate_init, + const struct ttm_fence_driver *driver); + +/** + * function ttm_fence_device_release + * + * @fdev: Pointer to the fence device. + * + * Release all resources held by a fence device. 
Note that before + * this function is called, the caller must have made sure all fence + * objects belonging to this fence device are completely signaled. + */ + +extern void ttm_fence_device_release(struct ttm_fence_device *fdev); + +/** + * ttm_fence_handler - the fence handler. + * + * @fdev: Pointer to the fence device. + * @fence_class: Fence class that signals. + * @sequence: Signaled sequence. + * @type: Types that signal. + * @error: Error from the engine. + * + * This function signals all fences with a sequence previous to the + * @sequence argument, and belonging to @fence_class. The signaled fence + * types are provided in @type. If error is non-zero, the error member + * of the fence with sequence = @sequence is set to @error. This value + * may be reported back to user-space, indicating, for example an illegal + * 3D command or illegal mpeg data. + * + * This function is typically called from the driver::poll method when the + * command sequence preceding the fence marker has executed. It should be + * called with the ttm_fence_class_manager::lock held in write mode and + * may be called from interrupt context. + */ + +extern void +ttm_fence_handler(struct ttm_fence_device *fdev, + uint32_t fence_class, + uint32_t sequence, uint32_t type, uint32_t error); + +/** + * ttm_fence_driver_from_dev + * + * @fdev: The ttm fence device. + * + * Returns a pointer to the fence driver struct. + */ + +static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(struct + ttm_fence_device + *fdev) +{ + return fdev->driver; +} + +/** + * ttm_fence_driver + * + * @fence: Pointer to a ttm fence object. + * + * Returns a pointer to the fence driver struct. + */ + +static inline const struct ttm_fence_driver *ttm_fence_driver(struct + ttm_fence_object + *fence) +{ + return ttm_fence_driver_from_dev(fence->fdev); +} + +/** + * ttm_fence_fc + * + * @fence: Pointer to a ttm fence object. + * + * Returns a pointer to the struct ttm_fence_class_manager for the + * fence class of @fence. + */ + +static inline struct ttm_fence_class_manager *ttm_fence_fc(struct + ttm_fence_object + *fence) +{ + return &fence->fdev->fence_class[fence->fence_class]; +} + +#endif diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c new file mode 100644 index 0000000..d9bb787 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c @@ -0,0 +1,242 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#include
+#include "ttm/ttm_fence_user.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_fence_driver.h"
+#include "ttm/ttm_userobj_api.h"
+
+/**
+ * struct ttm_fence_user_object
+ *
+ * @base: The base object used for user-space visibility and refcounting.
+ *
+ * @fence: The fence object itself.
+ *
+ */
+
+struct ttm_fence_user_object {
+	struct ttm_base_object base;
+	struct ttm_fence_object fence;
+};
+
+static struct ttm_fence_user_object *
+ttm_fence_user_object_lookup(struct ttm_object_file *tfile, uint32_t handle)
+{
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
+		       (unsigned long)handle);
+		return NULL;
+	}
+
+	if (unlikely(base->object_type != ttm_fence_type)) {
+		ttm_base_object_unref(&base);
+		printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
+		       (unsigned long)handle);
+		return NULL;
+	}
+
+	return container_of(base, struct ttm_fence_user_object, base);
+}
+
+/*
+ * The fence object destructor.
+ */
+
+static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
+{
+	struct ttm_fence_user_object *ufence =
+	    container_of(fence, struct ttm_fence_user_object, fence);
+
+	ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
+	kfree(ufence);
+}
+
+/*
+ * The base object destructor. We basically only unreference the
+ * attached fence object.
+ */
+
+static void ttm_fence_user_release(struct ttm_base_object **p_base)
+{
+	struct ttm_fence_user_object *ufence;
+	struct ttm_base_object *base = *p_base;
+	struct ttm_fence_object *fence;
+
+	*p_base = NULL;
+
+	if (unlikely(base == NULL))
+		return;
+
+	ufence = container_of(base, struct ttm_fence_user_object, base);
+	fence = &ufence->fence;
+	ttm_fence_object_unref(&fence);
+}
+
+int
+ttm_fence_user_create(struct ttm_fence_device *fdev,
+		      struct ttm_object_file *tfile,
+		      uint32_t fence_class,
+		      uint32_t fence_types,
+		      uint32_t create_flags,
+		      struct ttm_fence_object **fence, uint32_t * user_handle)
+{
+	int ret;
+	struct ttm_fence_object *tmp;
+	struct ttm_fence_user_object *ufence;
+
+	ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
+	if (unlikely(ret != 0))
+		return -ENOMEM;
+
+	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
+	if (unlikely(ufence == NULL)) {
+		ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
+		return -ENOMEM;
+	}
+
+	ret = ttm_fence_object_init(fdev,
+				    fence_class,
+				    fence_types, create_flags,
+				    &ttm_fence_user_destroy, &ufence->fence);
+
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	/*
+	 * One fence ref is held by the fence ptr we return.
+	 * The other one by the base object. Need to up the
+	 * fence refcount before we publish this object to
+	 * user-space.
+ */ + + tmp = ttm_fence_object_ref(&ufence->fence); + ret = ttm_base_object_init(tfile, &ufence->base, + false, ttm_fence_type, + &ttm_fence_user_release, NULL); + + if (unlikely(ret != 0)) + goto out_err1; + + *fence = &ufence->fence; + *user_handle = ufence->base.hash.key; + + return 0; + out_err1: + ttm_fence_object_unref(&tmp); + tmp = &ufence->fence; + ttm_fence_object_unref(&tmp); + return ret; + out_err0: + ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false); + kfree(ufence); + return ret; +} + +int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data) +{ + int ret; + union ttm_fence_signaled_arg *arg = data; + struct ttm_fence_object *fence; + struct ttm_fence_info info; + struct ttm_fence_user_object *ufence; + struct ttm_base_object *base; + ret = 0; + + ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle); + if (unlikely(ufence == NULL)) + return -EINVAL; + + fence = &ufence->fence; + + if (arg->req.flush) { + ret = ttm_fence_object_flush(fence, arg->req.fence_type); + if (unlikely(ret != 0)) + goto out; + } + + info = ttm_fence_get_info(fence); + arg->rep.signaled_types = info.signaled_types; + arg->rep.fence_error = info.error; + + out: + base = &ufence->base; + ttm_base_object_unref(&base); + return ret; +} + +int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data) +{ + int ret; + union ttm_fence_finish_arg *arg = data; + struct ttm_fence_user_object *ufence; + struct ttm_base_object *base; + struct ttm_fence_object *fence; + ret = 0; + + ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle); + if (unlikely(ufence == NULL)) + return -EINVAL; + + fence = &ufence->fence; + + ret = ttm_fence_object_wait(fence, + arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY, + true, arg->req.fence_type); + if (likely(ret == 0)) { + struct ttm_fence_info info = ttm_fence_get_info(fence); + + arg->rep.signaled_types = info.signaled_types; + arg->rep.fence_error = info.error; + } + + base = &ufence->base; + ttm_base_object_unref(&base); + + return ret; +} + +int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data) +{ + struct ttm_fence_unref_arg *arg = data; + int ret = 0; + + ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type); + return ret; +} diff --git a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h new file mode 100644 index 0000000..0cad597 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h @@ -0,0 +1,147 @@ +/************************************************************************** + * + * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+/*
+ * Authors
+ * Thomas Hellström
+ */
+
+#ifndef TTM_FENCE_USER_H
+#define TTM_FENCE_USER_H
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#endif
+
+#define TTM_FENCE_MAJOR 0
+#define TTM_FENCE_MINOR 1
+#define TTM_FENCE_PL 0
+#define TTM_FENCE_DATE "080819"
+
+/**
+ * struct ttm_fence_signaled_req
+ *
+ * @handle: Handle to the fence object. Input.
+ *
+ * @fence_type: Fence types we want to flush. Input.
+ *
+ * @flush: Boolean. Flush the indicated fence_types. Input.
+ *
+ * Argument to the TTM_FENCE_SIGNALED ioctl.
+ */
+
+struct ttm_fence_signaled_req {
+	uint32_t handle;
+	uint32_t fence_type;
+	int32_t flush;
+	uint32_t pad64;
+};
+
+/**
+ * struct ttm_fence_rep
+ *
+ * @signaled_types: Fence type that has signaled.
+ *
+ * @fence_error: Command execution error.
+ * Hardware errors that are consequences of the execution
+ * of the command stream preceding the fence are reported
+ * here.
+ *
+ * Output argument to the TTM_FENCE_SIGNALED and
+ * TTM_FENCE_FINISH ioctls.
+ */
+
+struct ttm_fence_rep {
+	uint32_t signaled_types;
+	uint32_t fence_error;
+};
+
+union ttm_fence_signaled_arg {
+	struct ttm_fence_signaled_req req;
+	struct ttm_fence_rep rep;
+};
+
+/*
+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
+ *
+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
+ * wait.
+ *
+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
+ * but return -EBUSY if the buffer is busy.
+ */
+
+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
+
+/**
+ * struct ttm_fence_finish_req
+ *
+ * @handle: Handle to the fence object. Input.
+ *
+ * @fence_type: Fence types we want to finish.
+ *
+ * @mode: Wait mode.
+ *
+ * Input to the TTM_FENCE_FINISH ioctl.
+ */
+
+struct ttm_fence_finish_req {
+	uint32_t handle;
+	uint32_t fence_type;
+	uint32_t mode;
+	uint32_t pad64;
+};
+
+union ttm_fence_finish_arg {
+	struct ttm_fence_finish_req req;
+	struct ttm_fence_rep rep;
+};
+
+/**
+ * struct ttm_fence_unref_arg
+ *
+ * @handle: Handle to the fence object.
+ *
+ * Argument to the TTM_FENCE_UNREF ioctl.
+ */
+
+struct ttm_fence_unref_arg {
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+/*
+ * Ioctl offsets from extension start.
+ */
+
+#define TTM_FENCE_SIGNALED 0x01
+#define TTM_FENCE_FINISH 0x02
+#define TTM_FENCE_UNREF 0x03
+
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_lock.c b/drivers/gpu/drm/psb/ttm/ttm_lock.c
new file mode 100644
index 0000000..a3b503f
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.c
@@ -0,0 +1,162 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "ttm/ttm_lock.h" +#include +#include +#include +#include + +void ttm_lock_init(struct ttm_lock *lock) +{ + init_waitqueue_head(&lock->queue); + atomic_set(&lock->write_lock_pending, 0); + atomic_set(&lock->readers, 0); + lock->kill_takers = false; + lock->signal = SIGKILL; +} + +void ttm_read_unlock(struct ttm_lock *lock) +{ + if (atomic_dec_and_test(&lock->readers)) + wake_up_all(&lock->queue); +} + +int ttm_read_lock(struct ttm_lock *lock, bool interruptible) +{ + while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { + int ret; + + if (!interruptible) { + wait_event(lock->queue, + atomic_read(&lock->write_lock_pending) == 0); + continue; + } + ret = wait_event_interruptible + (lock->queue, atomic_read(&lock->write_lock_pending) == 0); + if (ret) + return -ERESTART; + } + + while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { + int ret; + if (!interruptible) { + wait_event(lock->queue, + atomic_read(&lock->readers) != -1); + continue; + } + ret = wait_event_interruptible + (lock->queue, atomic_read(&lock->readers) != -1); + if (ret) + return -ERESTART; + } + + if (unlikely(lock->kill_takers)) { + send_sig(lock->signal, current, 0); + ttm_read_unlock(lock); + return -ERESTART; + } + + return 0; +} + +static int __ttm_write_unlock(struct ttm_lock *lock) +{ + if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) + return -EINVAL; + wake_up_all(&lock->queue); + return 0; +} + +static void ttm_write_lock_remove(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct ttm_lock *lock = container_of(base, struct ttm_lock, base); + int ret; + + *p_base = NULL; + ret = __ttm_write_unlock(lock); + BUG_ON(ret != 0); +} + +int ttm_write_lock(struct ttm_lock *lock, + bool interruptible, + struct ttm_object_file *tfile) +{ + int ret = 0; + + atomic_inc(&lock->write_lock_pending); + + while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { + if (!interruptible) { + wait_event(lock->queue, + atomic_read(&lock->readers) == 0); + continue; + } + ret = wait_event_interruptible + (lock->queue, atomic_read(&lock->readers) == 0); + + if (ret) { + if (atomic_dec_and_test(&lock->write_lock_pending)) + wake_up_all(&lock->queue); + return -ERESTART; + } + } + + if 
(atomic_dec_and_test(&lock->write_lock_pending)) + wake_up_all(&lock->queue); + + if (unlikely(lock->kill_takers)) { + send_sig(lock->signal, current, 0); + __ttm_write_unlock(lock); + return -ERESTART; + } + + /* + * Add a base-object, the destructor of which will + * make sure the lock is released if the client dies + * while holding it. + */ + + ret = ttm_base_object_init(tfile, &lock->base, false, + ttm_lock_type, &ttm_write_lock_remove, NULL); + if (ret) + (void)__ttm_write_unlock(lock); + + return ret; +} + +int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile) +{ + return ttm_ref_object_base_unref(tfile, + lock->base.hash.key, TTM_REF_USAGE); +} diff --git a/drivers/gpu/drm/psb/ttm/ttm_lock.h b/drivers/gpu/drm/psb/ttm/ttm_lock.h new file mode 100644 index 0000000..0169ad7 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_lock.h @@ -0,0 +1,181 @@ +/************************************************************************** + * + * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +/** @file ttm_lock.h + * This file implements a simple replacement for the buffer manager use + * of the DRM heavyweight hardware lock. + * The lock is a read-write lock. Taking it in read mode is fast, and + * intended for in-kernel use only. + * Taking it in write mode is slow. + * + * The write mode is used only when there is a need to block all + * user-space processes from validating buffers. + * It's allowed to leave kernel space with the write lock held. + * If a user-space process dies while having the write-lock, + * it will be released during the file descriptor release. + * + * The read lock is typically placed at the start of an IOCTL- or + * user-space callable function that may end up allocating a memory area. + * This includes setstatus, super-ioctls and faults; the latter may move + * unmappable regions to mappable. It's a bug to leave kernel space with the + * read lock held. + * + * Both read- and write lock taking is interruptible for low signal-delivery + * latency. The locking functions will return -ERESTART if interrupted by a + * signal. 
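 *
 * A minimal read-side usage sketch (the caller, its "dev_priv" layout
 * and psb_validate_buffers() are hypothetical, not part of this patch):
 *
 *	ret = ttm_read_lock(&dev_priv->ttm_lock, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = psb_validate_buffers(dev_priv);
 *	ttm_read_unlock(&dev_priv->ttm_lock);
 *	return ret;
 *
 * where psb_validate_buffers() stands in for any path that may
 * allocate or move buffer memory.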
+ *
+ * Locking order: The lock should be taken BEFORE any TTM mutexes
+ * or spinlocks.
+ *
+ * Typical usages:
+ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
+ * stops it from being repopulated.
+ * b) out-of-VRAM or out-of-aperture space, in which case the process
+ * receiving the out-of-space notification may take the lock in write mode
+ * and evict all buffers before it starts validating its own buffers.
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include "ttm_object.h"
+#include
+#include
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
+ * write lock starvation.
+ * @readers: The lock status: A negative number indicates that a write lock is
+ * held. Positive values indicate number of concurrent readers.
+ * @kill_takers: Boolean indicating whether subsequent lock takers should
+ * be sent @signal instead of being granted the lock.
+ * @signal: Signal to send when @kill_takers is true.
+ */
+
+struct ttm_lock {
+	struct ttm_base_object base;
+	wait_queue_head_t queue;
+	atomic_t write_lock_pending;
+	atomic_t readers;
+	bool kill_takers;
+	int signal;
+};
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTART If interrupted by a signal and interruptible is true.
+ */
+
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
+ * application taking the lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTART If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
+			  struct ttm_object_file *tfile);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
+ * application taking the lock.
+ *
+ * Releases a write lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_write_unlock(struct ttm_lock *lock,
+			    struct ttm_object_file *tfile);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
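+ *
+ * The same teardown expressed as a sketch (all call-sites other than
+ * ttm_lock_set_kill() itself are hypothetical):
+ *
+ *	ttm_write_lock(&dev_priv->ttm_lock, false, tfile);
+ *	ttm_lock_set_kill(&dev_priv->ttm_lock, true, SIGTERM);
+ *	psb_take_down_ttm_resources(dev_priv);
+ *
+ * The write lock is then dropped automatically on file release, and any
+ * client taking the lock afterwards receives SIGTERM and -ERESTART.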
+ * + */ + +static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, int signal) +{ + lock->kill_takers = val; + if (val) + lock->signal = signal; +} + +#endif diff --git a/drivers/gpu/drm/psb/ttm/ttm_memory.c b/drivers/gpu/drm/psb/ttm/ttm_memory.c new file mode 100644 index 0000000..75df380 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_memory.c @@ -0,0 +1,232 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "ttm/ttm_memory.h" +#include +#include +#include +#include + +#define TTM_MEMORY_ALLOC_RETRIES 4 + +/** + * At this point we only support a single shrink callback. + * Extend this if needed, perhaps using a linked list of callbacks. + * Note that this function is reentrant: + * many threads may try to swap out at any given time. + */ + +static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue, + uint64_t extra) +{ + int ret; + struct ttm_mem_shrink *shrink; + uint64_t target; + uint64_t total_target; + + spin_lock(&glob->lock); + if (glob->shrink == NULL) + goto out; + + if (from_workqueue) { + target = glob->swap_limit; + total_target = glob->total_memory_swap_limit; + } else if (capable(CAP_SYS_ADMIN)) { + total_target = glob->emer_total_memory; + target = glob->emer_memory; + } else { + total_target = glob->max_total_memory; + target = glob->max_memory; + } + + total_target = (extra >= total_target) ? 0: total_target - extra; + target = (extra >= target) ? 
0: target - extra; + + while (glob->used_memory > target || + glob->used_total_memory > total_target) { + shrink = glob->shrink; + spin_unlock(&glob->lock); + ret = shrink->do_shrink(shrink); + spin_lock(&glob->lock); + if (unlikely(ret != 0)) + goto out; + } + out: + spin_unlock(&glob->lock); +} + +static void ttm_shrink_work(struct work_struct *work) +{ + struct ttm_mem_global *glob = + container_of(work, struct ttm_mem_global, work); + + ttm_shrink(glob, true, 0ULL); +} + +int ttm_mem_global_init(struct ttm_mem_global *glob) +{ + struct sysinfo si; + uint64_t mem; + + spin_lock_init(&glob->lock); + glob->swap_queue = create_singlethread_workqueue("ttm_swap"); + INIT_WORK(&glob->work, ttm_shrink_work); + init_waitqueue_head(&glob->queue); + + si_meminfo(&si); + + mem = si.totalram - si.totalhigh; + mem *= si.mem_unit; + + glob->max_memory = mem >> 1; + glob->emer_memory = glob->max_memory + (mem >> 2); + glob->swap_limit = glob->max_memory - (mem >> 5); + glob->used_memory = 0; + glob->used_total_memory = 0; + glob->shrink = NULL; + + mem = si.totalram; + mem *= si.mem_unit; + + glob->max_total_memory = mem >> 1; + glob->emer_total_memory = glob->max_total_memory + (mem >> 2); + glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5); + + printk(KERN_INFO "TTM available graphics memory: %llu MiB\n", + glob->max_total_memory >> 20); + printk(KERN_INFO "TTM available object memory: %llu MiB\n", + glob->max_memory >> 20); + printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n", + glob->swap_limit >> 20); + + return 0; +} + +void ttm_mem_global_release(struct ttm_mem_global *glob) +{ + printk(KERN_INFO "Used total memory is %llu bytes.\n", + (unsigned long long)glob->used_total_memory); + flush_workqueue(glob->swap_queue); + destroy_workqueue(glob->swap_queue); + glob->swap_queue = NULL; +} + +static inline void ttm_check_swapping(struct ttm_mem_global *glob) +{ + bool needs_swapping; + + spin_lock(&glob->lock); + needs_swapping = (glob->used_memory > glob->swap_limit || + glob->used_total_memory > + glob->total_memory_swap_limit); + spin_unlock(&glob->lock); + + if (unlikely(needs_swapping)) + (void)queue_work(glob->swap_queue, &glob->work); + +} + +void ttm_mem_global_free(struct ttm_mem_global *glob, + uint64_t amount, bool himem) +{ + spin_lock(&glob->lock); + glob->used_total_memory -= amount; + if (!himem) + glob->used_memory -= amount; + wake_up_all(&glob->queue); + spin_unlock(&glob->lock); +} + +static int ttm_mem_global_reserve(struct ttm_mem_global *glob, + uint64_t amount, bool himem, bool reserve) +{ + uint64_t limit; + uint64_t lomem_limit; + int ret = -ENOMEM; + + spin_lock(&glob->lock); + + if (capable(CAP_SYS_ADMIN)) { + limit = glob->emer_total_memory; + lomem_limit = glob->emer_memory; + } else { + limit = glob->max_total_memory; + lomem_limit = glob->max_memory; + } + + if (unlikely(glob->used_total_memory + amount > limit)) + goto out_unlock; + if (unlikely(!himem && glob->used_memory + amount > lomem_limit)) + goto out_unlock; + + if (reserve) { + glob->used_total_memory += amount; + if (!himem) + glob->used_memory += amount; + } + ret = 0; + out_unlock: + spin_unlock(&glob->lock); + ttm_check_swapping(glob); + + return ret; +} + +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + bool no_wait, bool interruptible, bool himem) +{ + int count = TTM_MEMORY_ALLOC_RETRIES; + + while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) { + if (no_wait) + return -ENOMEM; + if (unlikely(count-- == 0)) + return -ENOMEM; + 
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+	}
+
+	return 0;
+}
+
+size_t ttm_round_pot(size_t size)
+{
+	if ((size & (size - 1)) == 0)
+		return size;
+	else if (size > PAGE_SIZE)
+		return PAGE_ALIGN(size);
+	else {
+		size_t tmp_size = 4;
+
+		while (tmp_size < size)
+			tmp_size <<= 1;
+
+		return tmp_size;
+	}
+}
diff --git a/drivers/gpu/drm/psb/ttm/ttm_memory.h b/drivers/gpu/drm/psb/ttm/ttm_memory.h
new file mode 100644
index 0000000..9bff60f
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.h
@@ -0,0 +1,154 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TTM_MEMORY_H
+#define TTM_MEMORY_H
+
+#include
+#include
+#include
+
+/**
+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
+ *
+ * @do_shrink: The callback function.
+ *
+ * Arguments to the do_shrink function are intended to be passed using
+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
+ * and can be accessed using container_of().
+ */
+
+struct ttm_mem_shrink {
+	int (*do_shrink) (struct ttm_mem_shrink *);
+};
+
+/**
+ * struct ttm_mem_global - Global memory accounting structure.
+ *
+ * @shrink: A single callback to shrink TTM memory usage. Extend this
+ * to a linked list to be able to handle multiple callbacks when needed.
+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
+ * need a separate workqueue since it will spend a lot of time waiting
+ * for the GPU, and this would otherwise block other workqueue tasks.
+ * At this point we use only a single-threaded workqueue.
+ * @work: The workqueue callback for the shrink queue.
+ * @queue: Wait queue for processes suspended waiting for memory.
+ * @lock: Lock to protect the @shrink - and the memory accounting members,
+ * that is, essentially the whole structure with some exceptions.
+ * @emer_memory: Lowmem memory limit available for root.
+ * @max_memory: Lowmem memory limit available for non-root.
+ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
+ * @used_memory: Currently used lowmem memory.
+ * @used_total_memory: Currently used total (lowmem + highmem) memory.
+ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
+ * kicks in.
+ * @max_total_memory: Total memory available to non-root processes.
+ * @emer_total_memory: Total memory available to root processes.
+ *
+ * Note that this structure is not per device. It should be global for all
+ * graphics devices.
+ */
+
+struct ttm_mem_global {
+	struct ttm_mem_shrink *shrink;
+	struct workqueue_struct *swap_queue;
+	struct work_struct work;
+	wait_queue_head_t queue;
+	spinlock_t lock;
+	uint64_t emer_memory;
+	uint64_t max_memory;
+	uint64_t swap_limit;
+	uint64_t used_memory;
+	uint64_t used_total_memory;
+	uint64_t total_memory_swap_limit;
+	uint64_t max_total_memory;
+	uint64_t emer_total_memory;
+};
+
+/**
+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
+ *
+ * @shrink: The object to initialize.
+ * @func: The callback function.
+ */
+
+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
+				       int (*func) (struct ttm_mem_shrink *))
+{
+	shrink->do_shrink = func;
+}
+
+/**
+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to register with.
+ * @shrink: An initialized struct ttm_mem_shrink object to register.
+ *
+ * Returns:
+ * -EBUSY: There's already a callback registered. (May change).
+ */
+
+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
+					  struct ttm_mem_shrink *shrink)
+{
+	spin_lock(&glob->lock);
+	if (glob->shrink != NULL) {
+		spin_unlock(&glob->lock);
+		return -EBUSY;
+	}
+	glob->shrink = shrink;
+	spin_unlock(&glob->lock);
+	return 0;
+}
+
+/**
+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to unregister from.
+ * @shrink: A previously registered struct ttm_mem_shrink object.
+ *
+ */
+
+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
+					     struct ttm_mem_shrink *shrink)
+{
+	spin_lock(&glob->lock);
+	BUG_ON(glob->shrink != shrink);
+	glob->shrink = NULL;
+	spin_unlock(&glob->lock);
+}
+
+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+				bool no_wait, bool interruptible, bool himem);
+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
+				uint64_t amount, bool himem);
+extern size_t ttm_round_pot(size_t size);
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_object.c b/drivers/gpu/drm/psb/ttm/ttm_object.c
new file mode 100644
index 0000000..294a795
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_object.c
@@ -0,0 +1,444 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ +/** @file ttm_ref_object.c + * + * Base- and reference object implementation for the various + * ttm objects. Implements reference counting, minimal security checks + * and release on file close. + */ + +/** + * struct ttm_object_file + * + * @tdev: Pointer to the ttm_object_device. + * + * @lock: Lock that protects the ref_list list and the + * ref_hash hash tables. + * + * @ref_list: List of ttm_ref_objects to be destroyed at + * file release. + * + * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, + * for fast lookup of ref objects given a base object. + */ + +#include "ttm/ttm_object.h" +#include +#include +#include +#include + +struct ttm_object_file { + struct ttm_object_device *tdev; + rwlock_t lock; + struct list_head ref_list; + struct drm_open_hash ref_hash[TTM_REF_NUM]; + struct kref refcount; +}; + +/** + * struct ttm_object_device + * + * @object_lock: lock that protects the object_hash hash table. + * + * @object_hash: hash table for fast lookup of object global names. + * + * @object_count: Per device object count. + * + * This is the per-device data structure needed for ttm object management. + */ + +struct ttm_object_device { + rwlock_t object_lock; + struct drm_open_hash object_hash; + atomic_t object_count; + struct ttm_mem_global *mem_glob; +}; + +/** + * struct ttm_ref_object + * + * @hash: Hash entry for the per-file object reference hash. + * + * @head: List entry for the per-file list of ref-objects. + * + * @kref: Ref count. + * + * @obj: Base object this ref object is referencing. + * + * @ref_type: Type of ref object. + * + * This is similar to an idr object, but it also has a hash table entry + * that allows lookup with a pointer to the referenced object as a key. In + * that way, one can easily detect whether a base object is referenced by + * a particular ttm_object_file. It also carries a ref count to avoid creating + * multiple ref objects if a ttm_object_file references the same base object more + * than once. 
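+ *
+ * Concretely (see ttm_ref_object_add() below): if user-space adds a
+ * TTM_REF_USAGE reference twice to the same base object, the second
+ * call finds the first ref object in @hash, takes a kref on it and
+ * reports *existed == true rather than allocating a new ref object.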
+ */ + +struct ttm_ref_object { + struct drm_hash_item hash; + struct list_head head; + struct kref kref; + struct ttm_base_object *obj; + enum ttm_ref_type ref_type; + struct ttm_object_file *tfile; +}; + +static inline struct ttm_object_file * +ttm_object_file_ref(struct ttm_object_file *tfile) +{ + kref_get(&tfile->refcount); + return tfile; +} + +static void ttm_object_file_destroy(struct kref *kref) +{ + struct ttm_object_file *tfile = + container_of(kref, struct ttm_object_file, refcount); + +// printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile); + kfree(tfile); +} + + +static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile) +{ + struct ttm_object_file *tfile = *p_tfile; + + *p_tfile = NULL; + kref_put(&tfile->refcount, ttm_object_file_destroy); +} + + +int ttm_base_object_init(struct ttm_object_file *tfile, + struct ttm_base_object *base, + bool shareable, + enum ttm_object_type object_type, + void (*refcount_release) (struct ttm_base_object **), + void (*ref_obj_release) (struct ttm_base_object *, + enum ttm_ref_type ref_type)) +{ + struct ttm_object_device *tdev = tfile->tdev; + int ret; + + base->shareable = shareable; + base->tfile = ttm_object_file_ref(tfile); + base->refcount_release = refcount_release; + base->ref_obj_release = ref_obj_release; + base->object_type = object_type; + write_lock(&tdev->object_lock); + kref_init(&base->refcount); + ret = drm_ht_just_insert_please(&tdev->object_hash, + &base->hash, + (unsigned long)base, 31, 0, 0); + write_unlock(&tdev->object_lock); + if (unlikely(ret != 0)) + goto out_err0; + + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + if (unlikely(ret != 0)) + goto out_err1; + + ttm_base_object_unref(&base); + + return 0; + out_err1: + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); + out_err0: + return ret; +} + +static void ttm_release_base(struct kref *kref) +{ + struct ttm_base_object *base = + container_of(kref, struct ttm_base_object, refcount); + struct ttm_object_device *tdev = base->tfile->tdev; + + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); + write_unlock(&tdev->object_lock); + if (base->refcount_release) { + ttm_object_file_unref(&base->tfile); + base->refcount_release(&base); + } + write_lock(&tdev->object_lock); +} + +void ttm_base_object_unref(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct ttm_object_device *tdev = base->tfile->tdev; + + // printk(KERN_INFO "TTM base object unref.\n"); + *p_base = NULL; + + /* + * Need to take the lock here to avoid racing with + * users trying to look up the object. 
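+ * The final kref_put() and the hash removal in ttm_release_base()
+ * must appear atomic to ttm_base_object_lookup(), which takes the
+ * same lock for reading before doing kref_get() on a found object.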
+ */ + + write_lock(&tdev->object_lock); + (void)kref_put(&base->refcount, &ttm_release_base); + write_unlock(&tdev->object_lock); +} + +struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, + uint32_t key) +{ + struct ttm_object_device *tdev = tfile->tdev; + struct ttm_base_object *base; + struct drm_hash_item *hash; + int ret; + + read_lock(&tdev->object_lock); + ret = drm_ht_find_item(&tdev->object_hash, key, &hash); + + if (likely(ret == 0)) { + base = drm_hash_entry(hash, struct ttm_base_object, hash); + kref_get(&base->refcount); + } + read_unlock(&tdev->object_lock); + + if (unlikely(ret != 0)) + return NULL; + + if (tfile != base->tfile && !base->shareable) { + printk(KERN_ERR "Attempted access of non-shareable object.\n"); + ttm_base_object_unref(&base); + return NULL; + } + + return base; +} + +int ttm_ref_object_add(struct ttm_object_file *tfile, + struct ttm_base_object *base, + enum ttm_ref_type ref_type, bool *existed) +{ + struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct ttm_ref_object *ref; + struct drm_hash_item *hash; + struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; + int ret = -EINVAL; + + if (existed != NULL) + *existed = true; + + while (ret == -EINVAL) { + read_lock(&tfile->lock); + ret = drm_ht_find_item(ht, base->hash.key, &hash); + + if (ret == 0) { + ref = drm_hash_entry(hash, struct ttm_ref_object, hash); + kref_get(&ref->kref); + read_unlock(&tfile->lock); + break; + } + + read_unlock(&tfile->lock); + ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false); + if (unlikely(ret != 0)) + return ret; + ref = kmalloc(sizeof(*ref), GFP_KERNEL); + if (unlikely(ref == NULL)) { + ttm_mem_global_free(mem_glob, sizeof(*ref), false); + return -ENOMEM; + } + + ref->hash.key = base->hash.key; + ref->obj = base; + ref->tfile = tfile; + ref->ref_type = ref_type; + kref_init(&ref->kref); + + write_lock(&tfile->lock); + ret = drm_ht_insert_item(ht, &ref->hash); + + if (likely(ret == 0)) { + list_add_tail(&ref->head, &tfile->ref_list); + kref_get(&base->refcount); + write_unlock(&tfile->lock); + if (existed != NULL) + *existed = false; + break; + } + + write_unlock(&tfile->lock); + BUG_ON(ret != -EINVAL); + + ttm_mem_global_free(mem_glob, sizeof(*ref), false); + kfree(ref); + } + + return ret; +} + +static void ttm_ref_object_release(struct kref *kref) +{ + struct ttm_ref_object *ref = + container_of(kref, struct ttm_ref_object, kref); + struct ttm_base_object *base = ref->obj; + struct ttm_object_file *tfile = ref->tfile; + struct drm_open_hash *ht; + struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; + + ht = &tfile->ref_hash[ref->ref_type]; + (void)drm_ht_remove_item(ht, &ref->hash); + list_del(&ref->head); + write_unlock(&tfile->lock); + + if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) + base->ref_obj_release(base, ref->ref_type); + + ttm_base_object_unref(&ref->obj); + ttm_mem_global_free(mem_glob, sizeof(*ref), false); + kfree(ref); + write_lock(&tfile->lock); +} + +int ttm_ref_object_base_unref(struct ttm_object_file *tfile, + unsigned long key, enum ttm_ref_type ref_type) +{ + struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct ttm_ref_object *ref; + struct drm_hash_item *hash; + int ret; + + write_lock(&tfile->lock); + ret = drm_ht_find_item(ht, key, &hash); + if (unlikely(ret != 0)) { + write_unlock(&tfile->lock); + return -EINVAL; + } + ref = drm_hash_entry(hash, struct ttm_ref_object, hash); + kref_put(&ref->kref, ttm_ref_object_release); + write_unlock(&tfile->lock); + 
return 0; +} + +void ttm_object_file_release(struct ttm_object_file **p_tfile) +{ + struct ttm_ref_object *ref; + struct list_head *list; + unsigned int i; + struct ttm_object_file *tfile = *p_tfile; + + *p_tfile = NULL; + write_lock(&tfile->lock); + + /* + * Since we release the lock within the loop, we have to + * restart it from the beginning each time. + */ + + while (!list_empty(&tfile->ref_list)) { + list = tfile->ref_list.next; + ref = list_entry(list, struct ttm_ref_object, head); + ttm_ref_object_release(&ref->kref); + } + + for (i = 0; i < TTM_REF_NUM; ++i) { + drm_ht_remove(&tfile->ref_hash[i]); + } + + write_unlock(&tfile->lock); + ttm_object_file_unref(&tfile); +} + +struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, + unsigned int hash_order) +{ + struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); + unsigned int i; + unsigned int j = 0; + int ret; + + if (unlikely(tfile == NULL)) + return NULL; + + rwlock_init(&tfile->lock); + tfile->tdev = tdev; + kref_init(&tfile->refcount); + INIT_LIST_HEAD(&tfile->ref_list); + + for (i = 0; i < TTM_REF_NUM; ++i) { + ret = drm_ht_create(&tfile->ref_hash[i], hash_order); + if (ret) { + j = i; + goto out_err; + } + } + + return tfile; + out_err: + for (i = 0; i < j; ++i) { + drm_ht_remove(&tfile->ref_hash[i]); + } + kfree(tfile); + + return NULL; +} + +struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global + *mem_glob, + unsigned int hash_order) +{ + struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); + int ret; + + if (unlikely(tdev == NULL)) + return NULL; + + tdev->mem_glob = mem_glob; + rwlock_init(&tdev->object_lock); + atomic_set(&tdev->object_count, 0); + ret = drm_ht_create(&tdev->object_hash, hash_order); + + if (likely(ret == 0)) + return tdev; + + kfree(tdev); + return NULL; +} + +void ttm_object_device_release(struct ttm_object_device **p_tdev) +{ + struct ttm_object_device *tdev = *p_tdev; + + *p_tdev = NULL; + + write_lock(&tdev->object_lock); + drm_ht_remove(&tdev->object_hash); + write_unlock(&tdev->object_lock); + + kfree(tdev); +} diff --git a/drivers/gpu/drm/psb/ttm/ttm_object.h b/drivers/gpu/drm/psb/ttm/ttm_object.h new file mode 100644 index 0000000..0925ac5 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_object.h @@ -0,0 +1,269 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+/** @file ttm_ref_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include
+#include
+#include
+#include
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+	TTM_REF_USAGE,
+	TTM_REF_SYNCCPU_READ,
+	TTM_REF_SYNCCPU_WRITE,
+	TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typex types.
+ */
+
+enum ttm_object_type {
+	ttm_fence_type,
+	ttm_buffer_type,
+	ttm_lock_type,
+	ttm_driver_type0 = 256,
+	ttm_driver_type1
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+	struct drm_hash_item hash;
+	enum ttm_object_type object_type;
+	bool shareable;
+	struct ttm_object_file *tfile;
+	struct kref refcount;
+	void (*refcount_release) (struct ttm_base_object **base);
+	void (*ref_obj_release) (struct ttm_base_object *base,
+				 enum ttm_ref_type ref_type);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
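+ *
+ * A typical embedding sketch ("struct my_obj" and its release function
+ * are hypothetical; compare ttm_bo_user_object in ttm_placement_user.c):
+ *
+ *	struct my_obj {
+ *		struct ttm_base_object base;
+ *	};
+ *
+ *	static void my_obj_release(struct ttm_base_object **p_base)
+ *	{
+ *		struct my_obj *obj =
+ *			container_of(*p_base, struct my_obj, base);
+ *		*p_base = NULL;
+ *		kfree(obj);
+ *	}
+ *
+ *	ret = ttm_base_object_init(tfile, &obj->base, true,
+ *				   ttm_driver_type0, &my_obj_release,
+ *				   NULL);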
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+				struct ttm_base_object *base,
+				bool shareable,
+				enum ttm_object_type type,
+				void (*refcount_release) (struct ttm_base_object **),
+				void (*ref_obj_release) (struct ttm_base_object *,
+							 enum ttm_ref_type ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * Also verifies that the object is visible to the application, by
+ * comparing the @tfile argument and checking the object shareable flag.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+						      *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be simple refcounting to prevent object
+ * destruction. When user-space takes a lock, it can add a ref object to
+ * that lock to make sure the lock is released if the application dies. A
+ * ref object will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+			      struct ttm_base_object *base,
+			      enum ttm_ref_type ref_type, bool *existed);
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+				     unsigned long key,
+				     enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object_file
+ *
+ * @tdev: A struct ttm_object_device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+						    *tdev,
+						    unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
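+ *
+ * The intended pairing with ttm_object_file_init() in a driver's file
+ * operations (hypothetical open/release handlers; the hash order of 10
+ * is an arbitrary example value chosen by the driver):
+ *
+ *	priv->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+ *	...
+ *	ttm_object_file_release(&priv->tfile);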
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @mem_glob: Pointer to the struct ttm_mem_global used for accounting.
+ * @hash_order: Order of hash table used to hash the base objects.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *ttm_object_device_init
+    (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
new file mode 100644
index 0000000..701be0d
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
@@ -0,0 +1,178 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#include "ttm/ttm_pat_compat.h"
+#include
+#include
+#include
+#include
+
+#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
+#include
+#include
+#include
+#include
+#include
+
+#ifndef MSR_IA32_CR_PAT
+#define MSR_IA32_CR_PAT 0x0277
+#endif
+
+#ifndef _PAGE_PAT
+#define _PAGE_PAT 0x080
+#endif
+
+static int ttm_has_pat = 0;
+
+/*
+ * Used at resume-time when CPUs are fired up.
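+ *
+ * Each of the eight PAT entries is one byte of the 64-bit
+ * IA32_CR_PAT MSR; entries 0-3 live in the low dword (v1) and
+ * entries 4-7 in the high dword (v2). Clearing the low three bits
+ * of v2 and OR-ing in 0x01 below therefore reprograms entry PAT4
+ * to memory type 01h, write-combining.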
+ */ + +static void ttm_pat_ipi_handler(void *notused) +{ + u32 v1, v2; + + rdmsr(MSR_IA32_CR_PAT, v1, v2); + v2 &= 0xFFFFFFF8; + v2 |= 0x00000001; + wbinvd(); + wrmsr(MSR_IA32_CR_PAT, v1, v2); + wbinvd(); + __flush_tlb_all(); +} + +static void ttm_pat_enable(void) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1, 1) != 0) { +#else + if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0) { +#endif + printk(KERN_ERR "Timed out setting up CPU PAT.\n"); + } +} + +void ttm_pat_resume(void) +{ + if (unlikely(!ttm_has_pat)) + return; + + ttm_pat_enable(); +} + +static int psb_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + if (action == CPU_ONLINE) { + ttm_pat_resume(); + } + + return 0; +} + +static struct notifier_block psb_nb = { + .notifier_call = psb_cpu_callback, + .priority = 1 +}; + +/* + * Set i386 PAT entry PAT4 to Write-combining memory type on all processors. + */ + +void ttm_pat_init(void) +{ + if (likely(ttm_has_pat)) + return; + + if (!boot_cpu_has(X86_FEATURE_PAT)) { + return; + } + + ttm_pat_enable(); + + if (num_present_cpus() > 1) + register_cpu_notifier(&psb_nb); + + ttm_has_pat = 1; +} + +void ttm_pat_takedown(void) +{ + if (unlikely(!ttm_has_pat)) + return; + + if (num_present_cpus() > 1) + unregister_cpu_notifier(&psb_nb); + + ttm_has_pat = 0; +} + +pgprot_t pgprot_ttm_x86_wc(pgprot_t prot) +{ + if (likely(ttm_has_pat)) { + pgprot_val(prot) |= _PAGE_PAT; + return prot; + } else { + return pgprot_noncached(prot); + } +} + +#else + +void ttm_pat_init(void) +{ +} + +void ttm_pat_takedown(void) +{ +} + +void ttm_pat_resume(void) +{ +} + +#ifdef CONFIG_X86 +#include + +pgprot_t pgprot_ttm_x86_wc(pgprot_t prot) +{ + uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS); + + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits); +} +#else +pgprot_t pgprot_ttm_x86_wc(pgprot_t prot) +{ + BUG(); +} +#endif +#endif diff --git a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h new file mode 100644 index 0000000..d767570 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h @@ -0,0 +1,41 @@ +/************************************************************************** + * + * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#ifndef _TTM_PAT_COMPAT_
+#define _TTM_PAT_COMPAT_
+#include
+#include
+extern void ttm_pat_init(void);
+extern void ttm_pat_takedown(void);
+extern void ttm_pat_resume(void);
+extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
new file mode 100644
index 0000000..13f3861
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
@@ -0,0 +1,98 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#ifndef _TTM_PL_COMMON_H_
+#define _TTM_PL_COMMON_H_
+/*
+ * Memory regions for data placement.
+ */
+
+#define TTM_PL_SYSTEM 0
+#define TTM_PL_TT 1
+#define TTM_PL_VRAM 2
+#define TTM_PL_PRIV0 3
+#define TTM_PL_PRIV1 4
+#define TTM_PL_PRIV2 5
+#define TTM_PL_PRIV3 6
+#define TTM_PL_PRIV4 7
+#define TTM_PL_PRIV5 8
+#define TTM_PL_CI 9
+#define TTM_PL_RAR 10
+#define TTM_PL_SWAPPED 15
+
+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
+#define TTM_PL_FLAG_RAR (1 << TTM_PL_RAR)
+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
+#define TTM_PL_MASK_MEM 0x0000FFFF
+
+/*
+ * Other flags that affect data placement.
+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
+ * if available.
+ * TTM_PL_FLAG_SHARED means that another application may + * reference the buffer. + * TTM_PL_FLAG_NO_EVICT means that the buffer may never + * be evicted to make room for other buffers. + */ + +#define TTM_PL_FLAG_CACHED (1 << 16) +#define TTM_PL_FLAG_UNCACHED (1 << 17) +#define TTM_PL_FLAG_WC (1 << 18) +#define TTM_PL_FLAG_SHARED (1 << 20) +#define TTM_PL_FLAG_NO_EVICT (1 << 21) + +#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ + TTM_PL_FLAG_UNCACHED | \ + TTM_PL_FLAG_WC) + +#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING) + +/* + * Access flags to be used for CPU- and GPU- mappings. + * The idea is that the TTM synchronization mechanism will + * allow concurrent READ access and exclusive write access. + * Currently GPU- and CPU accesses are exclusive. + */ + +#define TTM_ACCESS_READ (1 << 0) +#define TTM_ACCESS_WRITE (1 << 1) + +#endif diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c new file mode 100644 index 0000000..68cbb08 --- /dev/null +++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c @@ -0,0 +1,468 @@ +/************************************************************************** + * + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "ttm/ttm_placement_user.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_object.h" +#include "ttm/ttm_userobj_api.h" +#include "ttm/ttm_lock.h" + +struct ttm_bo_user_object { + struct ttm_base_object base; + struct ttm_buffer_object bo; +}; + +static size_t pl_bo_size = 0; + +static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages) +{ + size_t page_array_size = + (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK; + + if (unlikely(pl_bo_size == 0)) { + pl_bo_size = bdev->ttm_bo_extra_size + + ttm_round_pot(sizeof(struct ttm_bo_user_object)); + } + + return bdev->ttm_bo_size + 2 * page_array_size; +} + +static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file + *tfile, uint32_t handle) +{ + struct ttm_base_object *base; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL)) { + printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", + (unsigned long)handle); + return NULL; + } + + if (unlikely(base->object_type != ttm_buffer_type)) { + ttm_base_object_unref(&base); + printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", + (unsigned long)handle); + return NULL; + } + + return container_of(base, struct ttm_bo_user_object, base); +} + +struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file + *tfile, uint32_t handle) +{ + struct ttm_bo_user_object *user_bo; + struct ttm_base_object *base; + + user_bo = ttm_bo_user_lookup(tfile, handle); + if (unlikely(user_bo == NULL)) + return NULL; + + (void)ttm_bo_reference(&user_bo->bo); + base = &user_bo->base; + ttm_base_object_unref(&base); + return &user_bo->bo; +} + +static void ttm_bo_user_destroy(struct ttm_buffer_object *bo) +{ + struct ttm_bo_user_object *user_bo = + container_of(bo, struct ttm_bo_user_object, bo); + + ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false); + kfree(user_bo); +} + +static void ttm_bo_user_release(struct ttm_base_object **p_base) +{ + struct ttm_bo_user_object *user_bo; + struct ttm_base_object *base = *p_base; + struct ttm_buffer_object *bo; + + *p_base = NULL; + + if (unlikely(base == NULL)) + return; + + user_bo = container_of(base, struct ttm_bo_user_object, base); + bo = &user_bo->bo; + ttm_bo_unref(&bo); +} + +static void ttm_bo_user_ref_release(struct ttm_base_object *base, + enum ttm_ref_type ref_type) +{ + struct ttm_bo_user_object *user_bo = + container_of(base, struct ttm_bo_user_object, base); + struct ttm_buffer_object *bo = &user_bo->bo; + + switch (ref_type) { + case TTM_REF_SYNCCPU_WRITE: + ttm_bo_synccpu_write_release(bo); + break; + default: + BUG(); + } +} + +static void ttm_pl_fill_rep(struct ttm_buffer_object *bo, + struct ttm_pl_rep *rep) +{ + struct ttm_bo_user_object *user_bo = + container_of(bo, struct ttm_bo_user_object, bo); + + rep->gpu_offset = bo->offset; + rep->bo_size = bo->num_pages << PAGE_SHIFT; + rep->map_handle = bo->addr_space_offset; + rep->placement = bo->mem.flags; + rep->handle = user_bo->base.hash.key; + rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg; +} + +int ttm_pl_create_ioctl(struct ttm_object_file *tfile, + struct ttm_bo_device *bdev, + struct ttm_lock *lock, void *data) +{ + union ttm_pl_create_arg *arg = data; + struct ttm_pl_create_req *req = &arg->req; + struct ttm_pl_rep *rep = &arg->rep; + struct ttm_buffer_object *bo; + struct ttm_buffer_object *tmp; + struct ttm_bo_user_object *user_bo; + uint32_t flags; 
+	int ret = 0;
+	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	size_t acc_size =
+	    ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	flags = req->placement;
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(user_bo == NULL)) {
+		ttm_mem_global_free(mem_glob, acc_size, false);
+		return -ENOMEM;
+	}
+
+	bo = &user_bo->bo;
+	ret = ttm_read_lock(lock, true);
+	if (unlikely(ret != 0)) {
+		ttm_mem_global_free(mem_glob, acc_size, false);
+		kfree(user_bo);
+		return ret;
+	}
+
+	ret = ttm_buffer_object_init(bdev, bo, req->size,
+				     ttm_bo_type_device, flags,
+				     req->page_alignment, 0, true,
+				     NULL, acc_size, &ttm_bo_user_destroy);
+	ttm_read_unlock(lock);
+
+	/*
+	 * Note that the ttm_buffer_object_init function
+	 * would've called the destroy function on failure!!
+	 */
+
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = ttm_bo_reference(bo);
+	ret = ttm_base_object_init(tfile, &user_bo->base,
+				   flags & TTM_PL_FLAG_SHARED,
+				   ttm_buffer_type,
+				   &ttm_bo_user_release,
+				   &ttm_bo_user_ref_release);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	mutex_lock(&bo->mutex);
+	ttm_pl_fill_rep(bo, rep);
+	mutex_unlock(&bo->mutex);
+	ttm_bo_unref(&bo);
+out:
+	/* ret is 0 on the success path and the init error otherwise. */
+	return ret;
+out_err:
+	ttm_bo_unref(&tmp);
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
+			   struct ttm_bo_device *bdev,
+			   struct ttm_lock *lock, void *data)
+{
+	union ttm_pl_create_ub_arg *arg = data;
+	struct ttm_pl_create_ub_req *req = &arg->req;
+	struct ttm_pl_rep *rep = &arg->rep;
+	struct ttm_buffer_object *bo;
+	struct ttm_buffer_object *tmp;
+	struct ttm_bo_user_object *user_bo;
+	uint32_t flags;
+	int ret = 0;
+	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	size_t acc_size =
+	    ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	flags = req->placement;
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(user_bo == NULL)) {
+		ttm_mem_global_free(mem_glob, acc_size, false);
+		return -ENOMEM;
+	}
+	ret = ttm_read_lock(lock, true);
+	if (unlikely(ret != 0)) {
+		ttm_mem_global_free(mem_glob, acc_size, false);
+		kfree(user_bo);
+		return ret;
+	}
+	bo = &user_bo->bo;
+	ret = ttm_buffer_object_init(bdev, bo, req->size,
+				     ttm_bo_type_user, flags,
+				     req->page_alignment, req->user_address,
+				     true, NULL, acc_size, &ttm_bo_user_destroy);
+
+	/*
+	 * Note that the ttm_buffer_object_init function
+	 * would've called the destroy function on failure!!
+	 */
+	ttm_read_unlock(lock);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = ttm_bo_reference(bo);
+	ret = ttm_base_object_init(tfile, &user_bo->base,
+				   flags & TTM_PL_FLAG_SHARED,
+				   ttm_buffer_type,
+				   &ttm_bo_user_release,
+				   &ttm_bo_user_ref_release);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	mutex_lock(&bo->mutex);
+	ttm_pl_fill_rep(bo, rep);
+	mutex_unlock(&bo->mutex);
+	ttm_bo_unref(&bo);
+out:
+	/* ret is 0 on the success path and the init error otherwise. */
+	return ret;
+out_err:
+	ttm_bo_unref(&tmp);
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	union ttm_pl_reference_arg *arg = data;
+	struct ttm_pl_rep *rep = &arg->rep;
+	struct ttm_bo_user_object *user_bo;
+	struct ttm_buffer_object *bo;
+	struct ttm_base_object *base;
+	int ret;
+
+	user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
+	if (unlikely(user_bo == NULL)) {
+		printk(KERN_ERR "Could not reference buffer object.\n");
+		return -EINVAL;
+	}
+
+	bo = &user_bo->bo;
+	ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0)) {
+		printk(KERN_ERR
+		       "Could not add a reference to buffer object.\n");
+		goto out;
+	}
+
+	mutex_lock(&bo->mutex);
+	ttm_pl_fill_rep(bo, rep);
+	mutex_unlock(&bo->mutex);
+
+out:
+	base = &user_bo->base;
+	ttm_base_object_unref(&base);
+	return ret;
+}
+
+int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	struct ttm_pl_reference_req *arg = data;
+
+	return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
+}
+
+int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	struct ttm_pl_synccpu_arg *arg = data;
+	struct ttm_bo_user_object *user_bo;
+	struct ttm_buffer_object *bo;
+	struct ttm_base_object *base;
+	bool existed;
+	int ret;
+
+	switch (arg->op) {
+	case TTM_PL_SYNCCPU_OP_GRAB:
+		user_bo = ttm_bo_user_lookup(tfile, arg->handle);
+		if (unlikely(user_bo == NULL)) {
+			printk(KERN_ERR
+			       "Could not find buffer object for synccpu.\n");
+			return -EINVAL;
+		}
+		bo = &user_bo->bo;
+		base = &user_bo->base;
+		ret = ttm_bo_synccpu_write_grab(bo,
+						arg->access_mode &
+						TTM_PL_SYNCCPU_MODE_NO_BLOCK);
+		if (unlikely(ret != 0)) {
+			ttm_base_object_unref(&base);
+			goto out;
+		}
+		ret = ttm_ref_object_add(tfile, &user_bo->base,
+					 TTM_REF_SYNCCPU_WRITE, &existed);
+		if (existed || ret != 0)
+			ttm_bo_synccpu_write_release(bo);
+		ttm_base_object_unref(&base);
+		break;
+	case TTM_PL_SYNCCPU_OP_RELEASE:
+		ret = ttm_ref_object_base_unref(tfile, arg->handle,
+						TTM_REF_SYNCCPU_WRITE);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+out:
+	return ret;
+}
+
+int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
+			   struct ttm_lock *lock, void *data)
+{
+	union ttm_pl_setstatus_arg *arg = data;
+	struct ttm_pl_setstatus_req *req = &arg->req;
+	struct ttm_pl_rep *rep = &arg->rep;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_device *bdev;
+	int ret;
+
+	bo = ttm_buffer_object_lookup(tfile, req->handle);
+	if (unlikely(bo == NULL)) {
+		printk(KERN_ERR
+		       "Could not find buffer object for setstatus.\n");
+		return -EINVAL;
+	}
+
+	bdev = bo->bdev;
+
+	ret = ttm_read_lock(lock, true);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+	ret = ttm_bo_wait_cpu(bo, false);
+	if (unlikely(ret != 0))
+		goto out_err2;
+
+	mutex_lock(&bo->mutex);
+	ret = ttm_bo_check_placement(bo, req->set_placement,
+				     req->clr_placement);
+	if (unlikely(ret != 0))
+		goto out_err3;
+
+	bo->proposed_flags = (bo->proposed_flags | req->set_placement)
+	    & ~req->clr_placement;
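+
+	/*
+	 * proposed_flags now holds the updated placement mask; the
+	 * validate call below migrates the buffer if its current
+	 * placement no longer satisfies that mask.
+	 */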
+	ret = ttm_buffer_object_validate(bo, true, false);
+	if (unlikely(ret != 0))
+		goto out_err3;
+
+	ttm_pl_fill_rep(bo, rep);
+out_err3:
+	/* bo->mutex is held only on the paths that reach this label. */
+	mutex_unlock(&bo->mutex);
+out_err2:
+	ttm_bo_unreserve(bo);
+out_err1:
+	ttm_read_unlock(lock);
+out_err0:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	struct ttm_pl_waitidle_arg *arg = data;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	bo = ttm_buffer_object_lookup(tfile, arg->handle);
+	if (unlikely(bo == NULL)) {
+		printk(KERN_ERR "Could not find buffer object for waitidle.\n");
+		return -EINVAL;
+	}
+
+	ret =
+	    ttm_bo_block_reservation(bo, true,
+				     arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
+	if (unlikely(ret != 0))
+		goto out;
+	mutex_lock(&bo->mutex);
+	ret = ttm_bo_wait(bo,
+			  arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
+			  true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
+	mutex_unlock(&bo->mutex);
+	ttm_bo_unblock_reservation(bo);
+out:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_pl_verify_access(struct ttm_buffer_object *bo,
+			 struct ttm_object_file *tfile)
+{
+	struct ttm_bo_user_object *ubo;
+
+	/*
+	 * Check bo subclass.
+	 */
+
+	if (unlikely(bo->destroy != &ttm_bo_user_destroy))
+		return -EPERM;
+
+	ubo = container_of(bo, struct ttm_bo_user_object, bo);
+	if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
+		return 0;
+
+	return -EPERM;
+}
diff --git a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
new file mode 100644
index 0000000..9f69cdc
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
@@ -0,0 +1,259 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellström
+ */
+
+#ifndef _TTM_PLACEMENT_USER_H_
+#define _TTM_PLACEMENT_USER_H_
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/kernel.h>
+#endif
+
+#include "ttm/ttm_placement_common.h"
+
+#define TTM_PLACEMENT_MAJOR 0
+#define TTM_PLACEMENT_MINOR 1
+#define TTM_PLACEMENT_PL 0
+#define TTM_PLACEMENT_DATE "080819"
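+
+/*
+ * Illustrative use from user space (a hedged sketch: the wrapper name
+ * DRM_PSB_TTM_PL_CREATE and the file descriptor fd are hypothetical;
+ * only the argument layout is defined by this header):
+ *
+ *	union ttm_pl_create_arg arg = { .req = {
+ *		.size = 4096,
+ *		.placement = TTM_PL_FLAG_TT,
+ *		.page_alignment = 0,
+ *	} };
+ *	if (drmCommandWriteRead(fd, DRM_PSB_TTM_PL_CREATE,
+ *				&arg, sizeof(arg)) == 0)
+ *		use(arg.rep.handle, arg.rep.map_handle);
+ */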
+
+/**
+ * struct ttm_pl_create_req
+ *
+ * @size: The buffer object size.
+ * @placement: Flags that indicate initial acceptable
+ * placement.
+ * @page_alignment: Required alignment in pages.
+ *
+ * Input to the TTM_PL_CREATE ioctl.
+ */
+
+struct ttm_pl_create_req {
+	uint64_t size;
+	uint32_t placement;
+	uint32_t page_alignment;
+};
+
+/**
+ * struct ttm_pl_create_ub_req
+ *
+ * @size: The buffer object size.
+ * @user_address: User-space address of the memory area that
+ * should be used to back the buffer object, cast to 64-bit.
+ * @placement: Flags that indicate initial acceptable
+ * placement.
+ * @page_alignment: Required alignment in pages.
+ *
+ * Input to the TTM_PL_CREATE_UB ioctl.
+ */
+
+struct ttm_pl_create_ub_req {
+	uint64_t size;
+	uint64_t user_address;
+	uint32_t placement;
+	uint32_t page_alignment;
+};
+
+/**
+ * struct ttm_pl_rep
+ *
+ * @gpu_offset: The current offset into the memory region used.
+ * This can be used directly by the GPU if there are no
+ * additional GPU mapping procedures used by the driver.
+ *
+ * @bo_size: Actual buffer object size.
+ *
+ * @map_handle: Offset into the device address space.
+ * Used for map, seek, read, write. This will never change
+ * during the lifetime of an object.
+ *
+ * @placement: Flags indicating the placement status of
+ * the buffer object, using the TTM_PL placement flags.
+ *
+ * @sync_object_arg: Used for user-space synchronization and
+ * depends on the synchronization model used. If fences are
+ * used, this is the buffer_object::fence_type_mask.
+ *
+ * Output from the TTM_PL_CREATE, TTM_PL_REFERENCE and
+ * TTM_PL_SETSTATUS ioctls.
+ */
+
+struct ttm_pl_rep {
+	uint64_t gpu_offset;
+	uint64_t bo_size;
+	uint64_t map_handle;
+	uint32_t placement;
+	uint32_t handle;
+	uint32_t sync_object_arg;
+	uint32_t pad64;
+};
+
+/**
+ * struct ttm_pl_setstatus_req
+ *
+ * @set_placement: Placement flags to set.
+ *
+ * @clr_placement: Placement flags to clear.
+ *
+ * @handle: The object handle.
+ *
+ * Input to the TTM_PL_SETSTATUS ioctl.
+ */
+
+struct ttm_pl_setstatus_req {
+	uint32_t set_placement;
+	uint32_t clr_placement;
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+/**
+ * struct ttm_pl_reference_req
+ *
+ * @handle: The object to put a reference on.
+ *
+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREF ioctls.
+ */
+
+struct ttm_pl_reference_req {
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+/*
+ * ACCESS mode flags for SYNCCPU.
+ *
+ * TTM_PL_SYNCCPU_MODE_READ will guarantee that the GPU is not
+ * writing to the buffer.
+ *
+ * TTM_PL_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
+ * accessing the buffer.
+ *
+ * TTM_PL_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
+ * for GPU accesses to finish but returns -EBUSY instead.
+ *
+ * TTM_PL_SYNCCPU_MODE_TRYCACHED tries to place the buffer in
+ * cacheable memory while it is synchronized for CPU access.
+ */
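+
+/*
+ * A grab/release pair brackets direct CPU access (sketch; the handle
+ * h and the ioctl dispatch wrapper are assumed, not defined here):
+ *
+ *	struct ttm_pl_synccpu_arg arg;
+ *	arg.handle = h;
+ *	arg.access_mode = TTM_PL_SYNCCPU_MODE_WRITE;
+ *	arg.op = TTM_PL_SYNCCPU_OP_GRAB;
+ *	... issue TTM_PL_SYNCCPU, write through the CPU mapping ...
+ *	arg.op = TTM_PL_SYNCCPU_OP_RELEASE;
+ *	... issue TTM_PL_SYNCCPU again ...
+ */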
+
+#define TTM_PL_SYNCCPU_MODE_READ      TTM_ACCESS_READ
+#define TTM_PL_SYNCCPU_MODE_WRITE     TTM_ACCESS_WRITE
+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK  (1 << 2)
+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
+
+/**
+ * struct ttm_pl_synccpu_arg
+ *
+ * @handle: The object to synchronize.
+ *
+ * @access_mode: access mode indicated by the
+ * TTM_PL_SYNCCPU_MODE flags.
+ *
+ * @op: indicates whether to grab or release the
+ * buffer for cpu usage.
+ *
+ * Input to the TTM_PL_SYNCCPU ioctl.
+ */
+
+struct ttm_pl_synccpu_arg {
+	uint32_t handle;
+	uint32_t access_mode;
+	enum {
+		TTM_PL_SYNCCPU_OP_GRAB,
+		TTM_PL_SYNCCPU_OP_RELEASE
+	} op;
+	uint32_t pad64;
+};
+
+/*
+ * Waiting mode flags for the TTM_PL_WAITIDLE ioctl.
+ *
+ * TTM_PL_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
+ * wait.
+ *
+ * TTM_PL_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
+ * but return -EBUSY if the buffer is busy.
+ */
+
+#define TTM_PL_WAITIDLE_MODE_LAZY     (1 << 0)
+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
+
+/**
+ * struct ttm_pl_waitidle_arg
+ *
+ * @handle: The object to synchronize.
+ *
+ * @mode: wait mode indicated by the
+ * TTM_PL_WAITIDLE_MODE flags.
+ *
+ * Argument to the TTM_PL_WAITIDLE ioctl.
+ */
+
+struct ttm_pl_waitidle_arg {
+	uint32_t handle;
+	uint32_t mode;
+};
+
+union ttm_pl_create_arg {
+	struct ttm_pl_create_req req;
+	struct ttm_pl_rep rep;
+};
+
+union ttm_pl_reference_arg {
+	struct ttm_pl_reference_req req;
+	struct ttm_pl_rep rep;
+};
+
+union ttm_pl_setstatus_arg {
+	struct ttm_pl_setstatus_req req;
+	struct ttm_pl_rep rep;
+};
+
+union ttm_pl_create_ub_arg {
+	struct ttm_pl_create_ub_req req;
+	struct ttm_pl_rep rep;
+};
+
+/*
+ * Ioctl offsets.
+ */
+
+#define TTM_PL_CREATE    0x00
+#define TTM_PL_REFERENCE 0x01
+#define TTM_PL_UNREF     0x02
+#define TTM_PL_SYNCCPU   0x03
+#define TTM_PL_WAITIDLE  0x04
+#define TTM_PL_SETSTATUS 0x05
+#define TTM_PL_CREATE_UB 0x06
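+
+/*
+ * These offsets are relative to a driver-private ioctl base; a driver
+ * would typically map them along these lines (the DRM_PSB_* name and
+ * the base value below are hypothetical, not defined by this header):
+ *
+ *	#define DRM_PSB_TTM_PL_CREATE	(driver_ioctl_base + TTM_PL_CREATE)
+ */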
+
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_regman.h b/drivers/gpu/drm/psb/ttm/ttm_regman.h
new file mode 100644
index 0000000..5db5eda
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_regman.h
@@ -0,0 +1,74 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#ifndef _TTM_REGMAN_H_
+#define _TTM_REGMAN_H_
+
+#include <linux/list.h>
+
+struct ttm_fence_object;
+
+struct ttm_reg {
+	struct list_head head;
+	struct ttm_fence_object *fence;
+	uint32_t fence_type;
+	uint32_t new_fence_type;
+};
+
+struct ttm_reg_manager {
+	struct list_head free;
+	struct list_head lru;
+	struct list_head unfenced;
+
+	int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
+	void (*reg_destroy)(struct ttm_reg *reg);
+};
+
+extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
+			  const void *data,
+			  uint32_t fence_class,
+			  uint32_t fence_type,
+			  int interruptible,
+			  int no_wait,
+			  struct ttm_reg **reg);
+
+extern void ttm_regs_fence(struct ttm_reg_manager *regs,
+			   struct ttm_fence_object *fence);
+
+extern void ttm_regs_free(struct ttm_reg_manager *manager);
+extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
+extern void ttm_regs_init(struct ttm_reg_manager *manager,
+			  int (*reg_reusable)(const struct ttm_reg *,
+					      const void *),
+			  void (*reg_destroy)(struct ttm_reg *));
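+
+/*
+ * Illustrative only: a driver embeds struct ttm_reg in its own register
+ * type and supplies the two callbacks, e.g. (the my_reg type and its
+ * match rule below are hypothetical):
+ *
+ *	struct my_reg {
+ *		struct ttm_reg reg;
+ *		uint32_t value;
+ *	};
+ *
+ *	static int my_reg_reusable(const struct ttm_reg *reg,
+ *				   const void *data)
+ *	{
+ *		return container_of(reg, struct my_reg, reg)->value ==
+ *			*(const uint32_t *)data;
+ *	}
+ */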
+
+#endif
diff --git a/drivers/gpu/drm/psb/ttm/ttm_tt.c b/drivers/gpu/drm/psb/ttm/ttm_tt.c
new file mode 100644
index 0000000..5119aec
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_tt.c
@@ -0,0 +1,655 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström
+ */
+
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement_common.h"
+
+static int ttm_tt_swapin(struct ttm_tt *ttm);
+
+#if defined(CONFIG_X86)
+static void ttm_tt_clflush_page(struct page *page)
+{
+	uint8_t *page_virtual;
+	unsigned int i;
+
+	if (unlikely(page == NULL))
+		return;
+
+	page_virtual = kmap_atomic(page, KM_USER0);
+
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		clflush(page_virtual + i);
+
+	kunmap_atomic(page_virtual, KM_USER0);
+}
+
+static void ttm_tt_cache_flush_clflush(struct page *pages[],
+				       unsigned long num_pages)
+{
+	unsigned long i;
+
+	mb();
+	for (i = 0; i < num_pages; ++i)
+		ttm_tt_clflush_page(*pages++);
+	mb();
+}
+#else
+static void ttm_tt_ipi_handler(void *null)
+{
+	;
+}
+#endif
+
+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		ttm_tt_cache_flush_clflush(pages, num_pages);
+		return;
+	}
+#else
+	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
+#endif
+}
+
+/**
+ * Allocates storage for pointers to the pages that back the ttm.
+ *
+ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
+ */
+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
+	ttm->pages = NULL;
+
+	if (size <= PAGE_SIZE)
+		ttm->pages = kzalloc(size, GFP_KERNEL);
+
+	if (!ttm->pages) {
+		ttm->pages = vmalloc_user(size);
+		if (ttm->pages)
+			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
+	}
+}
+
+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+{
+	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
+		vfree(ttm->pages);
+		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
+	} else {
+		kfree(ttm->pages);
+	}
+	ttm->pages = NULL;
+}
+
+static struct page *ttm_tt_alloc_page(void)
+{
+	return alloc_page(GFP_KERNEL | __GFP_ZERO);
+}
+
+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
+{
+	int write;
+	int dirty;
+	struct page *page;
+	int i;
+	struct ttm_backend *be = ttm->be;
+
+	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
+	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
+	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
+
+	if (be)
+		be->func->clear(be);
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		page = ttm->pages[i];
+		if (page == NULL)
+			continue;
+
+		if (page == ttm->dummy_read_page) {
+			BUG_ON(write);
+			continue;
+		}
+
+		if (write && dirty && !PageReserved(page))
+			set_page_dirty_lock(page);
+
+		ttm->pages[i] = NULL;
+		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
+		put_page(page);
+	}
+	ttm->state = tt_unpopulated;
+	ttm->first_himem_page = ttm->num_pages;
+	ttm->last_lomem_page = -1;
+}
+
+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+	struct page *p;
+	struct ttm_bo_device *bdev = ttm->bdev;
+	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	int ret;
+
+	while (NULL == (p = ttm->pages[index])) {
+		p = ttm_tt_alloc_page();
+
+		if (!p)
+			return NULL;
+
+		if (PageHighMem(p)) {
+			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
+						   false, false, true);
+			if (unlikely(ret != 0))
+				goto out_err;
+			ttm->pages[--ttm->first_himem_page] = p;
+		} else {
+			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
+						   false, false, false);
+			if (unlikely(ret != 0))
+				goto out_err;
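+			/*
+			 * The page directory is filled from both ends:
+			 * lowmem pages from index 0 upwards via
+			 * last_lomem_page, highmem pages from the top
+			 * downwards via first_himem_page.
+			 */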
+			ttm->pages[++ttm->last_lomem_page] = p;
+		}
+	}
+	return p;
+out_err:
+	put_page(p);
+	return NULL;
+}
+
+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+	int ret;
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0))
+			return NULL;
+	}
+	return __ttm_tt_get_page(ttm, index);
+}
+
+int ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct page *page;
+	unsigned long i;
+	struct ttm_backend *be;
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	be = ttm->be;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		page = __ttm_tt_get_page(ttm, i);
+		if (!page)
+			return -ENOMEM;
+	}
+
+	be->func->populate(be, ttm->num_pages, ttm->pages,
+			   ttm->dummy_read_page);
+	ttm->state = tt_unbound;
+	return 0;
+}
+
+#ifdef CONFIG_X86
+static inline int ttm_tt_set_page_caching(struct page *p,
+					  enum ttm_caching_state c_state)
+{
+	if (PageHighMem(p))
+		return 0;
+
+	switch (c_state) {
+	case tt_cached:
+		return set_pages_wb(p, 1);
+	case tt_wc:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+		return set_memory_wc((unsigned long) page_address(p), 1);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */
+	default:
+		return set_pages_uc(p, 1);
+	}
+}
+#else /* CONFIG_X86 */
+static inline int ttm_tt_set_page_caching(struct page *p,
+					  enum ttm_caching_state c_state)
+{
+	return 0;
+}
+#endif /* CONFIG_X86 */
+
+/*
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
+ */
+
+static int ttm_tt_set_caching(struct ttm_tt *ttm,
+			      enum ttm_caching_state c_state)
+{
+	int i, j;
+	struct page *cur_page;
+	int ret;
+
+	if (ttm->caching_state == c_state)
+		return 0;
+
+	if (c_state != tt_cached) {
+		ret = ttm_tt_populate(ttm);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	if (ttm->caching_state == tt_cached)
+		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		cur_page = ttm->pages[i];
+		if (likely(cur_page != NULL)) {
+			ret = ttm_tt_set_page_caching(cur_page, c_state);
+			if (unlikely(ret != 0))
+				goto out_err;
+		}
+	}
+
+	ttm->caching_state = c_state;
+
+	return 0;
+
+out_err:
+	for (j = 0; j < i; ++j) {
+		cur_page = ttm->pages[j];
+		if (likely(cur_page != NULL)) {
+			(void)ttm_tt_set_page_caching(cur_page,
+						      ttm->caching_state);
+		}
+	}
+
+	return ret;
+}
+
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+{
+	enum ttm_caching_state state;
+
+	if (placement & TTM_PL_FLAG_WC)
+		state = tt_wc;
+	else if (placement & TTM_PL_FLAG_UNCACHED)
+		state = tt_uncached;
+	else
+		state = tt_cached;
+
+	return ttm_tt_set_caching(ttm, state);
+}
+
+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
+{
+	int i;
+	struct page *cur_page;
+	struct ttm_backend *be = ttm->be;
+
+	if (be)
+		be->func->clear(be);
+	(void)ttm_tt_set_caching(ttm, tt_cached);
+	for (i = 0; i < ttm->num_pages; ++i) {
+		cur_page = ttm->pages[i];
+		ttm->pages[i] = NULL;
+		if (cur_page) {
+			if (page_count(cur_page) != 1)
+				printk(KERN_ERR
+				       "Erroneous page count. Leaking pages.\n");
+			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
+					    PageHighMem(cur_page));
+			__free_page(cur_page);
+		}
+	}
+	ttm->state = tt_unpopulated;
+	ttm->first_himem_page = ttm->num_pages;
+	ttm->last_lomem_page = -1;
+}
+
+void ttm_tt_destroy(struct ttm_tt *ttm)
+{
+	struct ttm_backend *be;
+
+	if (unlikely(ttm == NULL))
+		return;
+
+	be = ttm->be;
+	if (likely(be != NULL)) {
+		be->func->destroy(be);
+		ttm->be = NULL;
+	}
+
+	if (likely(ttm->pages != NULL)) {
+		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+			ttm_tt_free_user_pages(ttm);
+		else
+			ttm_tt_free_alloced_pages(ttm);
+
+		ttm_tt_free_page_directory(ttm);
+	}
+
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
+	    ttm->swap_storage)
+		fput(ttm->swap_storage);
+
+	kfree(ttm);
+}
+
+int ttm_tt_set_user(struct ttm_tt *ttm,
+		    struct task_struct *tsk,
+		    unsigned long start, unsigned long num_pages)
+{
+	struct mm_struct *mm = tsk->mm;
+	int ret;
+	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
+	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+
+	BUG_ON(num_pages != ttm->num_pages);
+	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
+
+	/*
+	 * Account user pages as lowmem pages for now.
+	 */
+
+	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
+				   false, false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(tsk, mm, start, num_pages,
+			     write, 0, ttm->pages, NULL);
+	up_read(&mm->mmap_sem);
+
+	if (ret != num_pages && write) {
+		ttm_tt_free_user_pages(ttm);
+		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
+		return -ENOMEM;
+	}
+
+	ttm->tsk = tsk;
+	ttm->start = start;
+	ttm->state = tt_unbound;
+
+	return 0;
+}
+
+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+			     uint32_t page_flags, struct page *dummy_read_page)
+{
+	struct ttm_bo_driver *bo_driver = bdev->driver;
+	struct ttm_tt *ttm;
+
+	if (!bo_driver)
+		return NULL;
+
+	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
+	if (!ttm)
+		return NULL;
+
+	ttm->bdev = bdev;
+
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->first_himem_page = ttm->num_pages;
+	ttm->last_lomem_page = -1;
+	ttm->caching_state = tt_cached;
+	ttm->page_flags = page_flags;
+
+	ttm->dummy_read_page = dummy_read_page;
+
+	ttm_tt_alloc_page_directory(ttm);
+	if (!ttm->pages) {
+		ttm_tt_destroy(ttm);
+		printk(KERN_ERR "Failed allocating page table\n");
+		return NULL;
+	}
+	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
+	if (!ttm->be) {
+		ttm_tt_destroy(ttm);
+		printk(KERN_ERR "Failed creating ttm backend entry\n");
+		return NULL;
+	}
+	ttm->state = tt_unpopulated;
+	return ttm;
+}
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: the object to unbind from the graphics device
+ *
+ * Unbind an object from the aperture. This removes the mappings
+ * from the graphics device and flushes caches if necessary.
+ */
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+	int ret;
+	struct ttm_backend *be = ttm->be;
+
+	if (ttm->state == tt_bound) {
+		ret = be->func->unbind(be);
+		BUG_ON(ret);
+	}
+	ttm->state = tt_unbound;
+}
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: the ttm object to bind to the graphics device
+ *
+ * @bo_mem: the aperture memory region which will hold the object
+ *
+ * Bind a ttm object to the aperture. This ensures that the necessary
+ * pages are allocated, flushes CPU caches as needed and marks the
+ * ttm as TTM_PAGE_FLAG_USER_DIRTY to indicate that it may have been
+ * modified by the GPU.
+ */
+
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	int ret = 0;
+	struct ttm_backend *be;
+
+	if (!ttm)
+		return -EINVAL;
+
+	if (ttm->state == tt_bound)
+		return 0;
+
+	be = ttm->be;
+
+	ret = ttm_tt_populate(ttm);
+	if (ret)
+		return ret;
+
+	ret = be->func->bind(be, bo_mem);
+	if (ret) {
+		printk(KERN_ERR "Couldn't bind backend.\n");
+		return ret;
+	}
+
+	ttm->state = tt_bound;
+
+	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
+	return 0;
+}
+
+static int ttm_tt_swapin(struct ttm_tt *ttm)
+{
+	struct address_space *swap_space;
+	struct file *swap_storage;
+	struct page *from_page;
+	struct page *to_page;
+	void *from_virtual;
+	void *to_virtual;
+	int i;
+	int ret;
+
+	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
+				      ttm->num_pages);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+		return 0;
+	}
+
+	swap_storage = ttm->swap_storage;
+	BUG_ON(swap_storage == NULL);
+
+	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		from_page = read_mapping_page(swap_space, i, NULL);
+		if (IS_ERR(from_page))
+			goto out_err;
+		to_page = __ttm_tt_get_page(ttm, i);
+		if (unlikely(to_page == NULL)) {
+			/* Drop the page-cache reference taken above. */
+			page_cache_release(from_page);
+			goto out_err;
+		}
+
+		preempt_disable();
+		from_virtual = kmap_atomic(from_page, KM_USER0);
+		to_virtual = kmap_atomic(to_page, KM_USER1);
+		memcpy(to_virtual, from_virtual, PAGE_SIZE);
+		kunmap_atomic(to_virtual, KM_USER1);
+		kunmap_atomic(from_virtual, KM_USER0);
+		preempt_enable();
+		page_cache_release(from_page);
+	}
+
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
+		fput(swap_storage);
+	ttm->swap_storage = NULL;
+	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+
+	return 0;
+out_err:
+	ttm_tt_free_alloced_pages(ttm);
+	return -ENOMEM;
+}
+
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+{
+	struct address_space *swap_space;
+	struct file *swap_storage;
+	struct page *from_page;
+	struct page *to_page;
+	void *from_virtual;
+	void *to_virtual;
+	int i;
+
+	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
+	BUG_ON(ttm->caching_state != tt_cached);
+
+	/*
+	 * For user buffers, just unpin the pages, as there should be
+	 * vma references.
+	 */
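+
+	/*
+	 * A caller-supplied persistant_swap_storage file outlives the
+	 * ttm and is only written to here; without one, an anonymous
+	 * shmem file is created below and owned (and later fput) by
+	 * this ttm.
+	 */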
+
+	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+		ttm_tt_free_user_pages(ttm);
+		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+		ttm->swap_storage = NULL;
+		return 0;
+	}
+
+	if (!persistant_swap_storage) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
+		swap_storage = shmem_file_setup("ttm swap",
+						ttm->num_pages << PAGE_SHIFT,
+						0);
+		if (unlikely(IS_ERR(swap_storage))) {
+			printk(KERN_ERR "Failed allocating swap storage.\n");
+			return -ENOMEM;
+		}
+#else
+		return -ENOMEM;
+#endif
+	} else
+		swap_storage = persistant_swap_storage;
+
+	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		from_page = ttm->pages[i];
+		if (unlikely(from_page == NULL))
+			continue;
+		to_page = read_mapping_page(swap_space, i, NULL);
+		if (unlikely(IS_ERR(to_page)))
+			goto out_err;
+
+		preempt_disable();
+		from_virtual = kmap_atomic(from_page, KM_USER0);
+		to_virtual = kmap_atomic(to_page, KM_USER1);
+		memcpy(to_virtual, from_virtual, PAGE_SIZE);
+		kunmap_atomic(to_virtual, KM_USER1);
+		kunmap_atomic(from_virtual, KM_USER0);
+		preempt_enable();
+		set_page_dirty(to_page);
+		mark_page_accessed(to_page);
+		page_cache_release(to_page);
+	}
+
+	ttm_tt_free_alloced_pages(ttm);
+	ttm->swap_storage = swap_storage;
+	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+	if (persistant_swap_storage)
+		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
+
+	return 0;
+out_err:
+	if (!persistant_swap_storage)
+		fput(swap_storage);
+
+	return -ENOMEM;
+}
diff --git a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
new file mode 100644
index 0000000..5309050
--- /dev/null
+++ b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#ifndef _TTM_USEROBJ_API_H_
+#define _TTM_USEROBJ_API_H_
+
+#include "ttm/ttm_placement_user.h"
+#include "ttm/ttm_fence_user.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_fence_api.h"
+#include "ttm/ttm_bo_api.h"
+
+struct ttm_lock;
+
+/*
+ * User ioctls.
+ */
+
+extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
+			       struct ttm_bo_device *bdev,
+			       struct ttm_lock *lock, void *data);
+extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
+				  struct ttm_bo_device *bdev,
+				  struct ttm_lock *lock, void *data);
+extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
+				  struct ttm_lock *lock, void *data);
+extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
+
+extern int
+ttm_fence_user_create(struct ttm_fence_device *fdev,
+		      struct ttm_object_file *tfile,
+		      uint32_t fence_class,
+		      uint32_t fence_types,
+		      uint32_t create_flags,
+		      struct ttm_fence_object **fence, uint32_t *user_handle);
+
+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
+							  *tfile,
+							  uint32_t handle);
+
+extern int
+ttm_pl_verify_access(struct ttm_buffer_object *bo,
+		     struct ttm_object_file *tfile);
+#endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5575b9a..9c0b919 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1101,6 +1109,8 @@ extern int drm_init(struct drm_driver *driver);
 extern void drm_exit(struct drm_driver *driver);
 extern int drm_ioctl(struct inode *inode, struct file *filp,
 		     unsigned int cmd, unsigned long arg);
+extern long drm_unlocked_ioctl(struct file *filp,
+			       unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
 			     unsigned int cmd, unsigned long arg);
 extern int drm_lastclose(struct drm_device *dev);
@@ -1514,5 +1524,25 @@ static __inline void drm_free_large(void *ptr)
 }
 /*@}*/
 
+enum drm_global_types {
+	DRM_GLOBAL_TTM_MEM = 0,
+	DRM_GLOBAL_TTM_BO,
+	DRM_GLOBAL_TTM_OBJECT,
+	DRM_GLOBAL_NUM
+};
+
+struct drm_global_reference {
+	enum drm_global_types global_type;
+	size_t size;
+	void *object;
+	int (*init) (struct drm_global_reference *);
+	void (*release) (struct drm_global_reference *);
+};
+
+extern void drm_global_init(void);
+extern void drm_global_release(void);
+extern int drm_global_item_ref(struct drm_global_reference *ref);
+extern void drm_global_item_unref(struct drm_global_reference *ref);
+
 #endif /* __KERNEL__ */
 #endif
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index ae304cc..43a62a8 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -121,6 +121,7 @@ struct drm_mode_crtc {
 #define DRM_MODE_ENCODER_TMDS 2
 #define DRM_MODE_ENCODER_LVDS 3
 #define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_MIPI 5
 
 struct drm_mode_get_encoder {
 	__u32 encoder_id;
@@ -155,6 +156,7 @@ struct drm_mode_get_encoder {
 #define DRM_MODE_CONNECTOR_DisplayPort 10
 #define DRM_MODE_CONNECTOR_HDMIA 11
 #define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_MIPI 13
 
 struct drm_mode_get_connector {
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 79ca2da..00d7255 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -87,6 +87,9 @@ struct backlight_device {
 	struct notifier_block fb_notif;
 
 	struct device dev;
+
+	/* Private Backlight Data */
+	void *priv;
 };
 
 static inline void backlight_update_status(struct backlight_device *bd)
-- 
1.6.0.6