1. 11 Sep 2014, 3 commits
    • clk: Don't hold prepare_lock across debugfs creation · 6314b679
      Authored by Stephen Boyd
      Rob Clark reports a lockdep splat that involves the prepare_lock
      chained with the mmap semaphore.
      
      ======================================================
      [ INFO: possible circular locking dependency detected ]
      3.17.0-rc1-00050-g07a489b #802 Tainted: G        W
      -------------------------------------------------------
      Xorg.bin/5413 is trying to acquire lock:
       (prepare_lock){+.+.+.}, at: [<c0781280>] clk_prepare_lock+0x88/0xfc
      
      but task is already holding lock:
       (qcom_iommu_lock){+.+...}, at: [<c079f664>] qcom_iommu_unmap+0x1c/0x1f0
      
      which lock already depends on the new lock.
      
      the existing dependency chain (in reverse order) is:
      
      -> #4 (qcom_iommu_lock){+.+...}:
             [<c079f860>] qcom_iommu_map+0x28/0x450
             [<c079eb50>] iommu_map+0xc8/0x12c
             [<c056c1fc>] msm_iommu_map+0xb4/0x130
             [<c05697bc>] msm_gem_get_iova_locked+0x9c/0xe8
             [<c0569854>] msm_gem_get_iova+0x4c/0x64
             [<c0562208>] mdp4_kms_init+0x4c4/0x6c0
             [<c056881c>] msm_load+0x2ac/0x34c
             [<c0545724>] drm_dev_register+0xac/0x108
             [<c0547510>] drm_platform_init+0x50/0xf0
             [<c0578a60>] try_to_bring_up_master.part.3+0xc8/0x108
             [<c0578b48>] component_master_add_with_match+0xa8/0x104
             [<c0568294>] msm_pdev_probe+0x64/0x70
             [<c057e704>] platform_drv_probe+0x2c/0x60
             [<c057cff8>] driver_probe_device+0x108/0x234
             [<c057b65c>] bus_for_each_drv+0x64/0x98
             [<c057cec0>] device_attach+0x78/0x8c
             [<c057c590>] bus_probe_device+0x88/0xac
             [<c057c9b8>] deferred_probe_work_func+0x68/0x9c
             [<c0259db4>] process_one_work+0x1a0/0x40c
             [<c025a710>] worker_thread+0x44/0x4d8
             [<c025ec54>] kthread+0xd8/0xec
             [<c020e9a8>] ret_from_fork+0x14/0x2c
      
      -> #3 (&dev->struct_mutex){+.+.+.}:
             [<c0541188>] drm_gem_mmap+0x38/0xd0
             [<c05695b8>] msm_gem_mmap+0xc/0x5c
             [<c02f0b6c>] mmap_region+0x35c/0x6c8
             [<c02f11ec>] do_mmap_pgoff+0x314/0x398
             [<c02de1e0>] vm_mmap_pgoff+0x84/0xb4
             [<c02ef83c>] SyS_mmap_pgoff+0x94/0xbc
             [<c020e8e0>] ret_fast_syscall+0x0/0x48
      
      -> #2 (&mm->mmap_sem){++++++}:
             [<c0321138>] filldir64+0x68/0x180
             [<c0333fe0>] dcache_readdir+0x188/0x22c
             [<c0320ed0>] iterate_dir+0x9c/0x11c
             [<c03213b0>] SyS_getdents64+0x78/0xe8
             [<c020e8e0>] ret_fast_syscall+0x0/0x48
      
      -> #1 (&sb->s_type->i_mutex_key#3){+.+.+.}:
             [<c03fc544>] __create_file+0x58/0x1dc
             [<c03fc70c>] debugfs_create_dir+0x1c/0x24
             [<c0781c7c>] clk_debug_create_subtree+0x20/0x170
             [<c0be2af8>] clk_debug_init+0xec/0x14c
             [<c0208c70>] do_one_initcall+0x8c/0x1c8
             [<c0b9cce4>] kernel_init_freeable+0x13c/0x1dc
             [<c0877bc4>] kernel_init+0x8/0xe8
             [<c020e9a8>] ret_from_fork+0x14/0x2c
      
      -> #0 (prepare_lock){+.+.+.}:
             [<c087c408>] mutex_lock_nested+0x70/0x3e8
             [<c0781280>] clk_prepare_lock+0x88/0xfc
             [<c0782c50>] clk_prepare+0xc/0x24
             [<c079f474>] __enable_clocks.isra.4+0x18/0xa4
             [<c079f614>] __flush_iotlb_va+0xe0/0x114
             [<c079f6f4>] qcom_iommu_unmap+0xac/0x1f0
             [<c079ea3c>] iommu_unmap+0x9c/0xe8
             [<c056c2fc>] msm_iommu_unmap+0x64/0x84
             [<c0569da4>] msm_gem_free_object+0x11c/0x338
             [<c05413ec>] drm_gem_object_handle_unreference_unlocked+0xfc/0x130
             [<c0541604>] drm_gem_object_release_handle+0x50/0x68
             [<c0447a98>] idr_for_each+0xa8/0xdc
             [<c0541c10>] drm_gem_release+0x1c/0x28
             [<c0540b3c>] drm_release+0x370/0x428
             [<c031105c>] __fput+0x98/0x1e8
             [<c025d73c>] task_work_run+0xb0/0xfc
             [<c02477ec>] do_exit+0x2ec/0x948
             [<c0247ec0>] do_group_exit+0x4c/0xb8
             [<c025180c>] get_signal+0x28c/0x6ac
             [<c0211204>] do_signal+0xc4/0x3e4
             [<c02116cc>] do_work_pending+0xb4/0xc4
             [<c020e938>] work_pending+0xc/0x20
      
      other info that might help us debug this:
      
      Chain exists of:
        prepare_lock --> &dev->struct_mutex --> qcom_iommu_lock
      
       Possible unsafe locking scenario:
      
             CPU0                    CPU1
             ----                    ----
        lock(qcom_iommu_lock);
                                     lock(&dev->struct_mutex);
                                     lock(qcom_iommu_lock);
        lock(prepare_lock);
      
       *** DEADLOCK ***
      
      3 locks held by Xorg.bin/5413:
       #0:  (drm_global_mutex){+.+.+.}, at: [<c0540800>] drm_release+0x34/0x428
       #1:  (&dev->struct_mutex){+.+.+.}, at: [<c05413bc>] drm_gem_object_handle_unreference_unlocked+0xcc/0x130
       #2:  (qcom_iommu_lock){+.+...}, at: [<c079f664>] qcom_iommu_unmap+0x1c/0x1f0
      
      stack backtrace:
      CPU: 1 PID: 5413 Comm: Xorg.bin Tainted: G        W      3.17.0-rc1-00050-g07a489b #802
      [<c0216290>] (unwind_backtrace) from [<c0211d8c>] (show_stack+0x10/0x14)
      [<c0211d8c>] (show_stack) from [<c087a078>] (dump_stack+0x98/0xb8)
      [<c087a078>] (dump_stack) from [<c027f024>] (print_circular_bug+0x218/0x340)
      [<c027f024>] (print_circular_bug) from [<c0283e08>] (__lock_acquire+0x1d24/0x20b8)
      [<c0283e08>] (__lock_acquire) from [<c0284774>] (lock_acquire+0x9c/0xbc)
      [<c0284774>] (lock_acquire) from [<c087c408>] (mutex_lock_nested+0x70/0x3e8)
      [<c087c408>] (mutex_lock_nested) from [<c0781280>] (clk_prepare_lock+0x88/0xfc)
      [<c0781280>] (clk_prepare_lock) from [<c0782c50>] (clk_prepare+0xc/0x24)
      [<c0782c50>] (clk_prepare) from [<c079f474>] (__enable_clocks.isra.4+0x18/0xa4)
      [<c079f474>] (__enable_clocks.isra.4) from [<c079f614>] (__flush_iotlb_va+0xe0/0x114)
      [<c079f614>] (__flush_iotlb_va) from [<c079f6f4>] (qcom_iommu_unmap+0xac/0x1f0)
      [<c079f6f4>] (qcom_iommu_unmap) from [<c079ea3c>] (iommu_unmap+0x9c/0xe8)
      [<c079ea3c>] (iommu_unmap) from [<c056c2fc>] (msm_iommu_unmap+0x64/0x84)
      [<c056c2fc>] (msm_iommu_unmap) from [<c0569da4>] (msm_gem_free_object+0x11c/0x338)
      [<c0569da4>] (msm_gem_free_object) from [<c05413ec>] (drm_gem_object_handle_unreference_unlocked+0xfc/0x130)
      [<c05413ec>] (drm_gem_object_handle_unreference_unlocked) from [<c0541604>] (drm_gem_object_release_handle+0x50/0x68)
      [<c0541604>] (drm_gem_object_release_handle) from [<c0447a98>] (idr_for_each+0xa8/0xdc)
      [<c0447a98>] (idr_for_each) from [<c0541c10>] (drm_gem_release+0x1c/0x28)
      [<c0541c10>] (drm_gem_release) from [<c0540b3c>] (drm_release+0x370/0x428)
      [<c0540b3c>] (drm_release) from [<c031105c>] (__fput+0x98/0x1e8)
      [<c031105c>] (__fput) from [<c025d73c>] (task_work_run+0xb0/0xfc)
      [<c025d73c>] (task_work_run) from [<c02477ec>] (do_exit+0x2ec/0x948)
      [<c02477ec>] (do_exit) from [<c0247ec0>] (do_group_exit+0x4c/0xb8)
      [<c0247ec0>] (do_group_exit) from [<c025180c>] (get_signal+0x28c/0x6ac)
      [<c025180c>] (get_signal) from [<c0211204>] (do_signal+0xc4/0x3e4)
      [<c0211204>] (do_signal) from [<c02116cc>] (do_work_pending+0xb4/0xc4)
      [<c02116cc>] (do_work_pending) from [<c020e938>] (work_pending+0xc/0x20)
      
      We can break this chain if we don't hold the prepare_lock while
      creating debugfs directories. We only hold the prepare_lock right
      now because we're traversing the clock tree recursively and we
      don't want the hierarchy to change during the traversal.
      Replacing this traversal with a simple linked list walk allows us
      to only grab a list lock instead of the prepare_lock, thus
      breaking the lock chain (see the sketch below).
      Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
      Signed-off-by: Mike Turquette <mturquette@linaro.org>
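      What follows is a hedged, illustrative sketch of the list-based approach
      described above; the helper clk_debug_create_one(), the rootdir dentry and
      the debug_node list member are assumptions modelled on the clk debugfs code,
      not a verbatim copy of the patch. The point is that registration and the
      debugfs walk only ever take a small list lock, never prepare_lock.

      #include <linux/clk-provider.h>
      #include <linux/init.h>
      #include <linux/list.h>
      #include <linux/mutex.h>

      /* Flat list of clocks to expose in debugfs, guarded by its own lock. */
      static HLIST_HEAD(clk_debug_list);
      static DEFINE_MUTEX(clk_debug_lock);

      /* Called at clock registration time: only the list lock is taken. */
      static void clk_debug_register(struct clk *clk)
      {
              mutex_lock(&clk_debug_lock);
              hlist_add_head(&clk->debug_node, &clk_debug_list);
              mutex_unlock(&clk_debug_lock);
      }

      /* Late init: walk the flat list instead of recursing over the clock
       * tree, so debugfs_create_dir() runs without prepare_lock held. */
      static int __init clk_debug_init(void)
      {
              struct clk *clk;

              mutex_lock(&clk_debug_lock);
              hlist_for_each_entry(clk, &clk_debug_list, debug_node)
                      clk_debug_create_one(clk, rootdir);
              mutex_unlock(&clk_debug_lock);

              return 0;
      }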
    • clk: rockchip: also protect hclk_peri as critical · 2fed71e5
      Authored by Heiko Stübner
      The dwc2 usb controller also uses aggressive clock gating, which in this
      case leads to hclk_peri getting disabled and hanging the system.
      Therefore move it to the critical clocks until we also control that
      part of the system.
      Signed-off-by: Heiko Stuebner <heiko@sntech.de>
      Signed-off-by: Mike Turquette <mturquette@linaro.org>
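      For context, "protecting" a clock as critical in this driver amounts to
      keeping it permanently prepared and enabled. A minimal sketch of that idea
      follows; the array and function names are illustrative assumptions, and only
      the "hclk_peri" entry comes from the commit above.

      #include <linux/clk.h>
      #include <linux/clk-provider.h>
      #include <linux/init.h>
      #include <linux/kernel.h>

      /* Clocks that must never be gated until their users are fully managed. */
      static const char *const critical_clocks[] = {
              "aclk_cpu",
              "aclk_peri",
              "hclk_peri",    /* kept on so dwc2's clock gating can't hang the bus */
      };

      static void __init protect_critical_clocks(void)
      {
              int i;

              for (i = 0; i < ARRAY_SIZE(critical_clocks); i++) {
                      struct clk *clk = __clk_lookup(critical_clocks[i]);

                      /* A prepare_enable that is never balanced keeps it on. */
                      if (clk)
                              clk_prepare_enable(clk);
              }
      }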
    • clk: fractional-divider: cast parent_rate to u64 before multiplying · feaefa0e
      Authored by Heiko Stübner
      On 32-bit architectures such as ARM, the fractional rate calculation
      performs the multiplication in 32 bits and only converts the value to u64
      when it gets assigned to ret, which can overflow.
      
      The error in question happened with a parent_rate of 386MHz, m = 3000,
      n = 60000, which resulted in a wrong rate value of 15812Hz.
      
      Therefore cast parent_rate to u64 so that the multiplication happens in
      64-bit arithmetic and produces the correct 19.3MHz in the example (see
      the sketch below).
      Signed-off-by: Heiko Stuebner <heiko@sntech.de>
      Signed-off-by: Mike Turquette <mturquette@linaro.org>
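      To make the overflow concrete, here is a small standalone C sketch using
      the numbers from the commit (ordinary userspace code, not the driver; the
      exact bogus value also depends on how the driver reads m and n, so only
      the wraparound itself is illustrated):

      #include <stdint.h>
      #include <stdio.h>

      int main(void)
      {
              unsigned long parent_rate = 386000000UL;        /* 386 MHz */
              unsigned long m = 3000, n = 60000;

              /* On a 32-bit build, unsigned long is 32 bits and the product
               * 386000000 * 3000 wraps around before the division. */
              unsigned long wrong = parent_rate * m / n;

              /* Casting one operand to a 64-bit type makes the whole
               * multiplication happen in 64 bits, as the fix does with u64. */
              uint64_t right = (uint64_t)parent_rate * m;
              right /= n;

              /* right is 19300000, i.e. the expected 19.3 MHz */
              printf("wrong: %lu Hz, right: %llu Hz\n",
                     wrong, (unsigned long long)right);
              return 0;
      }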
  2. 10 Sep 2014, 8 commits
  3. 04 Sep 2014, 2 commits
  4. 03 Sep 2014, 12 commits
  5. 02 Sep 2014, 2 commits
    • clk: mvebu: powersave clock is a multiplexer · e8e8a9b0
      Authored by Mike Turquette
      Kirkwood is unique among the mvebu SoCs for having a clock multiplexer
      that feeds into the cpu. This multiplexer can select either the cpu pll
      or the ddr clock as its input signal, allowing for a choice between
      performance and power savings.
      
      This patch introduces the code needed to register the clock multiplexer
      on Kirkwood SoCs but does not include the clock data to actually
      register the clock. That will be done in a follow-up patch which is
      necessary to prevent breaking git bisect.
      
      Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
      Tested-by: Andrew Lunn <andrew@lunn.ch>
      Signed-off-by: Mike Turquette <mturquette@linaro.org>
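      For illustration, registering such a two-input mux with the common clock
      framework can look roughly like the sketch below; the register address,
      select-bit position and parent names are assumptions, not values taken
      from the Kirkwood patch.

      #include <linux/clk-provider.h>
      #include <linux/init.h>
      #include <linux/io.h>
      #include <linux/kernel.h>
      #include <linux/spinlock.h>

      static DEFINE_SPINLOCK(powersave_lock);

      /* Either the CPU PLL or the DDR clock can feed the CPU. */
      static const char *powersave_parents[] = { "cpu_pll", "ddrclk" };

      static struct clk * __init powersave_mux_register(void __iomem *reg)
      {
              /* One select bit (position assumed here) in a shared register
               * chooses between the two parents. */
              return clk_register_mux(NULL, "powersave", powersave_parents,
                                      ARRAY_SIZE(powersave_parents), 0,
                                      reg, 11, 1, 0, &powersave_lock);
      }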
    • clk: mvebu: share locks between gate clocks · 87e39216
      Authored by Mike Turquette
      Refactor mvebu_clk_gating_setup() to use a common spinlock instead of a
      unique lock for every instance of a struct clk_gating_ctrl object. This
      will be used later for a separate mux clock type that shares a register
      with gate clock types and needs to use the same lock to protect access
      to the register.
      
      Cc: Andrew Lunn <andrew@lunn.ch>
      Tested-by: Andrew Lunn <andrew@lunn.ch>
      Signed-off-by: Mike Turquette <mturquette@linaro.org>
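      The point of the refactor is that a gate clock and a mux clock living in
      the same register must serialize their read-modify-write cycles through
      one lock. A minimal sketch with illustrative names, bit positions and
      register layout (not the actual mvebu code):

      #include <linux/clk-provider.h>
      #include <linux/init.h>
      #include <linux/io.h>
      #include <linux/kernel.h>
      #include <linux/spinlock.h>

      /* One lock shared by every clock that touches the same control register. */
      static DEFINE_SPINLOCK(ctrl_gating_lock);

      static const char *mux_parents[] = { "cpu_pll", "ddrclk" };

      static void __init register_shared_reg_clocks(void __iomem *reg)
      {
              /* A gate on bit 0 of the register... */
              clk_register_gate(NULL, "sata0_gate", "tclk", 0,
                                reg, 0, 0, &ctrl_gating_lock);

              /* ...and a 1-bit mux in the same register: both take the same
               * spinlock, so their read-modify-write cycles cannot interleave. */
              clk_register_mux(NULL, "powersave", mux_parents,
                               ARRAY_SIZE(mux_parents), 0,
                               reg, 11, 1, 0, &ctrl_gating_lock);
      }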
  6. 31 Aug 2014, 1 commit
  7. 30 Aug 2014, 2 commits
  8. 29 Aug 2014, 3 commits
    • x86, irq, PCI: Keep IRQ assignment for runtime power management · 9eabc99a
      Authored by Jiang Liu
      Now IOAPIC driver dynamically allocates IRQ numbers for IOAPIC pins.
      We need to keep IRQ assignment for PCI devices during runtime power
      management, otherwise it may cause failure of device wakeups.
      
      Commit 3eec5952 ("x86, irq, PCI: Keep IRQ assignment for PCI
      devices during suspend/hibernation") fixed the issue for suspend and
      hibernation; we need the same fix for runtime device sleep as well.
      
      Fix: https://bugzilla.kernel.org/show_bug.cgi?id=83271
      Reported-and-Tested-by: EmanueL Czirai <amanual@openmailbox.org>
      Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
      Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
      Cc: Tony Luck <tony.luck@intel.com>
      Cc: Joerg Roedel <joro@8bytes.org>
      Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
      Cc: EmanueL Czirai <amanual@openmailbox.org>
      Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
      Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
      Cc: Bjorn Helgaas <bhelgaas@google.com>
      Cc: Randy Dunlap <rdunlap@infradead.org>
      Cc: Yinghai Lu <yinghai@kernel.org>
      Cc: Borislav Petkov <bp@alien8.de>
      Cc: Grant Likely <grant.likely@linaro.org>
      Link: http://lkml.kernel.org/r/1409304383-18806-1-git-send-email-jiang.liu@linux.intel.com
      Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    • spi/rockchip: Avoid accidentally turning off the clock · 5d1d150d
      Authored by Doug Anderson
      If our client is requesting a clock that is above the maximum clock
      then the following division will result in 0:
        rs->max_freq / rs->speed
      
      We'll then program 0 into the SPI_BAUDR register.  The Rockchip TRM
      says: "If the value is 0, the serial output clock (sclk_out) is
      disabled."
      
      It's much better to end up with the fastest possible clock than with a
      clock that is off, so enforce a minimum value (see the sketch below).
      Signed-off-by: Doug Anderson <dianders@chromium.org>
      Signed-off-by: Mark Brown <broonie@kernel.org>
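      A hedged sketch of the clamping idea (the struct and field names follow
      the commit text, but this is not necessarily the exact patch): cap the
      requested speed at the controller clock so the divisor can never compute
      to 0.

      #include <linux/kernel.h>       /* min_t() */
      #include <linux/types.h>

      /* Illustrative container holding only the fields the commit mentions. */
      struct rockchip_spi {
              u32 max_freq;   /* input clock of the SPI block, in Hz */
              u32 speed;      /* transfer speed requested by the client, in Hz */
      };

      static u32 rockchip_spi_baudr_div(struct rockchip_spi *rs)
      {
              /*
               * If the client asks for more than max_freq, max_freq / speed
               * truncates to 0 and writing 0 to SPI_BAUDR disables sclk_out,
               * so clamp the speed to max_freq first (assumes speed != 0).
               */
              rs->speed = min_t(u32, rs->speed, rs->max_freq);

              return rs->max_freq / rs->speed;
      }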
    • dm crypt: fix access beyond the end of allocated space · d49ec52f
      Authored by Mikulas Patocka
      The DM crypt target accesses memory beyond allocated space resulting in
      a crash on 32 bit x86 systems.
      
      This bug is very old (it dates back to 2.6.25 commit 3a7f6c99 "dm
      crypt: use async crypto").  However, this bug was masked by the fact
      that kmalloc rounds the size up to the next power of two.  This bug
      wasn't exposed until 3.17-rc1 commit 298a9fa0 ("dm crypt: use per-bio
      data").  By switching to using per-bio data there was no longer any
      padding beyond the end of a dm-crypt allocated memory block.
      
      To minimize allocation overhead dm-crypt puts several structures into one
      block allocated with kmalloc.  The block holds struct ablkcipher_request,
      cipher-specific scratch pad (crypto_ablkcipher_reqsize(any_tfm(cc))),
      struct dm_crypt_request and an initialization vector.
      
      The variable dmreq_start is set to offset of struct dm_crypt_request
      within this memory block.  dm-crypt allocates the block with this size:
      cc->dmreq_start + sizeof(struct dm_crypt_request) + cc->iv_size.
      
      When accessing the initialization vector, dm-crypt uses the function
      iv_of_dmreq, which performs this calculation: ALIGN((unsigned long)(dmreq
      + 1), crypto_ablkcipher_alignmask(any_tfm(cc)) + 1).
      
      dm-crypt allocated "cc->iv_size" bytes beyond the end of dm_crypt_request
      structure.  However, when dm-crypt accesses the initialization vector, it
      takes a pointer to the end of dm_crypt_request, aligns it, and then uses
      it as the initialization vector.  If the end of dm_crypt_request is not
      aligned on a crypto_ablkcipher_alignmask(any_tfm(cc)) boundary the
      alignment causes the initialization vector to point beyond the allocated
      space.
      
      Fix this bug by calculating the variable iv_size_padding and adding it
      to the allocated size (the size arithmetic is sketched below).
      
      Also correct the alignment of dm_crypt_request.  struct dm_crypt_request
      is specific to dm-crypt (it isn't used by the crypto subsystem at all),
      so it is aligned on __alignof__(struct dm_crypt_request).
      
      Also align per_bio_data_size on ARCH_KMALLOC_MINALIGN, so that it is
      aligned as if the block was allocated with kmalloc.
      Reported-by: Krzysztof Kolasa <kkolasa@winsoft.pl>
      Tested-by: Milan Broz <gmazyland@gmail.com>
      Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
      Signed-off-by: Mike Snitzer <snitzer@redhat.com>
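      To see why the padding matters, here is a standalone model of the size
      arithmetic described above (illustrative, not the dm-crypt code itself;
      the parameter names mirror the commit text):

      #include <stddef.h>

      /*
       * One kmalloc'ed block per request, laid out as:
       *   [ ablkcipher_request | cipher scratch pad | dm_crypt_request | pad | IV ]
       *     ^offset 0                                 ^dmreq_start
       */
      static size_t crypt_block_size(size_t dmreq_start, size_t dmreq_size,
                                     size_t align_mask, size_t iv_size,
                                     size_t kmalloc_align)
      {
              size_t iv_size_padding;

              if (align_mask + 1 <= kmalloc_align) {
                      /*
                       * kmalloc guarantees the block start is aligned, so the
                       * offset of the end of dm_crypt_request is known and we
                       * can pad exactly up to the next alignment boundary,
                       * the same boundary iv_of_dmreq() rounds up to.
                       */
                      iv_size_padding = -(dmreq_start + dmreq_size) & align_mask;
              } else {
                      /* Stricter than kmalloc's guarantee: assume the worst case. */
                      iv_size_padding = align_mask;
              }

              /* Without iv_size_padding, the aligned IV pointer could land
               * up to align_mask bytes past the old end of the allocation. */
              return dmreq_start + dmreq_size + iv_size_padding + iv_size;
      }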
  9. 28 Aug 2014, 7 commits