OSDN Git Service

Merge tag 'drm-intel-next-2019-05-24' of git://anongit.freedesktop.org/drm/drm-intel...
[tomoyo/tomoyo-test1.git] / drivers / gpu / drm / i915 / intel_runtime_pm.c
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include <drm/drm_print.h>
33
34 #include "i915_drv.h"
35 #include "i915_irq.h"
36 #include "intel_cdclk.h"
37 #include "intel_combo_phy.h"
38 #include "intel_crt.h"
39 #include "intel_csr.h"
40 #include "intel_dp.h"
41 #include "intel_dpio_phy.h"
42 #include "intel_drv.h"
43 #include "intel_hotplug.h"
44 #include "intel_sideband.h"
45
46 /**
47  * DOC: runtime pm
48  *
49  * The i915 driver supports dynamic enabling and disabling of entire hardware
50  * blocks at runtime. This is especially important on the display side where
51  * software is supposed to control many power gates manually on recent hardware,
52  * since on the GT side a lot of the power management is done by the hardware.
53  * But even there some manual control at the device level is required.
54  *
55  * Since i915 supports a diverse set of platforms with a unified codebase and
56  * hardware engineers just love to shuffle functionality around between power
57  * domains there's a sizeable amount of indirection required. This file provides
58  * generic functions to the driver for grabbing and releasing references for
59  * abstract power domains. It then maps those to the actual power wells
60  * present for a given platform.
61  */
62
63 static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915);
64 static void
65 __intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref,
66                        bool wakelock);
67
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/* Debug build: the tracking implementation is defined later in this file. */
static void
intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref);
#else
/*
 * Non-debug build: there is no per-wakeref tracking, so the cookie in @wref
 * is irrelevant and a canonical -1 is passed down in its place.
 */
static inline void intel_runtime_pm_put_raw(struct drm_i915_private *i915,
					    intel_wakeref_t wref)
{
	__intel_runtime_pm_put(i915, -1, false);
}
#endif
78
79 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
80
81 #include <linux/sort.h>
82
83 #define STACKDEPTH 8
84
/*
 * Capture the current call stack and intern it in the stack depot, returning
 * a compact handle.  Returns 0 on depot allocation failure (callers treat a
 * zero handle as "no stack").  GFP_NOWAIT because callers may hold spinlocks.
 */
static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	/* skipnr=1 drops __save_depot_stack itself from the captured trace */
	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
93
/*
 * Expand a depot handle back into a symbolic, multi-line stack trace in
 * @buf (at most @sz bytes), indenting each frame by @indent spaces.
 */
static void __print_depot_stack(depot_stack_handle_t stack,
				char *buf, int sz, int indent)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
103
104 static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
105 {
106         struct i915_runtime_pm *rpm = &i915->runtime_pm;
107
108         spin_lock_init(&rpm->debug.lock);
109 }
110
/*
 * Record the acquisition stack for a newly taken wakeref.  Returns a depot
 * handle that serves as the wakeref cookie, or -1 when tracking is
 * unavailable (platform without runtime PM, or allocation failure).
 */
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!HAS_RUNTIME_PM(i915))
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	/* Remember which stack took the very first outstanding reference */
	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	/* GFP_NOWAIT: we are under a spinlock and must not sleep */
	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		/* Could not grow the owner array; report the ref untracked */
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}
144
/*
 * Drop the tracking entry matching @stack (the cookie returned by
 * track_intel_runtime_pm_wakeref()).  If no matching entry exists, warn
 * and dump both the offending stack and the last-release stack to help
 * debug unbalanced get/put pairs.
 */
static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
					     depot_stack_handle_t stack)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	unsigned long flags, n;
	bool found = false;

	/* -1 marks an untracked wakeref (tracking disabled or alloc failed) */
	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			/* Remove entry n by shifting the tail down one slot */
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	/*
	 * NOTE(review): debug.count is read here after dropping debug.lock,
	 * so the value printed may be stale relative to concurrent updates.
	 */
	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}
188
189 static int cmphandle(const void *_a, const void *_b)
190 {
191         const depot_stack_handle_t * const a = _a, * const b = _b;
192
193         if (*a < *b)
194                 return -1;
195         else if (*a > *b)
196                 return 1;
197         else
198                 return 0;
199 }
200
/*
 * Pretty-print a snapshot of the wakeref debug state: the last acquire and
 * release stacks, the outstanding count, and every owner stack.  Sorts
 * @dbg->owners in place (the pointer is const, the array is not) so that
 * identical stacks can be coalesced into a single "x%lu" line.
 */
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	/* Group identical stacks together so repeats can be counted below */
	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		/* Count the run of duplicates and advance past them */
		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}
239
/*
 * Move the whole tracking state into @saved — ownership of the owners array
 * transfers to the caller's snapshot — then reset @debug and stamp the
 * release stack.  Both callers hold rpm->debug.lock around this.
 */
static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}
250
251 static void
252 dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
253 {
254         struct drm_printer p;
255
256         if (!debug->count)
257                 return;
258
259         p = drm_debug_printer("i915");
260         __print_intel_runtime_pm_wakeref(&p, debug);
261
262         kfree(debug->owners);
263 }
264
/*
 * Drop one reference from wakeref_count.  Only on the final decrement to
 * zero is the debug lock taken: the whole tracking state is detached under
 * the lock and dumped/freed afterwards, outside it.
 */
static noinline void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	/* atomic_dec_and_lock_irqsave: lock acquired only when count hits 0 */
	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	/* Printing and kfree happen outside the spinlock */
	dump_and_free_wakeref_tracking(&dbg);
}
282
283 static noinline void
284 untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
285 {
286         struct i915_runtime_pm *rpm = &i915->runtime_pm;
287         struct intel_runtime_pm_debug dbg = {};
288         unsigned long flags;
289
290         spin_lock_irqsave(&rpm->debug.lock, flags);
291         __untrack_all_wakerefs(&rpm->debug, &dbg);
292         spin_unlock_irqrestore(&rpm->debug.lock, flags);
293
294         dump_and_free_wakeref_tracking(&dbg);
295 }
296
/**
 * print_intel_runtime_pm_wakeref - dump the outstanding wakeref state
 * @i915: i915 device instance
 * @p: drm_printer to emit the report through
 *
 * Takes a consistent snapshot of the wakeref tracking state and prints it.
 */
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	/*
	 * Copy the owners array under the lock, but allocate outside it:
	 * read the count; if our buffer is too small, drop the lock, grow
	 * the buffer and retry, since the count may change underneath us.
	 */
	do {
		struct i915_runtime_pm *rpm = &i915->runtime_pm;
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		/* Snapshot buffer too small: grow it and try again */
		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}
334
335 #else
336
/* CONFIG_DRM_I915_DEBUG_RUNTIME_PM=n: wakeref tracking compiles to no-ops. */
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
}

/* Always hand out the "untracked" cookie. */
static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
					     intel_wakeref_t wref)
{
}

/* The wakeref count itself is still maintained, just without stacks. */
static void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
	atomic_dec(&i915->runtime_pm.wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
{
}
362
363 #endif
364
365 static void
366 intel_runtime_pm_acquire(struct drm_i915_private *i915, bool wakelock)
367 {
368         struct i915_runtime_pm *rpm = &i915->runtime_pm;
369
370         if (wakelock) {
371                 atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
372                 assert_rpm_wakelock_held(i915);
373         } else {
374                 atomic_inc(&rpm->wakeref_count);
375                 assert_rpm_raw_wakeref_held(i915);
376         }
377 }
378
/*
 * Undo intel_runtime_pm_acquire(): for a wakelock, assert while the bias is
 * still present and then subtract it; the final single-count decrement (and
 * zero-crossing handling) is delegated to
 * __intel_wakeref_dec_and_check_tracking().
 */
static void
intel_runtime_pm_release(struct drm_i915_private *i915, int wakelock)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;

	if (wakelock) {
		/* Assert before dropping the bias, while it is still held */
		assert_rpm_wakelock_held(i915);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(i915);
	}

	__intel_wakeref_dec_and_check_tracking(i915);
}
393
394 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
395                                          enum i915_power_well_id power_well_id);
396
/*
 * intel_display_power_domain_str - map a power domain enum to its name
 * @domain: power domain to stringify
 *
 * Returns a static human-readable name for debug/error messages, or "?"
 * (after logging a MISSING_CASE) for an unknown value.
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
		return "TRANSCODER_EDP_VDSC";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
498
499 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
500                                     struct i915_power_well *power_well)
501 {
502         DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
503         power_well->desc->ops->enable(dev_priv, power_well);
504         power_well->hw_enabled = true;
505 }
506
507 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
508                                      struct i915_power_well *power_well)
509 {
510         DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
511         power_well->hw_enabled = false;
512         power_well->desc->ops->disable(dev_priv, power_well);
513 }
514
515 static void intel_power_well_get(struct drm_i915_private *dev_priv,
516                                  struct i915_power_well *power_well)
517 {
518         if (!power_well->count++)
519                 intel_power_well_enable(dev_priv, power_well);
520 }
521
522 static void intel_power_well_put(struct drm_i915_private *dev_priv,
523                                  struct i915_power_well *power_well)
524 {
525         WARN(!power_well->count, "Use count on power well %s is already zero",
526              power_well->desc->name);
527
528         if (!--power_well->count)
529                 intel_power_well_disable(dev_priv, power_well);
530 }
531
532 /**
533  * __intel_display_power_is_enabled - unlocked check for a power domain
534  * @dev_priv: i915 device instance
535  * @domain: power domain to check
536  *
537  * This is the unlocked version of intel_display_power_is_enabled() and should
538  * only be used from error capture and recovery code where deadlocks are
539  * possible.
540  *
541  * Returns:
542  * True when the power domain is enabled, false otherwise.
543  */
544 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
545                                       enum intel_display_power_domain domain)
546 {
547         struct i915_power_well *power_well;
548         bool is_enabled;
549
550         if (dev_priv->runtime_pm.suspended)
551                 return false;
552
553         is_enabled = true;
554
555         for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
556                 if (power_well->desc->always_on)
557                         continue;
558
559                 if (!power_well->hw_enabled) {
560                         is_enabled = false;
561                         break;
562                 }
563         }
564
565         return is_enabled;
566 }
567
568 /**
569  * intel_display_power_is_enabled - check for a power domain
570  * @dev_priv: i915 device instance
571  * @domain: power domain to check
572  *
573  * This function can be used to check the hw power domain state. It is mostly
574  * used in hardware state readout functions. Everywhere else code should rely
575  * upon explicit power domain reference counting to ensure that the hardware
576  * block is powered up before accessing it.
577  *
578  * Callers must hold the relevant modesetting locks to ensure that concurrent
579  * threads can't disable the power well while the caller tries to read a few
580  * registers.
581  *
582  * Returns:
583  * True when the power domain is enabled, false otherwise.
584  */
585 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
586                                     enum intel_display_power_domain domain)
587 {
588         struct i915_power_domains *power_domains;
589         bool ret;
590
591         power_domains = &dev_priv->power_domains;
592
593         mutex_lock(&power_domains->lock);
594         ret = __intel_display_power_is_enabled(dev_priv, domain);
595         mutex_unlock(&power_domains->lock);
596
597         return ret;
598 }
599
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we make sure here to touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	/* Re-enable the pipe interrupts gated behind this well, if any */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
630
631 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
632                                        u8 irq_pipe_mask)
633 {
634         if (irq_pipe_mask)
635                 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
636 }
637
638
/*
 * Wait for the well's STATE bit to assert in the driver control register,
 * WARNing (but otherwise proceeding) on timeout.
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(&dev_priv->uncore,
					regs->driver,
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					1));
}
652
653 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
654                                      const struct i915_power_well_regs *regs,
655                                      int pw_idx)
656 {
657         u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
658         u32 ret;
659
660         ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
661         ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
662         if (regs->kvmr.reg)
663                 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
664         ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
665
666         return ret;
667 }
668
/*
 * Wait briefly for the well's STATE bit to clear; if it stays on because
 * some other agent still requests the well, log who is holding it instead
 * of stalling.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* The condition assigns both 'disabled' and 'reqs' as side effects */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
696
/*
 * Wait (WARNing on timeout) for the fuse distribution status bit of power
 * gate @pg to assert in SKL_FUSE_STATUS.
 */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
705
/*
 * Enable a HSW+-style power well: set the driver request bit, wait for the
 * STATE ack, then apply post-enable fixups (fuse waits, the CNL AUX
 * workaround, VGA/IRQ restore).  The ordering below follows the hardware
 * programming sequence and must not be rearranged.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		/* Map the ctl index to its power gate (gen11 vs gen9 layout) */
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}
749
/*
 * Disable a HSW+-style power well: run the IRQ pre-disable hook, clear the
 * driver request bit, then wait for the well to report powered down.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
764
765 #define ICL_AUX_PW_TO_PORT(pw_idx)      ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
766
/*
 * Enable an ICL combo-PHY AUX power well: set the driver request, enable
 * the AUX lane in the PHY common-lane register, then wait for the well to
 * power up.  Display WA #1178 is applied for non-eDP ports A/B on ICL.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (IS_ICELAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, port)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}
793
/*
 * Disable an ICL combo-PHY AUX power well — mirror of the enable path:
 * drop the AUX lane enable first, then clear the driver request and wait
 * for the well to power down.
 */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
811
812 #define ICL_AUX_PW_TO_CH(pw_idx)        \
813         ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
814
/*
 * Enable an ICL Type-C AUX power well: first steer the AUX channel to
 * either TBT or non-TBT IO per the well's is_tc_tbt flag, then run the
 * common HSW power well enable sequence.
 */
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
	u32 val;

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);
}
830
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        enum i915_power_well_id id = power_well->desc->id;
        int pw_idx = power_well->desc->hsw.idx;
        /* Both the request bit and the resulting state bit must be set. */
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
                   HSW_PWR_WELL_CTL_STATE(pw_idx);
        u32 val;

        val = I915_READ(regs->driver);

        /*
         * On GEN9 big core due to a DMC bug the driver's request bits for PW1
         * and the MISC_IO PW will be not restored, so check instead for the
         * BIOS's own request bits, which are forced-on for these power wells
         * when exiting DC5/6.
         */
        if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
            (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
                val |= I915_READ(regs->bios);

        return (val & mask) == mask;
}
860
/*
 * Sanity-check the preconditions for entering DC9: it must not already be
 * enabled, DC5 must be off, power well 2 must be off and interrupts must be
 * disabled. Only warns; does not change any state.
 */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
                  "DC9 already programmed to be enabled.\n");
        WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
                  "DC5 still not disabled to enable DC9.\n");
        WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
                  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
                  "Power well 2 on.\n");
        WARN_ONCE(intel_irqs_enabled(dev_priv),
                  "Interrupts not disabled yet.\n");

         /*
          * TODO: check for the following to verify the conditions to enter DC9
          * state are satisfied:
          * 1] Check relevant display engine registers to verify if mode set
          * disable sequence was followed.
          * 2] Check if display uninitialize sequence is initialized.
          */
}
881
/*
 * Sanity-check the preconditions for exiting DC9: interrupts must still be
 * disabled and DC5 must not be enabled. Only warns; does not change state.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(intel_irqs_enabled(dev_priv),
                  "Interrupts not disabled yet.\n");
        WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
                  "DC5 still not disabled.\n");

         /*
          * TODO: check for the following to verify DC9 state was indeed
          * entered before programming to disable it:
          * 1] Check relevant display engine registers to verify if mode
          *  set disable sequence was followed.
          * 2] Check if display uninitialize sequence is initialized.
          */
}
897
/*
 * Write @state to DC_STATE_EN, re-reading and re-writing until the value
 * sticks. The DMC firmware has been observed to revert the register to its
 * old value, so a single write is not reliable; we require several stable
 * re-reads (and cap the total rewrites) before giving up.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
                                u32 state)
{
        int rewrites = 0;
        int rereads = 0;
        u32 v;

        I915_WRITE(DC_STATE_EN, state);

        /* It has been observed that disabling the dc6 state sometimes
         * doesn't stick and dmc keeps returning old value. Make sure
         * the write really sticks enough times and also force rewrite until
         * we are confident that state is exactly what we want.
         */
        do  {
                v = I915_READ(DC_STATE_EN);

                if (v != state) {
                        /* Value bounced back: write again and restart the
                         * stable-read counter. */
                        I915_WRITE(DC_STATE_EN, state);
                        rewrites++;
                        rereads = 0;
                } else if (rereads++ > 5) {
                        /* Enough consecutive reads matched; we're done. */
                        break;
                }

        } while (rewrites < 100);

        if (v != state)
                DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
                          state, v);

        /* Most of the times we need one retry, avoid spam */
        if (rewrites > 1)
                DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
                              state, rewrites);
}
934
935 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
936 {
937         u32 mask;
938
939         mask = DC_STATE_EN_UPTO_DC5;
940         if (INTEL_GEN(dev_priv) >= 11)
941                 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
942         else if (IS_GEN9_LP(dev_priv))
943                 mask |= DC_STATE_EN_DC9;
944         else
945                 mask |= DC_STATE_EN_UPTO_DC6;
946
947         return mask;
948 }
949
/*
 * Re-read the currently enabled DC states from the hardware and reset the
 * driver's DC state tracking (csr.dc_state) to match, discarding any stale
 * software-side value.
 */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

        DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
                      dev_priv->csr.dc_state, val);
        dev_priv->csr.dc_state = val;
}
960
961 /**
962  * gen9_set_dc_state - set target display C power state
963  * @dev_priv: i915 device instance
964  * @state: target DC power state
965  * - DC_STATE_DISABLE
966  * - DC_STATE_EN_UPTO_DC5
967  * - DC_STATE_EN_UPTO_DC6
968  * - DC_STATE_EN_DC9
969  *
970  * Signal to DMC firmware/HW the target DC power state passed in @state.
971  * DMC/HW can turn off individual display clocks and power rails when entering
972  * a deeper DC power state (higher in number) and turns these back when exiting
973  * that state to a shallower power state (lower in number). The HW will decide
974  * when to actually enter a given state on an on-demand basis, for instance
975  * depending on the active state of display pipes. The state of display
976  * registers backed by affected power rails are saved/restored as needed.
977  *
978  * Based on the above enabling a deeper DC power state is asynchronous wrt.
979  * enabling it. Disabling a deeper power state is synchronous: for instance
980  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
981  * back on and register state is restored. This is guaranteed by the MMIO write
982  * to DC_STATE_EN blocking until the state is restored.
983  */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
        u32 val;
        u32 mask;

        /* Clamp the request to what the platform/firmware allows. */
        if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
                state &= dev_priv->csr.allowed_dc_mask;

        val = I915_READ(DC_STATE_EN);
        mask = gen9_dc_mask(dev_priv);
        DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
                      val & mask, state);

        /* Check if DMC is ignoring our DC state requests */
        if ((val & mask) != dev_priv->csr.dc_state)
                DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
                          dev_priv->csr.dc_state, val & mask);

        /* Replace only the platform-valid DC bits, preserving the rest. */
        val &= ~mask;
        val |= state;

        /* Retrying write helper: the DMC sometimes drops the first write. */
        gen9_write_dc_state(dev_priv, val);

        dev_priv->csr.dc_state = val & mask;
}
1009
/*
 * Enter the DC9 display power state. Resets the panel power sequencer
 * bookkeeping first on platforms without a PCH-side display engine, since
 * DC9 loses PPS state there.
 */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc9(dev_priv);

        DRM_DEBUG_KMS("Enabling DC9\n");
        /*
         * Power sequencer reset is not needed on
         * platforms with South Display Engine on PCH,
         * because PPS registers are always on.
         */
        if (!HAS_PCH_SPLIT(dev_priv))
                intel_power_sequencer_reset(dev_priv);
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
1024
/*
 * Exit the DC9 display power state and re-apply the PPS register unlock
 * workaround, which is lost while in DC9.
 */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_disable_dc9(dev_priv);

        DRM_DEBUG_KMS("Disabling DC9\n");

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        intel_pps_unlock_regs_wa(dev_priv);
}
1035
/*
 * Warn if the CSR/DMC firmware does not appear to be loaded: its program
 * storage, SSP base and HTP registers must all be non-zero. DC5/DC6 depend
 * on the DMC firmware being present.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
                  "CSR program storage start is NULL\n");
        WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
        WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
1043
/*
 * Find the power well with the given ID on this platform. Never returns
 * NULL: if the ID is not defined here (a driver bug), it warns and falls
 * back to the first power well so callers need no error handling.
 */
static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
                  enum i915_power_well_id power_well_id)
{
        struct i915_power_well *power_well;

        for_each_power_well(dev_priv, power_well)
                if (power_well->desc->id == power_well_id)
                        return power_well;

        /*
         * It's not feasible to add error checking code to the callers since
         * this condition really shouldn't happen and it doesn't even make sense
         * to abort things like display initialization sequences. Just return
         * the first power well and hope the WARN gets reported so we can fix
         * our driver.
         */
        WARN(1, "Power well %d not defined for this platform\n", power_well_id);
        return &dev_priv->power_domains.power_wells[0];
}
1064
/*
 * Sanity-check the preconditions for enabling DC5: power well 2 must be
 * off, DC5 must not already be enabled, an RPM wakelock must be held and
 * the CSR/DMC firmware must be loaded. Only warns; changes no state.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
        bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
                                        SKL_DISP_PW_2);

        WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
                  "DC5 already programmed to be enabled.\n");
        assert_rpm_wakelock_held(dev_priv);

        assert_csr_loaded(dev_priv);
}
1078
/*
 * Allow the hardware to enter DC5. Applies the gen9 big-core DC-exit
 * chicken-bit workaround before handing the state to the DMC.
 */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc5(dev_priv);

        DRM_DEBUG_KMS("Enabling DC5\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (IS_GEN9_BC(dev_priv))
                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
                           SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
1092
/*
 * Sanity-check the preconditions for enabling DC6: the utility-pin backlight
 * must be off, DC6 must not already be enabled and the CSR/DMC firmware must
 * be loaded. Only warns; changes no state.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
                  "Backlight is not disabled.\n");
        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
                  "DC6 already programmed to be enabled.\n");

        assert_csr_loaded(dev_priv);
}
1102
/*
 * Allow the hardware to enter DC6. Applies the same gen9 big-core DC-exit
 * chicken-bit workaround as the DC5 path before handing over to the DMC.
 */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc6(dev_priv);

        DRM_DEBUG_KMS("Enabling DC6\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (IS_GEN9_BC(dev_priv))
                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
                           SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
1116
/*
 * Sync driver state with a power well request left behind by the BIOS:
 * if the BIOS holds the request bit, mirror it into the driver's control
 * register (if not already set) and then clear the BIOS's bit, so the
 * driver owns the well from here on.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 bios_req = I915_READ(regs->bios);

        /* Take over the request bit if set by BIOS. */
        if (bios_req & mask) {
                u32 drv_req = I915_READ(regs->driver);

                if (!(drv_req & mask))
                        I915_WRITE(regs->driver, drv_req | mask);
                I915_WRITE(regs->bios, bios_req & ~mask);
        }
}
1134
/* Enable callback: bring up the DDI PHY backing this common power well. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}
1140
/* Disable callback: power down the DDI PHY backing this common power well. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
1146
/* Query callback: report whether the well's backing DDI PHY is powered up. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
1152
1153 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1154 {
1155         struct i915_power_well *power_well;
1156
1157         power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1158         if (power_well->count > 0)
1159                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1160
1161         power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1162         if (power_well->count > 0)
1163                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1164
1165         if (IS_GEMINILAKE(dev_priv)) {
1166                 power_well = lookup_power_well(dev_priv,
1167                                                GLK_DISP_PW_DPIO_CMN_C);
1168                 if (power_well->count > 0)
1169                         bxt_ddi_phy_verify_state(dev_priv,
1170                                                  power_well->desc->bxt.phy);
1171         }
1172 }
1173
/*
 * The "DC off" well is considered enabled when neither DC5 nor DC6 is
 * currently allowed in DC_STATE_EN.
 */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
1179
1180 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1181 {
1182         u32 tmp = I915_READ(DBUF_CTL);
1183
1184         WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1185              (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1186              "Unexpected DBuf power power state (0x%08x)\n", tmp);
1187 }
1188
/*
 * "Enable" the DC-off well, i.e. disallow all DC states, then sanity-check
 * the state the DMC should have preserved/restored across DC transitions:
 * cdclk configuration, DBuf power, the BXT/GLK PHY state and (on gen11+)
 * the combo PHY, whose port B context the DMC does not retain.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        struct intel_cdclk_state cdclk_state = {};

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
        /* Can't read out voltage_level so can't use intel_cdclk_changed() */
        WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

        gen9_assert_dbuf_enabled(dev_priv);

        if (IS_GEN9_LP(dev_priv))
                bxt_verify_ddi_phy_power_wells(dev_priv);

        if (INTEL_GEN(dev_priv) >= 11)
                /*
                 * DMC retains HW context only for port A, the other combo
                 * PHY's HW context for port B is lost after DC transitions,
                 * so we need to restore it manually.
                 */
                intel_combo_phy_init(dev_priv);
}
1213
/*
 * "Disable" the DC-off well, i.e. re-allow the deepest DC state permitted
 * by allowed_dc_mask. A no-op unless the DMC firmware payload is loaded,
 * since DC5/DC6 require the DMC.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        if (!dev_priv->csr.dmc_payload)
                return;

        if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
                skl_enable_dc6(dev_priv);
        else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
                gen9_enable_dc5(dev_priv);
}
1225
/* No-op sync_hw callback for power wells with no HW state to reconcile. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
}
1230
/* No-op enable/disable callback for always-on power wells. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
}
1235
/* Always-on power wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
                                             struct i915_power_well *power_well)
{
        return true;
}
1241
/*
 * Enable the i830 "pipes" power well by turning on pipe A and pipe B,
 * skipping any pipe that is already enabled.
 */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
        if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_A);
        if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_B);
}
1250
/* Disable the i830 "pipes" power well: turn off pipe B, then pipe A. */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        i830_disable_pipe(dev_priv, PIPE_B);
        i830_disable_pipe(dev_priv, PIPE_A);
}
1257
1258 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1259                                           struct i915_power_well *power_well)
1260 {
1261         return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1262                 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1263 }
1264
1265 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1266                                           struct i915_power_well *power_well)
1267 {
1268         if (power_well->count > 0)
1269                 i830_pipes_power_well_enable(dev_priv, power_well);
1270         else
1271                 i830_pipes_power_well_disable(dev_priv, power_well);
1272 }
1273
/*
 * Set a VLV/CHV power well on or off through the punit power-gate control
 * register, then poll the punit status register until it reflects the
 * requested state (or a 100ms timeout expires). Skips the write entirely
 * if the status already matches.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
{
        int pw_idx = power_well->desc->vlv.idx;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
                         PUNIT_PWRGT_PWR_GATE(pw_idx);

        vlv_punit_get(dev_priv);

/* True when the punit status already reports the requested state. */
#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

        if (COND)
                goto out;

        /* Update only this well's bits in the shared control register. */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
        ctrl &= ~mask;
        ctrl |= state;
        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

        if (wait_for(COND, 100))
                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                          state,
                          vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
        vlv_punit_put(dev_priv);
}
1309
/* Enable callback: thin wrapper around vlv_set_power_well(..., true). */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);
}
1315
/* Disable callback: thin wrapper around vlv_set_power_well(..., false). */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, false);
}
1321
/*
 * Query a VLV/CHV power well's state from the punit. Also warns if the
 * status is neither fully-on nor fully-gated, or if the control register
 * disagrees with the status (some other agent poking the power controls).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->vlv.idx;
        bool enabled = false;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

        vlv_punit_get(dev_priv);

        state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
                state != PUNIT_PWRGT_PWR_GATE(pw_idx));
        if (state == ctrl)
                enabled = true;

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
        WARN_ON(ctrl != state);

        vlv_punit_put(dev_priv);

        return enabled;
}
1357
/*
 * Program the VLV display clock-gating, arbiter and raw-clock registers to
 * their initial values, preserving only the DSI pipe clock-gating disable
 * bit that may already be in effect (see comment below).
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
        u32 val;

        /*
         * On driver load, a pipe may be active and driving a DSI display.
         * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
         * (and never recovering) in this case. intel_dsi_post_disable() will
         * clear it when we turn off the display.
         */
        val = I915_READ(DSPCLK_GATE_D);
        val &= DPOUNIT_CLOCK_GATE_DISABLE;
        val |= VRHUNIT_CLOCK_GATE_DISABLE;
        I915_WRITE(DSPCLK_GATE_D, val);

        /*
         * Disable trickle feed and enable pnd deadline calculation
         */
        I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
        I915_WRITE(CBR1_VLV, 0);

        /* rawclk_freq must have been read out before this point. */
        WARN_ON(dev_priv->rawclk_freq == 0);

        I915_WRITE(RAWCLK_FREQ_VLV,
                   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1384
/*
 * Bring the VLV/CHV display block back up after its power well turns on:
 * re-enable the per-pipe reference/CRI clocks, restore clock gating,
 * re-enable display interrupts and — unless we're still in driver
 * init/resume — restore HPD, CRT ADPA, VGA and PPS state that the power
 * well loses.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        enum pipe pipe;

        /*
         * Enable the CRI clock source so we can get at the
         * display and the reference clock for VGA
         * hotplug / manual detection. Supposedly DSI also
         * needs the ref clock up and running.
         *
         * CHV DPLL B/C have some issues if VGA mode is enabled.
         */
        for_each_pipe(dev_priv, pipe) {
                u32 val = I915_READ(DPLL(pipe));

                val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
                if (pipe != PIPE_A)
                        val |= DPLL_INTEGRATED_CRI_CLK_VLV;

                I915_WRITE(DPLL(pipe), val);
        }

        vlv_init_display_clock_gating(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /*
         * During driver initialization/resume we can avoid restoring the
         * part of the HW/SW state that will be inited anyway explicitly.
         */
        if (dev_priv->power_domains.initializing)
                return;

        intel_hpd_init(dev_priv);

        /* Re-enable the ADPA, if we have one */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                if (encoder->type == INTEL_OUTPUT_ANALOG)
                        intel_crt_reset(&encoder->base);
        }

        i915_redisable_vga_power_on(dev_priv);

        intel_pps_unlock_regs_wa(dev_priv);
}
1433
/*
 * Tear down display state before the VLV/CHV display power well goes off:
 * disable and drain display interrupts, reset the panel power sequencer
 * bookkeeping and switch hotplug detection to polling (unless the device
 * is suspending, where polling must stay off).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_disable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* make sure we're done processing display irqs */
        synchronize_irq(dev_priv->drm.irq);

        intel_power_sequencer_reset(dev_priv);

        /* Prevent us from re-enabling polling on accident in late suspend */
        if (!dev_priv->drm.dev->power.is_suspended)
                intel_hpd_poll_init(dev_priv);
}
1449
/* Enable the display power well, then restore display state on top of it. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}
1457
/* Tear down display state first, then power the display well down. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        vlv_display_power_well_deinit(dev_priv);

        vlv_set_power_well(dev_priv, power_well, false);
}
1465
/*
 * Enable the VLV DPIO common power well and then de-assert the DPIO common
 * reset per the VLV BIOS notes. The ordering and the preceding delay are
 * part of the documented power-up sequence.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

        vlv_set_power_well(dev_priv, power_well, true);

        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
         *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
         *   b. The other bits such as sfr settings / modesel may all
         *      be set to 0.
         *
         * This should only be done on init and resume from S3 with
         * both PLLs disabled, or we risk losing DPIO and PLL
         * synchronization.
         */
        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1487
/*
 * Disable the VLV DPIO common power well: all pipe PLLs must already be
 * disabled; assert the DPIO common reset before dropping the well.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe)
                assert_pll_disabled(dev_priv, pipe);

        /* Assert common reset */
        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

        vlv_set_power_well(dev_priv, power_well, false);
}
1501
/* Mask covering every power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True when all of @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Reconstruct the PHY status we expect from the software-tracked
 * chv_phy_control value plus the current power well / DPLL state, and warn
 * if the hardware's DISPLAY_PHY_STATUS doesn't converge to it.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *cmn_bc =
                lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
                lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[DPIO_PHY0])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
                                     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

        if (!dev_priv->chv_phy_assert[DPIO_PHY1])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

        if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY0);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

                /* CL1 is on whenever anything is on in either channel */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

                /*
                 * The DPLLB check accounts for the pipe B + port A usage
                 * with CL2 powered up but all the lanes in the second channel
                 * powered down.
                 */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
                    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

                /* Spline LDOs: 0x3 = lanes 0/1, 0xc = lanes 2/3. */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
        }

        if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY1);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
        }

        phy_status &= phy_status_mask;

        /*
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
        if (intel_wait_for_register(&dev_priv->uncore,
                                    DISPLAY_PHY_STATUS,
                                    phy_status_mask,
                                    phy_status,
                                    10))
                DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
                          I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
                           phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
1613
/*
 * Power up a CHV DPIO common lane power well (PHY0/BC or PHY1/D), then
 * program the PHY's dynamic power-down bits and deassert common lane reset.
 * The register write sequence is order-sensitive; do not reorder.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        enum dpio_phy phy;
        enum pipe pipe;
        u32 tmp;

        /* Only the two CHV DPIO common wells may reach this hook. */
        WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        /* Map the well to its PHY and to the pipe used for DPIO accesses. */
        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                pipe = PIPE_A;
                phy = DPIO_PHY0;
        } else {
                pipe = PIPE_C;
                phy = DPIO_PHY1;
        }

        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);

        /* Poll for phypwrgood signal */
        if (intel_wait_for_register(&dev_priv->uncore,
                                    DISPLAY_PHY_STATUS,
                                    PHY_POWERGOOD(phy),
                                    PHY_POWERGOOD(phy),
                                    1))
                DRM_ERROR("Display PHY %d is not power up\n", phy);

        vlv_dpio_get(dev_priv);

        /* Enable dynamic power down */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
        tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
                DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                /* PHY0 has a second channel; enable its dynamic power down too. */
                tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
                tmp |= DPIO_DYNPWRDOWNEN_CH1;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
        } else {
                /*
                 * Force the non-existing CL2 off. BXT does this
                 * too, so maybe it saves some power even though
                 * CL2 doesn't exist?
                 */
                tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
                tmp |= DPIO_CL2_LDOFUSE_PWRENB;
                vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
        }

        vlv_dpio_put(dev_priv);

        /* Release the PHY's common lane reset via the software mirror. */
        dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                      phy, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);
}
1677
/*
 * Power down a CHV DPIO common lane power well. Asserts that the PLLs fed
 * by the PHY are already disabled, reasserts common lane reset, and only
 * then drops the power well itself.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum dpio_phy phy;

        WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                assert_pll_disabled(dev_priv, PIPE_A);
                assert_pll_disabled(dev_priv, PIPE_B);
        } else {
                phy = DPIO_PHY1;
                assert_pll_disabled(dev_priv, PIPE_C);
        }

        /* Assert common lane reset before cutting power. */
        dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        vlv_set_power_well(dev_priv, power_well, false);

        DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                      phy, dev_priv->chv_phy_control);

        /* PHY is fully reset now, so we can enable the PHY state asserts */
        dev_priv->chv_phy_assert[phy] = true;

        assert_chv_phy_status(dev_priv);
}
1708
/*
 * Sanity-check that the per-lane power down status reported by the PHY
 * matches what we expect given the lane mask and override setting.
 * @mask: bitmask of lanes that should remain powered (0x0..0xf).
 * Warns (does not fail) on mismatch; purely a debug aid.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                                     enum dpio_channel ch, bool override, unsigned int mask)
{
        enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
        u32 reg, val, expected, actual;

        /*
         * The BIOS can leave the PHY is some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[phy])
                return;

        /* Each channel reports its lane power state in a different DW. */
        if (ch == DPIO_CH0)
                reg = _CHV_CMN_DW0_CH0;
        else
                reg = _CHV_CMN_DW6_CH1;

        vlv_dpio_get(dev_priv);
        val = vlv_dpio_read(dev_priv, pipe, reg);
        vlv_dpio_put(dev_priv);

        /*
         * This assumes !override is only used when the port is disabled.
         * All lanes should power down even without the override when
         * the port is disabled.
         */
        if (!override || mask == 0xf) {
                expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
                /*
                 * If CH1 common lane is not active anymore
                 * (eg. for pipe B DPLL) the entire channel will
                 * shut down, which causes the common lane registers
                 * to read as 0. That means we can't actually check
                 * the lane power down status bits, but as the entire
                 * register reads as 0 it's a good indication that the
                 * channel is indeed entirely powered down.
                 */
                if (ch == DPIO_CH1 && val == 0)
                        expected = 0;
        } else if (mask != 0x0) {
                /* Some (not all) lanes powered: only "any" should be set. */
                expected = DPIO_ANYDL_POWERDOWN;
        } else {
                expected = 0;
        }

        /* Extract the two status bits for the channel being checked. */
        if (ch == DPIO_CH0)
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
        else
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
        actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

        WARN(actual != expected,
             "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
             !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
             !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
             reg, val);
}
1770
/*
 * Toggle the power-down override enable bit for one PHY channel.
 * Returns the previous override state so the caller can restore it later.
 * Serialized by the power_domains lock.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                          enum dpio_channel ch, bool override)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        bool was_override;

        mutex_lock(&power_domains->lock);

        was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        /* No HW write needed if the override state is unchanged. */
        if (override == was_override)
                goto out;

        if (override)
                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
        else
                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
                      phy, ch, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);

out:
        mutex_unlock(&power_domains->lock);

        return was_override;
}
1801
/*
 * Update the per-lane power-down override mask for the encoder's PHY
 * channel and flush it to DISPLAY_PHY_CONTROL.
 * @mask: lanes to keep powered when @override is set.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
                             bool override, unsigned int mask)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

        mutex_lock(&power_domains->lock);

        /* Replace the previous lane mask with the new one. */
        dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
        dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

        if (override)
                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
        else
                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
                      phy, ch, mask, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);

        assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

        mutex_unlock(&power_domains->lock);
}
1831
/*
 * Query the punit for the pipe-A power well state.
 * Returns true when the well reports power-on.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        enum pipe pipe = PIPE_A;
        bool enabled;
        u32 state, ctrl;

        vlv_punit_get(dev_priv);

        state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
        enabled = state == DP_SSS_PWR_ON(pipe);

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
        WARN_ON(ctrl << 16 != state);

        vlv_punit_put(dev_priv);

        return enabled;
}
1860
/*
 * Request the punit to power the pipe-A well on or off, then poll
 * (up to 100ms) until the status field reflects the requested state.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well,
                                    bool enable)
{
        enum pipe pipe = PIPE_A;
        u32 state;
        u32 ctrl;

        state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

        vlv_punit_get(dev_priv);

/* True when the punit status field already matches the requested state. */
#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

        if (COND)
                goto out;

        /* Write the control field; the punit mirrors it into the status field. */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
        ctrl &= ~DP_SSC_MASK(pipe);
        ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
        vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

        if (wait_for(COND, 100))
                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                          state,
                          vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
        vlv_punit_put(dev_priv);
}
1894
/* Enable the pipe well first, then (re)initialize display state on top of it. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
{
        chv_set_pipe_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}
1902
/* Tear down display state first, then gate the pipe well (reverse of enable). */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        vlv_display_power_well_deinit(dev_priv);

        chv_set_pipe_power_well(dev_priv, power_well, false);
}
1910
1911 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1912 {
1913         return power_domains->async_put_domains[0] |
1914                power_domains->async_put_domains[1];
1915 }
1916
1917 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1918
/*
 * The two async-put masks must never share a bit; warn and return false
 * if they do. Debug-build variant (CONFIG_DRM_I915_DEBUG_RUNTIME_PM).
 */
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
        return !WARN_ON(power_domains->async_put_domains[0] &
                        power_domains->async_put_domains[1]);
}
1925
/*
 * Verify async-put invariants: masks disjoint, wakeref held iff any
 * domain is pending, and each pending domain has exactly one reference.
 * Returns false if any invariant is violated (each violation also WARNs).
 */
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
        enum intel_display_power_domain domain;
        bool err = false;

        err |= !assert_async_put_domain_masks_disjoint(power_domains);
        err |= WARN_ON(!!power_domains->async_put_wakeref !=
                       !!__async_put_domains_mask(power_domains));

        for_each_power_domain(domain, __async_put_domains_mask(power_domains))
                err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

        return !err;
}
1941
/* Debug dump: list each domain in @mask with its current use count. */
static void print_power_domains(struct i915_power_domains *power_domains,
                                const char *prefix, u64 mask)
{
        enum intel_display_power_domain domain;

        DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
        for_each_power_domain(domain, mask)
                DRM_DEBUG_DRIVER("%s use_count %d\n",
                                 intel_display_power_domain_str(domain),
                                 power_domains->domain_use_count[domain]);
}
1953
/* Debug dump of the whole async-put state: wakeref plus both domain masks. */
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
        DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
                         power_domains->async_put_wakeref);

        print_power_domains(power_domains, "async_put_domains[0]",
                            power_domains->async_put_domains[0]);
        print_power_domains(power_domains, "async_put_domains[1]",
                            power_domains->async_put_domains[1]);
}
1965
/* Check the async-put invariants and dump the state when they are violated. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
        if (!__async_put_domains_state_ok(power_domains))
                print_async_put_domains_state(power_domains);
}
1972
1973 #else
1974
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}
1979
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}
1984
1985 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1986
/*
 * Return the combined pending async-put domain mask, asserting (in debug
 * builds) that the two underlying masks are disjoint first.
 */
static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
        assert_async_put_domain_masks_disjoint(power_domains);

        return __async_put_domains_mask(power_domains);
}
1993
/* Remove @domain from both pending async-put masks. */
static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
                               enum intel_display_power_domain domain)
{
        assert_async_put_domain_masks_disjoint(power_domains);

        /* The domain may sit in either mask; clear it from both. */
        power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
        power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
}
2003
/*
 * If @domain has a pending async put, take over that reference instead of
 * acquiring a new one. Returns true when the pending reference was grabbed.
 * When no pending domains remain afterwards, the work is cancelled and its
 * raw wakeref released.
 */
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
                                       enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        bool ret = false;

        if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
                goto out_verify;

        async_put_domains_clear_domain(power_domains, domain);

        ret = true;

        /* Other domains still pending: keep the work and its wakeref. */
        if (async_put_domains_mask(power_domains))
                goto out_verify;

        cancel_delayed_work(&power_domains->async_put_work);
        intel_runtime_pm_put_raw(dev_priv,
                                 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
        verify_async_put_domains_state(power_domains);

        return ret;
}
2029
/*
 * Take a reference on @domain with power_domains->lock already held:
 * either adopt a pending async-put reference, or enable every power well
 * backing the domain. Always bumps the domain use count.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;

        if (intel_display_power_grab_async_put_ref(dev_priv, domain))
                return;

        for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
                intel_power_well_get(dev_priv, power_well);

        power_domains->domain_use_count[domain]++;
}
2045
2046 /**
2047  * intel_display_power_get - grab a power domain reference
2048  * @dev_priv: i915 device instance
2049  * @domain: power domain to reference
2050  *
2051  * This function grabs a power domain reference for @domain and ensures that the
2052  * power domain and all its parents are powered up. Therefore users should only
2053  * grab a reference to the innermost power domain they need.
2054  *
2055  * Any power domain reference obtained by this function must have a symmetric
2056  * call to intel_display_power_put() to release the reference again.
2057  */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
                                        enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        /* Hold a runtime PM reference for as long as the domain ref exists. */
        intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);

        mutex_lock(&power_domains->lock);
        __intel_display_power_get_domain(dev_priv, domain);
        mutex_unlock(&power_domains->lock);

        /* Returned wakeref must be passed back to intel_display_power_put(). */
        return wakeref;
}
2070
2071 /**
2072  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2073  * @dev_priv: i915 device instance
2074  * @domain: power domain to reference
2075  *
2076  * This function grabs a power domain reference for @domain and ensures that the
2077  * power domain and all its parents are powered up. Therefore users should only
2078  * grab a reference to the innermost power domain they need.
2079  *
2080  * Any power domain reference obtained by this function must have a symmetric
2081  * call to intel_display_power_put() to release the reference again.
2082  */
2083 intel_wakeref_t
2084 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2085                                    enum intel_display_power_domain domain)
2086 {
2087         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2088         intel_wakeref_t wakeref;
2089         bool is_enabled;
2090
2091         wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
2092         if (!wakeref)
2093                 return false;
2094
2095         mutex_lock(&power_domains->lock);
2096
2097         if (__intel_display_power_is_enabled(dev_priv, domain)) {
2098                 __intel_display_power_get_domain(dev_priv, domain);
2099                 is_enabled = true;
2100         } else {
2101                 is_enabled = false;
2102         }
2103
2104         mutex_unlock(&power_domains->lock);
2105
2106         if (!is_enabled) {
2107                 intel_runtime_pm_put(dev_priv, wakeref);
2108                 wakeref = 0;
2109         }
2110
2111         return wakeref;
2112 }
2113
/*
 * Drop a reference on @domain with power_domains->lock already held:
 * decrement the use count and release the backing power wells in reverse
 * order. Warns on zero use count or on a pending async put for @domain.
 */
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        const char *name = intel_display_power_domain_str(domain);

        power_domains = &dev_priv->power_domains;

        WARN(!power_domains->domain_use_count[domain],
             "Use count on domain %s is already zero\n",
             name);
        WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
             "Async disabling of domain %s is pending\n",
             name);

        power_domains->domain_use_count[domain]--;

        /* Release wells in reverse of the order they were acquired. */
        for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
                intel_power_well_put(dev_priv, power_well);
}
2136
/* Locked wrapper around __intel_display_power_put_domain(). */
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
                                      enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);
        __intel_display_power_put_domain(dev_priv, domain);
        mutex_unlock(&power_domains->lock);
}
2146
2147 /**
2148  * intel_display_power_put_unchecked - release an unchecked power domain reference
2149  * @dev_priv: i915 device instance
2150  * @domain: power domain to reference
2151  *
2152  * This function drops the power domain reference obtained by
2153  * intel_display_power_get() and might power down the corresponding hardware
2154  * block right away if this is the last reference.
2155  *
2156  * This function exists only for historical reasons and should be avoided in
2157  * new code, as the correctness of its use cannot be checked. Always use
2158  * intel_display_power_put() instead.
2159  */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
                                       enum intel_display_power_domain domain)
{
        /* Drops the domain ref and its runtime PM ref without wakeref tracking. */
        __intel_display_power_put(dev_priv, domain);
        intel_runtime_pm_put_unchecked(dev_priv);
}
2166
/*
 * Hand @wakeref to the async-put machinery and schedule the delayed
 * work (100ms). Warns if a wakeref is already stashed or if the work
 * was unexpectedly already queued.
 */
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
                             intel_wakeref_t wakeref)
{
        WARN_ON(power_domains->async_put_wakeref);
        power_domains->async_put_wakeref = wakeref;
        WARN_ON(!queue_delayed_work(system_unbound_wq,
                                    &power_domains->async_put_work,
                                    msecs_to_jiffies(100)));
}
2177
/*
 * Release every domain in @mask that was put asynchronously.
 * Caller must hold a raw runtime PM wakeref and power_domains->lock.
 */
static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
        struct drm_i915_private *dev_priv =
                container_of(power_domains, struct drm_i915_private,
                             power_domains);
        enum intel_display_power_domain domain;
        intel_wakeref_t wakeref;

        /*
         * The caller must hold already raw wakeref, upgrade that to a proper
         * wakeref to make the state checker happy about the HW access during
         * power well disabling.
         */
        assert_rpm_raw_wakeref_held(dev_priv);
        wakeref = intel_runtime_pm_get(dev_priv);

        for_each_power_domain(domain, mask) {
                /* Clear before put, so put's sanity check is happy. */
                async_put_domains_clear_domain(power_domains, domain);
                __intel_display_power_put_domain(dev_priv, domain);
        }

        intel_runtime_pm_put(dev_priv, wakeref);
}
2203
/*
 * Delayed-work handler for async power domain puts. Releases the domains
 * queued in async_put_domains[0] and, if more were queued meanwhile in
 * async_put_domains[1], promotes them and requeues itself.
 */
static void
intel_display_power_put_async_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             power_domains.async_put_work.work);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        /* Raw wakeref for a potential requeue; released below if unused. */
        intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv);
        intel_wakeref_t old_work_wakeref = 0;

        mutex_lock(&power_domains->lock);

        /*
         * Bail out if all the domain refs pending to be released were grabbed
         * by subsequent gets or a flush_work.
         */
        old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
        if (!old_work_wakeref)
                goto out_verify;

        release_async_put_domains(power_domains,
                                  power_domains->async_put_domains[0]);

        /* Requeue the work if more domains were async put meanwhile. */
        if (power_domains->async_put_domains[1]) {
                power_domains->async_put_domains[0] =
                        fetch_and_zero(&power_domains->async_put_domains[1]);
                queue_async_put_domains_work(power_domains,
                                             fetch_and_zero(&new_work_wakeref));
        }

out_verify:
        verify_async_put_domains_state(power_domains);

        mutex_unlock(&power_domains->lock);

        /* Release whichever wakerefs were not consumed above. */
        if (old_work_wakeref)
                intel_runtime_pm_put_raw(dev_priv, old_work_wakeref);
        if (new_work_wakeref)
                intel_runtime_pm_put_raw(dev_priv, new_work_wakeref);
}
2245
2246 /**
2247  * intel_display_power_put_async - release a power domain reference asynchronously
2248  * @i915: i915 device instance
2249  * @domain: power domain to reference
2250  * @wakeref: wakeref acquired for the reference that is being released
2251  *
2252  * This function drops the power domain reference obtained by
2253  * intel_display_power_get*() and schedules a work to power down the
2254  * corresponding hardware block if this is the last reference.
2255  */
void __intel_display_power_put_async(struct drm_i915_private *i915,
                                     enum intel_display_power_domain domain,
                                     intel_wakeref_t wakeref)
{
        struct i915_power_domains *power_domains = &i915->power_domains;
        /* Raw wakeref to hand to the work; released below if not queued. */
        intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915);

        mutex_lock(&power_domains->lock);

        /* Not the last reference: drop it synchronously, nothing to defer. */
        if (power_domains->domain_use_count[domain] > 1) {
                __intel_display_power_put_domain(i915, domain);

                goto out_verify;
        }

        WARN_ON(power_domains->domain_use_count[domain] != 1);

        /* Let a pending work requeue itself or queue a new one. */
        if (power_domains->async_put_wakeref) {
                power_domains->async_put_domains[1] |= BIT_ULL(domain);
        } else {
                power_domains->async_put_domains[0] |= BIT_ULL(domain);
                queue_async_put_domains_work(power_domains,
                                             fetch_and_zero(&work_wakeref));
        }

out_verify:
        verify_async_put_domains_state(power_domains);

        mutex_unlock(&power_domains->lock);

        if (work_wakeref)
                intel_runtime_pm_put_raw(i915, work_wakeref);

        /* The caller's wakeref is always consumed here. */
        intel_runtime_pm_put(i915, wakeref);
}
2292
2293 /**
2294  * intel_display_power_flush_work - flushes the async display power disabling work
2295  * @i915: i915 device instance
2296  *
2297  * Flushes any pending work that was scheduled by a preceding
2298  * intel_display_power_put_async() call, completing the disabling of the
2299  * corresponding power domains.
2300  *
2301  * Note that the work handler function may still be running after this
2302  * function returns; to ensure that the work handler isn't running use
2303  * intel_display_power_flush_work_sync() instead.
2304  */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
        struct i915_power_domains *power_domains = &i915->power_domains;
        intel_wakeref_t work_wakeref;

        mutex_lock(&power_domains->lock);

        /* No stashed wakeref means no pending async puts to flush. */
        work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
        if (!work_wakeref)
                goto out_verify;

        /* Release all pending domains here instead of in the work. */
        release_async_put_domains(power_domains,
                                  async_put_domains_mask(power_domains));
        cancel_delayed_work(&power_domains->async_put_work);

out_verify:
        verify_async_put_domains_state(power_domains);

        mutex_unlock(&power_domains->lock);

        if (work_wakeref)
                intel_runtime_pm_put_raw(i915, work_wakeref);
}
2328
2329 /**
2330  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2331  * @i915: i915 device instance
2332  *
2333  * Like intel_display_power_flush_work(), but also ensure that the work
2334  * handler function is not running any more when this function returns.
2335  */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
        struct i915_power_domains *power_domains = &i915->power_domains;

        /* Flush pending puts, then wait out any still-running work handler. */
        intel_display_power_flush_work(i915);
        cancel_delayed_work_sync(&power_domains->async_put_work);

        verify_async_put_domains_state(power_domains);

        WARN_ON(power_domains->async_put_wakeref);
}
2348
2349 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2350 /**
2351  * intel_display_power_put - release a power domain reference
2352  * @dev_priv: i915 device instance
2353  * @domain: power domain to reference
2354  * @wakeref: wakeref acquired for the reference that is being released
2355  *
2356  * This function drops the power domain reference obtained by
2357  * intel_display_power_get() and might power down the corresponding hardware
2358  * block right away if this is the last reference.
2359  */
void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain,
                             intel_wakeref_t wakeref)
{
        /* Tracked variant: @wakeref pairs with the one from the matching get. */
        __intel_display_power_put(dev_priv, domain);
        intel_runtime_pm_put(dev_priv, wakeref);
}
2367 #endif
2368
/* Domains powered by the i830 pipes well: both pipes/transcoders + fitters. */
#define I830_PIPES_POWER_DOMAINS (              \
        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Domains powered by the VLV display power well. */
#define VLV_DISPLAY_POWER_DOMAINS (             \
        BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
        BIT_ULL(POWER_DOMAIN_AUDIO) |           \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_GMBUS) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Domains powered by the VLV DPIO common BC lane well (ports B/C + CRT). */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

/*
 * Per-lane-pair TX wells for port B. Both lane pairs cover the same
 * domains, so the 01 and 23 sets are intentionally identical.
 */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_INIT))

/* Per-lane-pair TX wells for port C (lane pair 0/1). */
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_INIT))
2419
2420 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
2421         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2422         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2423         BIT_ULL(POWER_DOMAIN_INIT))
2424
2425 #define CHV_DISPLAY_POWER_DOMAINS (             \
2426         BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
2427         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2428         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2429         BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
2430         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2431         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2432         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2433         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2434         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2435         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
2436         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2437         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2438         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2439         BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
2440         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2441         BIT_ULL(POWER_DOMAIN_AUDIO) |           \
2442         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2443         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2444         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2445         BIT_ULL(POWER_DOMAIN_GMBUS) |           \
2446         BIT_ULL(POWER_DOMAIN_INIT))
2447
2448 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
2449         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2450         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2451         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2452         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2453         BIT_ULL(POWER_DOMAIN_INIT))
2454
2455 #define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
2456         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2457         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2458         BIT_ULL(POWER_DOMAIN_INIT))
2459
2460 #define HSW_DISPLAY_POWER_DOMAINS (                     \
2461         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2462         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2463         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
2464         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2465         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2466         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2467         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2468         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2469         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2470         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2471         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2472         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2473         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2474         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2475         BIT_ULL(POWER_DOMAIN_INIT))
2476
2477 #define BDW_DISPLAY_POWER_DOMAINS (                     \
2478         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2479         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2480         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2481         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2482         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2483         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2484         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2485         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2486         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2487         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2488         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2489         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2490         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2491         BIT_ULL(POWER_DOMAIN_INIT))
2492
2493 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2494         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2495         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2496         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2497         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2498         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2499         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2500         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2501         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2502         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2503         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2504         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
2505         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2506         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2507         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2508         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2509         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2510         BIT_ULL(POWER_DOMAIN_INIT))
2511 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (          \
2512         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2513         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
2514         BIT_ULL(POWER_DOMAIN_INIT))
2515 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2516         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2517         BIT_ULL(POWER_DOMAIN_INIT))
2518 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2519         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2520         BIT_ULL(POWER_DOMAIN_INIT))
2521 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (            \
2522         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2523         BIT_ULL(POWER_DOMAIN_INIT))
2524 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2525         SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2526         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2527         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2528         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2529         BIT_ULL(POWER_DOMAIN_INIT))
2530
2531 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2532         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2533         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2534         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2535         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2536         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2537         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2538         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2539         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2540         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2541         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2542         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2543         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2544         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2545         BIT_ULL(POWER_DOMAIN_INIT))
2546 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2547         BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2548         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2549         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2550         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2551         BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2552         BIT_ULL(POWER_DOMAIN_INIT))
2553 #define BXT_DPIO_CMN_A_POWER_DOMAINS (                  \
2554         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2555         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2556         BIT_ULL(POWER_DOMAIN_INIT))
2557 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (                 \
2558         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2559         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2560         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2561         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2562         BIT_ULL(POWER_DOMAIN_INIT))
2563
2564 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2565         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2566         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2567         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2568         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2569         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2570         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2571         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2572         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2573         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2574         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2575         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2576         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2577         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2578         BIT_ULL(POWER_DOMAIN_INIT))
2579 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (            \
2580         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2581 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2582         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2583 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2584         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2585 #define GLK_DPIO_CMN_A_POWER_DOMAINS (                  \
2586         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2587         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2588         BIT_ULL(POWER_DOMAIN_INIT))
2589 #define GLK_DPIO_CMN_B_POWER_DOMAINS (                  \
2590         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2591         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2592         BIT_ULL(POWER_DOMAIN_INIT))
2593 #define GLK_DPIO_CMN_C_POWER_DOMAINS (                  \
2594         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2595         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2596         BIT_ULL(POWER_DOMAIN_INIT))
2597 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (               \
2598         BIT_ULL(POWER_DOMAIN_AUX_A) |           \
2599         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2600         BIT_ULL(POWER_DOMAIN_INIT))
2601 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (               \
2602         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2603         BIT_ULL(POWER_DOMAIN_INIT))
2604 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (               \
2605         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2606         BIT_ULL(POWER_DOMAIN_INIT))
2607 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2608         GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2609         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2610         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2611         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2612         BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2613         BIT_ULL(POWER_DOMAIN_INIT))
2614
2615 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2616         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2617         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2618         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2619         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2620         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2621         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2622         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2623         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2624         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2625         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2626         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
2627         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2628         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2629         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2630         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2631         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2632         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2633         BIT_ULL(POWER_DOMAIN_INIT))
2634 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (            \
2635         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2636         BIT_ULL(POWER_DOMAIN_INIT))
2637 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (            \
2638         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2639         BIT_ULL(POWER_DOMAIN_INIT))
2640 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (            \
2641         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2642         BIT_ULL(POWER_DOMAIN_INIT))
2643 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (            \
2644         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2645         BIT_ULL(POWER_DOMAIN_INIT))
2646 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (               \
2647         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2648         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2649         BIT_ULL(POWER_DOMAIN_INIT))
2650 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (               \
2651         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2652         BIT_ULL(POWER_DOMAIN_INIT))
2653 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (               \
2654         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2655         BIT_ULL(POWER_DOMAIN_INIT))
2656 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (               \
2657         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2658         BIT_ULL(POWER_DOMAIN_INIT))
2659 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (               \
2660         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2661         BIT_ULL(POWER_DOMAIN_INIT))
2662 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (            \
2663         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
2664         BIT_ULL(POWER_DOMAIN_INIT))
2665 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2666         CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2667         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2668         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2669         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2670         BIT_ULL(POWER_DOMAIN_INIT))
2671
2672 /*
2673  * ICL PW_0/PG_0 domains (HW/DMC control):
2674  * - PCI
2675  * - clocks except port PLL
2676  * - central power except FBC
2677  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2678  * ICL PW_1/PG_1 domains (HW/DMC control):
2679  * - DBUF function
2680  * - PIPE_A and its planes, except VGA
2681  * - transcoder EDP + PSR
2682  * - transcoder DSI
2683  * - DDI_A
2684  * - FBC
2685  */
2686 #define ICL_PW_4_POWER_DOMAINS (                        \
2687         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2688         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2689         BIT_ULL(POWER_DOMAIN_INIT))
2690         /* VDSC/joining */
2691 #define ICL_PW_3_POWER_DOMAINS (                        \
2692         ICL_PW_4_POWER_DOMAINS |                        \
2693         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2694         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2695         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2696         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2697         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2698         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2699         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2700         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2701         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2702         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2703         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2704         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
2705         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
2706         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
2707         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
2708         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2709         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2710         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2711         BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
2712         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2713         BIT_ULL(POWER_DOMAIN_AUX_TBT1) |                \
2714         BIT_ULL(POWER_DOMAIN_AUX_TBT2) |                \
2715         BIT_ULL(POWER_DOMAIN_AUX_TBT3) |                \
2716         BIT_ULL(POWER_DOMAIN_AUX_TBT4) |                \
2717         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2718         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2719         BIT_ULL(POWER_DOMAIN_INIT))
2720         /*
2721          * - transcoder WD
2722          * - KVMR (HW control)
2723          */
2724 #define ICL_PW_2_POWER_DOMAINS (                        \
2725         ICL_PW_3_POWER_DOMAINS |                        \
2726         BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |             \
2727         BIT_ULL(POWER_DOMAIN_INIT))
2728         /*
2729          * - KVMR (HW control)
2730          */
2731 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2732         ICL_PW_2_POWER_DOMAINS |                        \
2733         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2734         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2735         BIT_ULL(POWER_DOMAIN_INIT))
2736
2737 #define ICL_DDI_IO_A_POWER_DOMAINS (                    \
2738         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2739 #define ICL_DDI_IO_B_POWER_DOMAINS (                    \
2740         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2741 #define ICL_DDI_IO_C_POWER_DOMAINS (                    \
2742         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2743 #define ICL_DDI_IO_D_POWER_DOMAINS (                    \
2744         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2745 #define ICL_DDI_IO_E_POWER_DOMAINS (                    \
2746         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2747 #define ICL_DDI_IO_F_POWER_DOMAINS (                    \
2748         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2749
2750 #define ICL_AUX_A_IO_POWER_DOMAINS (                    \
2751         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2752         BIT_ULL(POWER_DOMAIN_AUX_A))
2753 #define ICL_AUX_B_IO_POWER_DOMAINS (                    \
2754         BIT_ULL(POWER_DOMAIN_AUX_B))
2755 #define ICL_AUX_C_IO_POWER_DOMAINS (                    \
2756         BIT_ULL(POWER_DOMAIN_AUX_C))
2757 #define ICL_AUX_D_IO_POWER_DOMAINS (                    \
2758         BIT_ULL(POWER_DOMAIN_AUX_D))
2759 #define ICL_AUX_E_IO_POWER_DOMAINS (                    \
2760         BIT_ULL(POWER_DOMAIN_AUX_E))
2761 #define ICL_AUX_F_IO_POWER_DOMAINS (                    \
2762         BIT_ULL(POWER_DOMAIN_AUX_F))
2763 #define ICL_AUX_TBT1_IO_POWER_DOMAINS (                 \
2764         BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2765 #define ICL_AUX_TBT2_IO_POWER_DOMAINS (                 \
2766         BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2767 #define ICL_AUX_TBT3_IO_POWER_DOMAINS (                 \
2768         BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2769 #define ICL_AUX_TBT4_IO_POWER_DOMAINS (                 \
2770         BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2771
/*
 * Ops for power wells that can never be turned off: enable/disable are
 * no-op callbacks, only the enabled query does real work.
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV pipe power wells, controlled via display-specific callbacks. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/*
 * CHV DPIO common lane wells: CHV-specific enable/disable, but the
 * enabled state is queried the same way as on VLV.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2792
/*
 * Platforms without controllable power wells: a single virtual well
 * covering every power domain, which is always on.
 */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2802
/* i830 "pipes" well: the only pre-HSW well with a real sync_hw hook. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
2809
/* i830: an always-on well plus a single well covering both pipes. */
static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2825
/* Standard HSW+ request/status register based power well control. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* GEN9+ virtual "DC off" well: enabling it blocks DC states. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* BXT DPIO common lane power wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2846
/*
 * HSW+ power well control registers, one per requester: BIOS, driver,
 * KVMR and debug (CTL1..CTL4 respectively).
 */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
2853
/* HSW: always-on well plus one global display well. */
static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};
2874
/*
 * BDW: like HSW, but the global display well also gates the pipe B/C
 * interrupts (irq_pipe_mask).
 */
static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2896
/* VLV display (DISP2D) power well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane power well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic VLV (Punit controlled) power well. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2917
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	/*
	 * NOTE(review): each dpio-tx well below claims the TX lane domains
	 * of both channels (B and C, lanes 01 and 23) — presumably the four
	 * lane group wells have to be powered up together; confirm against
	 * the Punit power gating documentation.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
2993
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	/* Common lane well for DDI B/C. */
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	/* Common lane well for DDI D (CHV only). */
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
3032
3033 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3034                                          enum i915_power_well_id power_well_id)
3035 {
3036         struct i915_power_well *power_well;
3037         bool ret;
3038
3039         power_well = lookup_power_well(dev_priv, power_well_id);
3040         ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3041
3042         return ret;
3043 }
3044
/*
 * Skylake display power wells, listed in enabling order (disabling
 * happens in reverse order; see intel_power_domains_init()).
 */
static const struct i915_power_well_desc skl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                /* Anonymous union member: HSW-style register/index data. */
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "MISC IO power well",
                /* Handled by the DMC firmware */
                .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_MISC_IO,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
                },
        },
        {
                .name = "DC off",
                .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
                .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        /* Pipe B/C IRQs are gated by this well. */
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "DDI A/E IO power well",
                .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
                },
        },
        {
                .name = "DDI B IO power well",
                .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
                },
        },
        {
                .name = "DDI C IO power well",
                .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
                },
        },
        {
                .name = "DDI D IO power well",
                .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
                },
        },
};
3138
/*
 * Broxton display power wells, listed in enabling order (disabling
 * happens in reverse order; see intel_power_domains_init()).
 */
static const struct i915_power_well_desc bxt_power_wells[] = {
        {
                .name = "always-on",
                .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "DC off",
                .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
                .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "dpio-common-a",
                .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DISP_PW_DPIO_CMN_A,
                /* Anonymous union member: BXT DPIO PHY selector. */
                {
                        .bxt.phy = DPIO_PHY1,
                },
        },
        {
                .name = "dpio-common-bc",
                .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = VLV_DISP_PW_DPIO_CMN_BC,
                {
                        .bxt.phy = DPIO_PHY0,
                },
        },
};
3198
/*
 * Geminilake display power wells, listed in enabling order (disabling
 * happens in reverse order; see intel_power_domains_init()).
 */
static const struct i915_power_well_desc glk_power_wells[] = {
        {
                .name = "always-on",
                .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "DC off",
                .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
                .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "dpio-common-a",
                .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DISP_PW_DPIO_CMN_A,
                {
                        .bxt.phy = DPIO_PHY1,
                },
        },
        {
                .name = "dpio-common-b",
                /* NOTE: id reuses the VLV/BXT common-BC well id. */
                .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = VLV_DISP_PW_DPIO_CMN_BC,
                {
                        .bxt.phy = DPIO_PHY0,
                },
        },
        {
                .name = "dpio-common-c",
                .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = GLK_DISP_PW_DPIO_CMN_C,
                {
                        .bxt.phy = DPIO_PHY2,
                },
        },
        {
                .name = "AUX A",
                .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
                },
        },
        {
                .name = "AUX B",
                .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
                },
        },
        {
                .name = "AUX C",
                .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
                },
        },
        {
                .name = "DDI A IO power well",
                .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
                },
        },
        {
                .name = "DDI B IO power well",
                .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
                },
        },
        {
                .name = "DDI C IO power well",
                .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
                },
        },
};
3327
/*
 * Cannonlake display power wells, listed in enabling order (disabling
 * happens in reverse order; see intel_power_domains_init()).
 */
static const struct i915_power_well_desc cnl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "AUX A",
                .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
                },
        },
        {
                .name = "AUX B",
                .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
                },
        },
        {
                .name = "AUX C",
                .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
                },
        },
        {
                .name = "AUX D",
                .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
                },
        },
        {
                .name = "DC off",
                .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
                .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "DDI A IO power well",
                .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
                },
        },
        {
                .name = "DDI B IO power well",
                .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
                },
        },
        {
                .name = "DDI C IO power well",
                .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
                },
        },
        {
                .name = "DDI D IO power well",
                .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
                },
        },
        {
                /*
                 * Port F wells are trimmed from this list at init time on
                 * SKUs without port F; see intel_power_domains_init().
                 */
                .name = "DDI F IO power well",
                .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
                },
        },
        {
                .name = "AUX F",
                .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
                },
        },
};
3469
/* Ops for ICL AUX wells on combo PHY ports: custom enable/disable hooks. */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
        .enable = icl_combo_phy_aux_power_well_enable,
        .disable = icl_combo_phy_aux_power_well_disable,
        .is_enabled = hsw_power_well_enabled,
};
3476
/* Ops for ICL AUX wells on Type-C PHY ports: custom enable, HSW disable. */
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
        .enable = icl_tc_phy_aux_power_well_enable,
        .disable = hsw_power_well_disable,
        .is_enabled = hsw_power_well_enabled,
};
3483
/* ICL AUX power well control registers (BIOS/driver/debug request sets). */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
        .bios   = ICL_PWR_WELL_CTL_AUX1,
        .driver = ICL_PWR_WELL_CTL_AUX2,
        .debug  = ICL_PWR_WELL_CTL_AUX4,
};
3489
/* ICL DDI IO power well control registers (BIOS/driver/debug request sets). */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
        .bios   = ICL_PWR_WELL_CTL_DDI1,
        .driver = ICL_PWR_WELL_CTL_DDI2,
        .debug  = ICL_PWR_WELL_CTL_DDI4,
};
3495
/*
 * Icelake display power wells, listed in enabling order (disabling
 * happens in reverse order; see intel_power_domains_init()).
 */
static const struct i915_power_well_desc icl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = true,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .always_on = true,
                .domains = 0,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_1,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_PW_1,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "DC off",
                .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = DISP_PW_ID_NONE,
        },
        {
                .name = "power well 2",
                .domains = ICL_PW_2_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = SKL_DISP_PW_2,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_PW_2,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "power well 3",
                .domains = ICL_PW_3_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_PW_3,
                        /* Pipe B IRQs are gated by this well. */
                        .hsw.irq_pipe_mask = BIT(PIPE_B),
                        .hsw.has_vga = true,
                        .hsw.has_fuses = true,
                },
        },
        {
                .name = "DDI A IO",
                .domains = ICL_DDI_IO_A_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_ddi_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
                },
        },
        {
                .name = "DDI B IO",
                .domains = ICL_DDI_IO_B_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_ddi_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
                },
        },
        {
                .name = "DDI C IO",
                .domains = ICL_DDI_IO_C_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_ddi_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
                },
        },
        {
                .name = "DDI D IO",
                .domains = ICL_DDI_IO_D_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_ddi_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
                },
        },
        {
                .name = "DDI E IO",
                .domains = ICL_DDI_IO_E_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_ddi_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
                },
        },
        {
                .name = "DDI F IO",
                .domains = ICL_DDI_IO_F_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_ddi_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
                },
        },
        {
                /* AUX A/B: combo PHY ports, hence the combo-phy ops. */
                .name = "AUX A",
                .domains = ICL_AUX_A_IO_POWER_DOMAINS,
                .ops = &icl_combo_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
                },
        },
        {
                .name = "AUX B",
                .domains = ICL_AUX_B_IO_POWER_DOMAINS,
                .ops = &icl_combo_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
                },
        },
        {
                /* AUX C..F: Type-C PHY ports in non-TBT (DP-alt/legacy) mode. */
                .name = "AUX C",
                .domains = ICL_AUX_C_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
                        .hsw.is_tc_tbt = false,
                },
        },
        {
                .name = "AUX D",
                .domains = ICL_AUX_D_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
                        .hsw.is_tc_tbt = false,
                },
        },
        {
                .name = "AUX E",
                .domains = ICL_AUX_E_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
                        .hsw.is_tc_tbt = false,
                },
        },
        {
                .name = "AUX F",
                .domains = ICL_AUX_F_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
                        .hsw.is_tc_tbt = false,
                },
        },
        {
                /* AUX TBT1..4: Type-C ports in Thunderbolt mode. */
                .name = "AUX TBT1",
                .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
                        .hsw.is_tc_tbt = true,
                },
        },
        {
                .name = "AUX TBT2",
                .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
                        .hsw.is_tc_tbt = true,
                },
        },
        {
                .name = "AUX TBT3",
                .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
                        .hsw.is_tc_tbt = true,
                },
        },
        {
                .name = "AUX TBT4",
                .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
                .ops = &icl_tc_phy_aux_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
                        .hsw.is_tc_tbt = true,
                },
        },
        {
                .name = "power well 4",
                .domains = ICL_PW_4_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &hsw_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_PW_4,
                        .hsw.has_fuses = true,
                        /* Pipe C IRQs are gated by this well. */
                        .hsw.irq_pipe_mask = BIT(PIPE_C),
                },
        },
};
3728
/*
 * Normalize the disable_power_well module option: a negative value means
 * "auto" and defaults to 1 (power wells may be disabled); any non-negative
 * value is clamped to 0/1.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
                                   int disable_power_well)
{
        if (disable_power_well < 0)
                return 1;

        return !!disable_power_well;
}
3738
3739 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3740                                int enable_dc)
3741 {
3742         u32 mask;
3743         int requested_dc;
3744         int max_dc;
3745
3746         if (INTEL_GEN(dev_priv) >= 11) {
3747                 max_dc = 2;
3748                 /*
3749                  * DC9 has a separate HW flow from the rest of the DC states,
3750                  * not depending on the DMC firmware. It's needed by system
3751                  * suspend/resume, so allow it unconditionally.
3752                  */
3753                 mask = DC_STATE_EN_DC9;
3754         } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3755                 max_dc = 2;
3756                 mask = 0;
3757         } else if (IS_GEN9_LP(dev_priv)) {
3758                 max_dc = 1;
3759                 mask = DC_STATE_EN_DC9;
3760         } else {
3761                 max_dc = 0;
3762                 mask = 0;
3763         }
3764
3765         if (!i915_modparams.disable_power_well)
3766                 max_dc = 0;
3767
3768         if (enable_dc >= 0 && enable_dc <= max_dc) {
3769                 requested_dc = enable_dc;
3770         } else if (enable_dc == -1) {
3771                 requested_dc = max_dc;
3772         } else if (enable_dc > max_dc && enable_dc <= 2) {
3773                 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3774                               enable_dc, max_dc);
3775                 requested_dc = max_dc;
3776         } else {
3777                 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3778                 requested_dc = max_dc;
3779         }
3780
3781         if (requested_dc > 1)
3782                 mask |= DC_STATE_EN_UPTO_DC6;
3783         if (requested_dc > 0)
3784                 mask |= DC_STATE_EN_UPTO_DC5;
3785
3786         DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3787
3788         return mask;
3789 }
3790
3791 static int
3792 __set_power_wells(struct i915_power_domains *power_domains,
3793                   const struct i915_power_well_desc *power_well_descs,
3794                   int power_well_count)
3795 {
3796         u64 power_well_ids = 0;
3797         int i;
3798
3799         power_domains->power_well_count = power_well_count;
3800         power_domains->power_wells =
3801                                 kcalloc(power_well_count,
3802                                         sizeof(*power_domains->power_wells),
3803                                         GFP_KERNEL);
3804         if (!power_domains->power_wells)
3805                 return -ENOMEM;
3806
3807         for (i = 0; i < power_well_count; i++) {
3808                 enum i915_power_well_id id = power_well_descs[i].id;
3809
3810                 power_domains->power_wells[i].desc = &power_well_descs[i];
3811
3812                 if (id == DISP_PW_ID_NONE)
3813                         continue;
3814
3815                 WARN_ON(id >= sizeof(power_well_ids) * 8);
3816                 WARN_ON(power_well_ids & BIT_ULL(id));
3817                 power_well_ids |= BIT_ULL(id);
3818         }
3819
3820         return 0;
3821 }
3822
/* Convenience wrapper passing a descriptor array and its size to __set_power_wells(). */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
3826
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 *
 * Returns 0 on success or -ENOMEM if the power well array could not be
 * allocated.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	/* Sanitize the user-supplied module parameters before use. */
	i915_modparams.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   i915_modparams.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

	/* Power domains are tracked in a u64 bitmask elsewhere. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_GEN(dev_priv, 11)) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and Aux IO are getting enabled for all ports
		 * regardless the presence or use. So, in order to avoid
		 * timeouts, lets remove them from the list
		 * for the SKUs without port F.
		 *
		 * NOTE(review): assumes the port F wells are the last two
		 * entries of cnl_power_wells -- verify against the table.
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		/* Fallback: a single always-on well for everything else. */
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}
3891
3892 /**
3893  * intel_power_domains_cleanup - clean up power domains resources
3894  * @dev_priv: i915 device instance
3895  *
3896  * Release any resources acquired by intel_power_domains_init()
3897  */
3898 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3899 {
3900         kfree(dev_priv->power_domains.power_wells);
3901 }
3902
3903 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
3904 {
3905         struct i915_power_domains *power_domains = &dev_priv->power_domains;
3906         struct i915_power_well *power_well;
3907
3908         mutex_lock(&power_domains->lock);
3909         for_each_power_well(dev_priv, power_well) {
3910                 power_well->desc->ops->sync_hw(dev_priv, power_well);
3911                 power_well->hw_enabled =
3912                         power_well->desc->ops->is_enabled(dev_priv, power_well);
3913         }
3914         mutex_unlock(&power_domains->lock);
3915 }
3916
3917 static inline
3918 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3919                           i915_reg_t reg, bool enable)
3920 {
3921         u32 val, status;
3922
3923         val = I915_READ(reg);
3924         val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3925         I915_WRITE(reg, val);
3926         POSTING_READ(reg);
3927         udelay(10);
3928
3929         status = I915_READ(reg) & DBUF_POWER_STATE;
3930         if ((enable && !status) || (!enable && status)) {
3931                 DRM_ERROR("DBus power %s timeout!\n",
3932                           enable ? "enable" : "disable");
3933                 return false;
3934         }
3935         return true;
3936 }
3937
/* Power up the single gen9 DBUF slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
3942
/* Power down the single gen9 DBUF slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
3947
3948 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3949 {
3950         if (INTEL_GEN(dev_priv) < 11)
3951                 return 1;
3952         return 2;
3953 }
3954
3955 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3956                             u8 req_slices)
3957 {
3958         const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3959         bool ret;
3960
3961         if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3962                 DRM_ERROR("Invalid number of dbuf slices requested\n");
3963                 return;
3964         }
3965
3966         if (req_slices == hw_enabled_slices || req_slices == 0)
3967                 return;
3968
3969         if (req_slices > hw_enabled_slices)
3970                 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3971         else
3972                 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3973
3974         if (ret)
3975                 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3976 }
3977
/* Power up both ICL DBUF slices and check they report powered-on. */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the hardware time to act on the request before checking. */
	udelay(10);

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		/*
		 * FIXME: for now pretend that we only have 1 slice, see
		 * intel_enabled_dbuf_slices_num().
		 */
		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
3996
/* Power down both ICL DBUF slices and check they report powered-off. */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the hardware time to act on the request before checking. */
	udelay(10);

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		/*
		 * FIXME: for now pretend that the first slice is always
		 * enabled, see intel_enabled_dbuf_slices_num().
		 */
		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
4015
4016 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4017 {
4018         u32 val;
4019
4020         val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4021               MBUS_ABOX_BT_CREDIT_POOL2(16) |
4022               MBUS_ABOX_B_CREDIT(1) |
4023               MBUS_ABOX_BW_CREDIT(1);
4024
4025         I915_WRITE(MBUS_ABOX_CTL, val);
4026 }
4027
4028 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4029 {
4030         u32 val = I915_READ(LCPLL_CTL);
4031
4032         /*
4033          * The LCPLL register should be turned on by the BIOS. For now
4034          * let's just check its state and print errors in case
4035          * something is wrong.  Don't even try to turn it on.
4036          */
4037
4038         if (val & LCPLL_CD_SOURCE_FCLK)
4039                 DRM_ERROR("CDCLK source is not LCPLL\n");
4040
4041         if (val & LCPLL_PLL_DISABLE)
4042                 DRM_ERROR("LCPLL is disabled\n");
4043 }
4044
/*
 * Sanity-check that nothing which depends on LCPLL is still running
 * before the PLL is disabled: no active CRTCs, power wells, PLLs, panel
 * power, PWMs, utility pin or PCH GTC, and no enabled interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* The second CPU PWM only exists on Haswell. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
4084
4085 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4086 {
4087         if (IS_HASWELL(dev_priv))
4088                 return I915_READ(D_COMP_HSW);
4089         else
4090                 return I915_READ(D_COMP_BDW);
4091 }
4092
4093 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4094 {
4095         if (IS_HASWELL(dev_priv)) {
4096                 if (sandybridge_pcode_write(dev_priv,
4097                                             GEN6_PCODE_WRITE_D_COMP, val))
4098                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
4099         } else {
4100                 I915_WRITE(D_COMP_BDW, val);
4101                 POSTING_READ(D_COMP_BDW);
4102         }
4103 }
4104
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move the CD clock onto FCLK before the PLL goes away. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL to report unlocked. */
	if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
				    LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP and wait for any in-flight RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
4157
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully restored. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP before re-enabling the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
				    LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Switch the CD clock back from FCLK to LCPLL if needed. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Refresh the software CDCLK state now that the clock is back. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
4212
4213 /*
4214  * Package states C8 and deeper are really deep PC states that can only be
4215  * reached when all the devices on the system allow it, so even if the graphics
4216  * device allows PC8+, it doesn't mean the system will actually get to these
4217  * states. Our driver only allows PC8+ when going into runtime PM.
4218  *
4219  * The requirements for PC8+ are that all the outputs are disabled, the power
4220  * well is disabled and most interrupts are disabled, and these are also
4221  * requirements for runtime PM. When these conditions are met, we manually do
4222  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4223  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4224  * hang the machine.
4225  *
4226  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4227  * the state of some registers, so when we come back from PC8+ we need to
4228  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4229  * need to take care of the registers kept by RC6. Notice that this happens even
4230  * if we don't put the device in PCI D3 state (which is what currently happens
4231  * because of the runtime PM support).
4232  *
4233  * For more, read "Display Sequences for Package C8" on the hardware
4234  * documentation.
4235  */
4236 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4237 {
4238         u32 val;
4239
4240         DRM_DEBUG_KMS("Enabling package C8+\n");
4241
4242         if (HAS_PCH_LPT_LP(dev_priv)) {
4243                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4244                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4245                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4246         }
4247
4248         lpt_disable_clkout_dp(dev_priv);
4249         hsw_disable_lcpll(dev_priv, true, true);
4250 }
4251
4252 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4253 {
4254         u32 val;
4255
4256         DRM_DEBUG_KMS("Disabling package C8+\n");
4257
4258         hsw_restore_lcpll(dev_priv);
4259         intel_init_pch_refclk(dev_priv);
4260
4261         if (HAS_PCH_LPT_LP(dev_priv)) {
4262                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4263                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4264                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4265         }
4266 }
4267
4268 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4269                                       bool enable)
4270 {
4271         i915_reg_t reg;
4272         u32 reset_bits, val;
4273
4274         if (IS_IVYBRIDGE(dev_priv)) {
4275                 reg = GEN7_MSG_CTL;
4276                 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4277         } else {
4278                 reg = HSW_NDE_RSTWRN_OPT;
4279                 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4280         }
4281
4282         val = I915_READ(reg);
4283
4284         if (enable)
4285                 val |= reset_bits;
4286         else
4287                 val &= ~reset_bits;
4288
4289         I915_WRITE(reg, val);
4290 }
4291
/*
 * Bring up the SKL display core: disable DC states, enable the PCH reset
 * handshake, power up PG1 and Misc I/O, then CDCLK and DBUF, and finally
 * reload the DMC firmware when resuming with a payload present.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4321
/* Tear down the SKL display core in reverse order of skl_display_core_init(). */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
4351
/*
 * Bring up the BXT display core: disable DC states, force the PCH reset
 * handshake off (no PCH on BXT), power up PG1, then CDCLK and DBUF, and
 * reload the DMC firmware when resuming with a payload present.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4383
/* Tear down the BXT display core in reverse order of bxt_display_core_init(). */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
4411
/* Bring up the CNL display core following the numbered BSpec sequence. */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. */
	intel_combo_phy_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	intel_cdclk_init(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	/* Reload the DMC firmware if resuming with a payload present. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4443
/* Tear down the CNL display core in reverse order of cnl_display_core_init(). */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}
4474
/* Bring up the ICL display core following the numbered BSpec sequence. */
void icl_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init(dev_priv);

	/* 5. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* Reload the DMC firmware if resuming with a payload present. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4510
/* Tear down the ICL display core in reverse order of icl_display_core_init(). */
void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}
4539
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current
 * power well and lane status, since the register itself cannot be read
 * safely (see workaround comment below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
4626
/*
 * VLV common-lane workaround: toggle the display PHY side reset by
 * cycling the common-lane power well, unless the display is already
 * active with the reset de-asserted.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
4654
4655 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4656 {
4657         bool ret;
4658
4659         vlv_punit_get(dev_priv);
4660         ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
4661         vlv_punit_put(dev_priv);
4662
4663         return ret;
4664 }
4665
4666 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4667 {
4668         WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4669              "VED not power gated\n");
4670 }
4671
4672 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4673 {
4674         static const struct pci_device_id isp_ids[] = {
4675                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4676                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4677                 {}
4678         };
4679
4680         WARN(!pci_dev_present(isp_ids) &&
4681              !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4682              "ISP not power gated\n");
4683 }
4684
4685 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4686
4687 /**
4688  * intel_power_domains_init_hw - initialize hardware power domain state
4689  * @i915: i915 device instance
4690  * @resume: Called from resume code paths or not
4691  *
4692  * This function initializes the hardware power domain state and enables all
4693  * power wells belonging to the INIT power domain. Power wells in other
4694  * domains (and not in the INIT domain) are referenced or disabled by
4695  * intel_modeset_readout_hw_state(). After that the reference count of each
4696  * power well must match its HW enabled state, see
4697  * intel_power_domains_verify_state().
4698  *
4699  * It will return with power domains disabled (to be enabled later by
4700  * intel_power_domains_enable()) and must be paired with
4701  * intel_power_domains_fini_hw().
4702  */
4703 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
4704 {
4705         struct i915_power_domains *power_domains = &i915->power_domains;
4706
4707         power_domains->initializing = true;
4708
4709         if (INTEL_GEN(i915) >= 11) {
4710                 icl_display_core_init(i915, resume);
4711         } else if (IS_CANNONLAKE(i915)) {
4712                 cnl_display_core_init(i915, resume);
4713         } else if (IS_GEN9_BC(i915)) {
4714                 skl_display_core_init(i915, resume);
4715         } else if (IS_GEN9_LP(i915)) {
4716                 bxt_display_core_init(i915, resume);
4717         } else if (IS_CHERRYVIEW(i915)) {
4718                 mutex_lock(&power_domains->lock);
4719                 chv_phy_control_init(i915);
4720                 mutex_unlock(&power_domains->lock);
4721                 assert_isp_power_gated(i915);
4722         } else if (IS_VALLEYVIEW(i915)) {
4723                 mutex_lock(&power_domains->lock);
4724                 vlv_cmnlane_wa(i915);
4725                 mutex_unlock(&power_domains->lock);
4726                 assert_ved_power_gated(i915);
4727                 assert_isp_power_gated(i915);
4728         } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
4729                 hsw_assert_cdclk(i915);
4730                 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4731         } else if (IS_IVYBRIDGE(i915)) {
4732                 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4733         }
4734
4735         /*
4736          * Keep all power wells enabled for any dependent HW access during
4737          * initialization and to make sure we keep BIOS enabled display HW
4738          * resources powered until display HW readout is complete. We drop
4739          * this reference in intel_power_domains_enable().
4740          */
4741         power_domains->wakeref =
4742                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4743
4744         /* Disable power support if the user asked so. */
4745         if (!i915_modparams.disable_power_well)
4746                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4747         intel_power_domains_sync_hw(i915);
4748
4749         power_domains->initializing = false;
4750 }
4751
4752 /**
4753  * intel_power_domains_fini_hw - deinitialize hw power domain state
4754  * @i915: i915 device instance
4755  *
4756  * De-initializes the display power domain HW state. It also ensures that the
4757  * device stays powered up so that the driver can be reloaded.
4758  *
4759  * It must be called with power domains already disabled (after a call to
4760  * intel_power_domains_disable()) and must be paired with
4761  * intel_power_domains_init_hw().
4762  */
4763 void intel_power_domains_fini_hw(struct drm_i915_private *i915)
4764 {
4765         intel_wakeref_t wakeref __maybe_unused =
4766                 fetch_and_zero(&i915->power_domains.wakeref);
4767
4768         /* Remove the refcount we took to keep power well support disabled. */
4769         if (!i915_modparams.disable_power_well)
4770                 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4771
4772         intel_display_power_flush_work_sync(i915);
4773
4774         intel_power_domains_verify_state(i915);
4775
4776         /* Keep the power well enabled, but cancel its rpm wakeref. */
4777         intel_runtime_pm_put(i915, wakeref);
4778 }
4779
4780 /**
4781  * intel_power_domains_enable - enable toggling of display power wells
4782  * @i915: i915 device instance
4783  *
4784  * Enable the ondemand enabling/disabling of the display power wells. Note that
4785  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4786  * only at specific points of the display modeset sequence, thus they are not
4787  * affected by the intel_power_domains_enable()/disable() calls. The purpose
4788  * of these function is to keep the rest of power wells enabled until the end
4789  * of display HW readout (which will acquire the power references reflecting
4790  * the current HW state).
4791  */
4792 void intel_power_domains_enable(struct drm_i915_private *i915)
4793 {
4794         intel_wakeref_t wakeref __maybe_unused =
4795                 fetch_and_zero(&i915->power_domains.wakeref);
4796
4797         intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4798         intel_power_domains_verify_state(i915);
4799 }
4800
4801 /**
4802  * intel_power_domains_disable - disable toggling of display power wells
4803  * @i915: i915 device instance
4804  *
4805  * Disable the ondemand enabling/disabling of the display power wells. See
4806  * intel_power_domains_enable() for which power wells this call controls.
4807  */
4808 void intel_power_domains_disable(struct drm_i915_private *i915)
4809 {
4810         struct i915_power_domains *power_domains = &i915->power_domains;
4811
4812         WARN_ON(power_domains->wakeref);
4813         power_domains->wakeref =
4814                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4815
4816         intel_power_domains_verify_state(i915);
4817 }
4818
4819 /**
4820  * intel_power_domains_suspend - suspend power domain state
4821  * @i915: i915 device instance
4822  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
4823  *
4824  * This function prepares the hardware power domain state before entering
4825  * system suspend.
4826  *
4827  * It must be called with power domains already disabled (after a call to
4828  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
4829  */
4830 void intel_power_domains_suspend(struct drm_i915_private *i915,
4831                                  enum i915_drm_suspend_mode suspend_mode)
4832 {
4833         struct i915_power_domains *power_domains = &i915->power_domains;
4834         intel_wakeref_t wakeref __maybe_unused =
4835                 fetch_and_zero(&power_domains->wakeref);
4836
4837         intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4838
4839         /*
4840          * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4841          * support don't manually deinit the power domains. This also means the
4842          * CSR/DMC firmware will stay active, it will power down any HW
4843          * resources as required and also enable deeper system power states
4844          * that would be blocked if the firmware was inactive.
4845          */
4846         if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
4847             suspend_mode == I915_DRM_SUSPEND_IDLE &&
4848             i915->csr.dmc_payload) {
4849                 intel_display_power_flush_work(i915);
4850                 intel_power_domains_verify_state(i915);
4851                 return;
4852         }
4853
4854         /*
4855          * Even if power well support was disabled we still want to disable
4856          * power wells if power domains must be deinitialized for suspend.
4857          */
4858         if (!i915_modparams.disable_power_well)
4859                 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4860
4861         intel_display_power_flush_work(i915);
4862         intel_power_domains_verify_state(i915);
4863
4864         if (INTEL_GEN(i915) >= 11)
4865                 icl_display_core_uninit(i915);
4866         else if (IS_CANNONLAKE(i915))
4867                 cnl_display_core_uninit(i915);
4868         else if (IS_GEN9_BC(i915))
4869                 skl_display_core_uninit(i915);
4870         else if (IS_GEN9_LP(i915))
4871                 bxt_display_core_uninit(i915);
4872
4873         power_domains->display_core_suspended = true;
4874 }
4875
4876 /**
4877  * intel_power_domains_resume - resume power domain state
4878  * @i915: i915 device instance
4879  *
4880  * This function resume the hardware power domain state during system resume.
4881  *
4882  * It will return with power domain support disabled (to be enabled later by
4883  * intel_power_domains_enable()) and must be paired with
4884  * intel_power_domains_suspend().
4885  */
4886 void intel_power_domains_resume(struct drm_i915_private *i915)
4887 {
4888         struct i915_power_domains *power_domains = &i915->power_domains;
4889
4890         if (power_domains->display_core_suspended) {
4891                 intel_power_domains_init_hw(i915, true);
4892                 power_domains->display_core_suspended = false;
4893         } else {
4894                 WARN_ON(power_domains->wakeref);
4895                 power_domains->wakeref =
4896                         intel_display_power_get(i915, POWER_DOMAIN_INIT);
4897         }
4898
4899         intel_power_domains_verify_state(i915);
4900 }
4901
4902 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4903
4904 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
4905 {
4906         struct i915_power_domains *power_domains = &i915->power_domains;
4907         struct i915_power_well *power_well;
4908
4909         for_each_power_well(i915, power_well) {
4910                 enum intel_display_power_domain domain;
4911
4912                 DRM_DEBUG_DRIVER("%-25s %d\n",
4913                                  power_well->desc->name, power_well->count);
4914
4915                 for_each_power_domain(domain, power_well->desc->domains)
4916                         DRM_DEBUG_DRIVER("  %-23s %d\n",
4917                                          intel_display_power_domain_str(domain),
4918                                          power_domains->domain_use_count[domain]);
4919         }
4920 }
4921
4922 /**
4923  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
4924  * @i915: i915 device instance
4925  *
4926  * Verify if the reference count of each power well matches its HW enabled
4927  * state and the total refcount of the domains it belongs to. This must be
4928  * called after modeset HW state sanitization, which is responsible for
4929  * acquiring reference counts for any power wells in use and disabling the
4930  * ones left on by BIOS but not required by any active output.
4931  */
4932 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4933 {
4934         struct i915_power_domains *power_domains = &i915->power_domains;
4935         struct i915_power_well *power_well;
4936         bool dump_domain_info;
4937
4938         mutex_lock(&power_domains->lock);
4939
4940         verify_async_put_domains_state(power_domains);
4941
4942         dump_domain_info = false;
4943         for_each_power_well(i915, power_well) {
4944                 enum intel_display_power_domain domain;
4945                 int domains_count;
4946                 bool enabled;
4947
4948                 enabled = power_well->desc->ops->is_enabled(i915, power_well);
4949                 if ((power_well->count || power_well->desc->always_on) !=
4950                     enabled)
4951                         DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
4952                                   power_well->desc->name,
4953                                   power_well->count, enabled);
4954
4955                 domains_count = 0;
4956                 for_each_power_domain(domain, power_well->desc->domains)
4957                         domains_count += power_domains->domain_use_count[domain];
4958
4959                 if (power_well->count != domains_count) {
4960                         DRM_ERROR("power well %s refcount/domain refcount mismatch "
4961                                   "(refcount %d/domains refcount %d)\n",
4962                                   power_well->desc->name, power_well->count,
4963                                   domains_count);
4964                         dump_domain_info = true;
4965                 }
4966         }
4967
4968         if (dump_domain_info) {
4969                 static bool dumped;
4970
4971                 if (!dumped) {
4972                         intel_power_domains_dump_info(i915);
4973                         dumped = true;
4974                 }
4975         }
4976
4977         mutex_unlock(&power_domains->lock);
4978 }
4979
4980 #else
4981
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
4985
4986 #endif
4987
4988 static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915,
4989                                               bool wakelock)
4990 {
4991         struct pci_dev *pdev = i915->drm.pdev;
4992         struct device *kdev = &pdev->dev;
4993         int ret;
4994
4995         ret = pm_runtime_get_sync(kdev);
4996         WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4997
4998         intel_runtime_pm_acquire(i915, wakelock);
4999
5000         return track_intel_runtime_pm_wakeref(i915);
5001 }
5002
/*
 * Grab a raw runtime pm reference: powers up the device but counts the
 * reference as raw (wakelock=false) rather than as a wakelock.
 */
static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
{
	return __intel_runtime_pm_get(i915, false);
}
5007
5008 /**
5009  * intel_runtime_pm_get - grab a runtime pm reference
5010  * @i915: i915 device instance
5011  *
5012  * This function grabs a device-level runtime pm reference (mostly used for GEM
5013  * code to ensure the GTT or GT is on) and ensures that it is powered up.
5014  *
5015  * Any runtime pm reference obtained by this function must have a symmetric
5016  * call to intel_runtime_pm_put() to release the reference again.
5017  *
5018  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
5019  */
5020 intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
5021 {
5022         return __intel_runtime_pm_get(i915, true);
5023 }
5024
5025 /**
5026  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
5027  * @i915: i915 device instance
5028  *
5029  * This function grabs a device-level runtime pm reference if the device is
5030  * already in use and ensures that it is powered up. It is illegal to try
5031  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
5032  *
5033  * Any runtime pm reference obtained by this function must have a symmetric
5034  * call to intel_runtime_pm_put() to release the reference again.
5035  *
5036  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
5037  * as True if the wakeref was acquired, or False otherwise.
5038  */
5039 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
5040 {
5041         if (IS_ENABLED(CONFIG_PM)) {
5042                 struct pci_dev *pdev = i915->drm.pdev;
5043                 struct device *kdev = &pdev->dev;
5044
5045                 /*
5046                  * In cases runtime PM is disabled by the RPM core and we get
5047                  * an -EINVAL return value we are not supposed to call this
5048                  * function, since the power state is undefined. This applies
5049                  * atm to the late/early system suspend/resume handlers.
5050                  */
5051                 if (pm_runtime_get_if_in_use(kdev) <= 0)
5052                         return 0;
5053         }
5054
5055         intel_runtime_pm_acquire(i915, true);
5056
5057         return track_intel_runtime_pm_wakeref(i915);
5058 }
5059
5060 /**
5061  * intel_runtime_pm_get_noresume - grab a runtime pm reference
5062  * @i915: i915 device instance
5063  *
5064  * This function grabs a device-level runtime pm reference (mostly used for GEM
5065  * code to ensure the GTT or GT is on).
5066  *
5067  * It will _not_ power up the device but instead only check that it's powered
5068  * on.  Therefore it is only valid to call this functions from contexts where
5069  * the device is known to be powered up and where trying to power it up would
5070  * result in hilarity and deadlocks. That pretty much means only the system
5071  * suspend/resume code where this is used to grab runtime pm references for
5072  * delayed setup down in work items.
5073  *
5074  * Any runtime pm reference obtained by this function must have a symmetric
5075  * call to intel_runtime_pm_put() to release the reference again.
5076  *
5077  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
5078  */
5079 intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
5080 {
5081         struct pci_dev *pdev = i915->drm.pdev;
5082         struct device *kdev = &pdev->dev;
5083
5084         assert_rpm_wakelock_held(i915);
5085         pm_runtime_get_noresume(kdev);
5086
5087         intel_runtime_pm_acquire(i915, true);
5088
5089         return track_intel_runtime_pm_wakeref(i915);
5090 }
5091
5092 static void __intel_runtime_pm_put(struct drm_i915_private *i915,
5093                                    intel_wakeref_t wref,
5094                                    bool wakelock)
5095 {
5096         struct pci_dev *pdev = i915->drm.pdev;
5097         struct device *kdev = &pdev->dev;
5098
5099         untrack_intel_runtime_pm_wakeref(i915, wref);
5100
5101         intel_runtime_pm_release(i915, wakelock);
5102
5103         pm_runtime_mark_last_busy(kdev);
5104         pm_runtime_put_autosuspend(kdev);
5105 }
5106
5107 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * Release a raw runtime pm reference obtained via intel_runtime_pm_get_raw()
 * and untrack its wakeref cookie.
 */
static void
intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(i915, wref, false);
}
5113 #endif
5114
5115 /**
5116  * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
5117  * @i915: i915 device instance
5118  *
5119  * This function drops the device-level runtime pm reference obtained by
5120  * intel_runtime_pm_get() and might power down the corresponding
5121  * hardware block right away if this is the last reference.
5122  *
5123  * This function exists only for historical reasons and should be avoided in
5124  * new code, as the correctness of its use cannot be checked. Always use
5125  * intel_runtime_pm_put() instead.
5126  */
5127 void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
5128 {
5129         __intel_runtime_pm_put(i915, -1, true);
5130 }
5131
5132 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5133 /**
5134  * intel_runtime_pm_put - release a runtime pm reference
5135  * @i915: i915 device instance
5136  * @wref: wakeref acquired for the reference that is being released
5137  *
5138  * This function drops the device-level runtime pm reference obtained by
5139  * intel_runtime_pm_get() and might power down the corresponding
5140  * hardware block right away if this is the last reference.
5141  */
5142 void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
5143 {
5144         __intel_runtime_pm_put(i915, wref, true);
5145 }
5146 #endif
5147
5148 /**
5149  * intel_runtime_pm_enable - enable runtime pm
5150  * @i915: i915 device instance
5151  *
5152  * This function enables runtime pm at the end of the driver load sequence.
5153  *
5154  * Note that this function does currently not enable runtime pm for the
5155  * subordinate display power domains. That is done by
5156  * intel_power_domains_enable().
5157  */
5158 void intel_runtime_pm_enable(struct drm_i915_private *i915)
5159 {
5160         struct pci_dev *pdev = i915->drm.pdev;
5161         struct device *kdev = &pdev->dev;
5162
5163         /*
5164          * Disable the system suspend direct complete optimization, which can
5165          * leave the device suspended skipping the driver's suspend handlers
5166          * if the device was already runtime suspended. This is needed due to
5167          * the difference in our runtime and system suspend sequence and
5168          * becaue the HDA driver may require us to enable the audio power
5169          * domain during system suspend.
5170          */
5171         dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
5172
5173         pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
5174         pm_runtime_mark_last_busy(kdev);
5175
5176         /*
5177          * Take a permanent reference to disable the RPM functionality and drop
5178          * it only when unloading the driver. Use the low level get/put helpers,
5179          * so the driver's own RPM reference tracking asserts also work on
5180          * platforms without RPM support.
5181          */
5182         if (!HAS_RUNTIME_PM(i915)) {
5183                 int ret;
5184
5185                 pm_runtime_dont_use_autosuspend(kdev);
5186                 ret = pm_runtime_get_sync(kdev);
5187                 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
5188         } else {
5189                 pm_runtime_use_autosuspend(kdev);
5190         }
5191
5192         /*
5193          * The core calls the driver load handler with an RPM reference held.
5194          * We drop that here and will reacquire it during unloading in
5195          * intel_power_domains_fini().
5196          */
5197         pm_runtime_put_autosuspend(kdev);
5198 }
5199
5200 void intel_runtime_pm_disable(struct drm_i915_private *i915)
5201 {
5202         struct pci_dev *pdev = i915->drm.pdev;
5203         struct device *kdev = &pdev->dev;
5204
5205         /* Transfer rpm ownership back to core */
5206         WARN(pm_runtime_get_sync(kdev) < 0,
5207              "Failed to pass rpm ownership back to core\n");
5208
5209         pm_runtime_dont_use_autosuspend(kdev);
5210
5211         if (!HAS_RUNTIME_PM(i915))
5212                 pm_runtime_put(kdev);
5213 }
5214
5215 void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
5216 {
5217         struct i915_runtime_pm *rpm = &i915->runtime_pm;
5218         int count = atomic_read(&rpm->wakeref_count);
5219
5220         WARN(count,
5221              "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
5222              intel_rpm_raw_wakeref_count(count),
5223              intel_rpm_wakelock_count(count));
5224
5225         untrack_all_intel_runtime_pm_wakerefs(i915);
5226 }
5227
/* Set up the runtime pm wakeref tracking early in driver initialization. */
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
	init_intel_runtime_pm_wakeref(i915);
}