/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "gt215.h"
#include "pll.h"

#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;
	u32 cctrl, sctrl;
	u32 ccoef, scoef;
	u32 cpost, spost;
	u32 vdiv;
};
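/* The vdec clock divider lives in 0x004600; mcp77_clk_read() extracts the
 * post-divider from bits 10:8 of this value. */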
static u32
read_div(struct mcp77_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	return nvkm_rd32(device, 0x004600);
}
static u32
read_pll(struct mcp77_clk *clk, u32 base)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, base + 0);
	u32 coef = nvkm_rd32(device, base + 4);
	u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
	u32 post_div = 0;
	u32 clock = 0;
	int N1, M1;
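	/* Each PLL has its post-divider in a separate register: a right-shift
	 * amount at 0x4070 for the shader PLL (0x4020), and a plain integer
	 * divider at 0x4040 for the core PLL (0x4028). */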
	switch (base) {
	case 0x4020:
		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
		break;
	case 0x4028:
		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
		break;
	default:
		break;
	}
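	/* A running PLL (bit 31 of ctrl set) outputs ref * N1 / M1, further
	 * reduced by the post-divider read above. */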
	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	if ((ctrl & 0x80000000) && M1) {
		clock = ref * N1 / M1;
		clock = clock / post_div;
	}

	return clock;
}
static int
mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);
	u32 P = 0;
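	/* 0x00c054 ("mast") selects the source for each clock domain; the
	 * mux fields tested below are decoded from it. */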
	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		return clk->base.read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		return clk->base.read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break;
		case 0x00080000: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return clk->base.read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return clk->base.read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		if ((mast & 0x03000000) != 0x03000000)
			return clk->base.read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return clk->base.read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_href);
		case 0x00000400: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return clk->base.read(&clk->base, nv_clk_src_href) >> P;
			return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		return 0;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			return clk->base.read(&clk->base, nv_clk_src_core) >> P;
		default:
			return 500000 >> P;
		}
		break;
	default:
		break;
	}
	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}
static u32
calc_pll(struct mcp77_clk *clk, u32 reg,
	 u32 clock, int *N, int *M, int *P)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
	if (ret)
		return 0;

	pll.vco2.max_freq = 0;
	pll.refclk = clk->base.read(&clk->base, nv_clk_src_href);
	if (!pll.refclk)
		return 0;

	return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
}
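/* Pick the best 0..7 right-shift to bring 'src' down towards 'target';
 * returns the resulting clock and stores the shift in *div. */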
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;

	(*div)--;
	return clk1;
}
static int
mcp77_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 out = 0, clock = 0;
	int N, M, P1, P2 = 0;
	int divs = 0;
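	/* Strategy: for each domain, work out both the closest clock we can
	 * derive with a plain divider and the closest we can get from a PLL,
	 * then keep whichever lands nearer the requested frequency. */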
	/* cclk: find suitable source, disable PLL if we can */
	if (core < clk->base.read(&clk->base, nv_clk_src_hclkm4))
		out = calc_P(clk->base.read(&clk->base, nv_clk_src_hclkm4), core, &divs);

	/* Calculate clock * 2, so shader clock can use it too */
	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);

	if (abs(core - out) <= abs(core - (clock >> 1))) {
		clk->csrc  = nv_clk_src_hclkm4;
		clk->cctrl = divs << 16;
	} else {
		/* NVCTRL is actually used _after_ NVPOST, and after what we
		 * call NVPLL. To make matters worse, NVPOST is an integer
		 * divider instead of a right-shift number. */
		if (P1 > 2) {
			P2 = P1 - 2;
			P1 = 2;
		}

		clk->csrc = nv_clk_src_core;
		clk->ccoef = (N << 8) | M;

		clk->cctrl = (P2 + 1) << 16;
		clk->cpost = (1 << P1) << 16;
	}
	/* sclk: nvpll + divisor, href or spll */
	out = 0;
	if (shader == clk->base.read(&clk->base, nv_clk_src_href)) {
		clk->ssrc = nv_clk_src_href;
	} else {
		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
		if (clk->csrc == nv_clk_src_core)
			out = calc_P((core << 1), shader, &divs);

		if (abs(shader - out) <=
		    abs(shader - clock) &&
		   (divs + P2) <= 7) {
			clk->ssrc = nv_clk_src_core;
			clk->sctrl = (divs + P2) << 16;
		} else {
			clk->ssrc = nv_clk_src_shader;
			clk->scoef = (N << 8) | M;
			clk->sctrl = P1 << 16;
		}
	}
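	/* vclk: vdec can be fed from the core clock or from a fixed 500 MHz
	 * source; choose the divider that gets closer to the target. */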
	out = calc_P(core, vdec, &divs);
	clock = calc_P(500000, vdec, &P1);
	if (abs(vdec - out) <= abs(vdec - clock)) {
		clk->vsrc = nv_clk_src_cclk;
		clk->vdiv = divs << 16;
	} else {
		clk->vsrc = nv_clk_src_vdec;
		clk->vdiv = P1 << 16;
	}
	/* Print strategy! */
	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
		   clk->ccoef, clk->cpost, clk->cctrl);
	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
		   clk->scoef, clk->spost, clk->sctrl);
	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
	if (clk->csrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "core: hrefm4\n");
	else
		nvkm_debug(subdev, "core: nvpll\n");

	if (clk->ssrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "shader: hrefm4\n");
	else if (clk->ssrc == nv_clk_src_core)
		nvkm_debug(subdev, "shader: nvpll\n");
	else
		nvkm_debug(subdev, "shader: spll\n");

	if (clk->vsrc == nv_clk_src_vdec)
		nvkm_debug(subdev, "vdec: 500MHz\n");
	else
		nvkm_debug(subdev, "vdec: core\n");

	return 0;
}
static int
mcp77_clk_prog(struct nvkm_clk *obj)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;
	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;
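	/* Program coefficients for whichever PLLs the strategy selected; the
	 * domains keep running from href until mast is rewritten below. */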
	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto out;
	}
	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto out;
	}
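	/* Wait for the freshly-programmed PLLs to report lock in 0x004080
	 * before the muxes are pointed at them. */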
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto out;
	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	nvkm_wr32(device, 0xc054, mast);

out:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
static void
mcp77_clk_tidy(struct nvkm_clk *obj)
{
}
static struct nvkm_domain
mcp77_domains[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
	{ nv_clk_src_max }
};
static int
mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct mcp77_clk *clk;
	int ret;

	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
			      NULL, 0, true, &clk);
	*pobject = nv_object(clk);
	if (ret)
		return ret;

	clk->base.read = mcp77_clk_read;
	clk->base.calc = mcp77_clk_calc;
	clk->base.prog = mcp77_clk_prog;
	clk->base.tidy = mcp77_clk_tidy;
	return 0;
}
struct nvkm_oclass *
mcp77_clk_oclass = &(struct nvkm_oclass) {
	.handle = NV_SUBDEV(CLK, 0xaa),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = mcp77_clk_ctor,
		.dtor = _nvkm_clk_dtor,
		.init = _nvkm_clk_init,
		.fini = _nvkm_clk_fini,
	},
};