/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/utsname.h>
#include "adreno_gpu.h"
#include "msm_snapshot.h"
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		/* assumed: the same fixed base the upstream msm driver reports */
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
			(adreno_gpu->rev.minor << 8) |
			(adreno_gpu->rev.major << 16) |
			(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = gpu->gpufreq[gpu->active_level];
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp)
			return adreno_gpu->funcs->get_timestamp(gpu, value);
		return -EINVAL;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
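/*
 * Note: MSM_PARAM_CHIP_ID above packs the revision as 0xCCMMmmPP
 * (core, major, minor, patchid), so e.g. an a430 (rev 4.3.0.x) reports
 * 0x040300xx.  A userspace decode sketch (illustrative, not part of
 * this driver):
 *
 *	core    = (chip_id >> 24) & 0xff;
 *	major   = (chip_id >> 16) & 0xff;
 *	minor   = (chip_id >>  8) & 0xff;
 *	patchid =  chip_id        & 0xff;
 */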
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		int ret = msm_gem_get_iova(gpu->rb[i]->bo, gpu->aspace,
			&gpu->rb[i]->iova);
		if (ret) {
			dev_err(gpu->dev->dev,
				"could not map ringbuffer %d: %d\n", i, ret);
			return ret;
		}
	}

	/*
	 * Setup REG_CP_RB_CNTL.  The same value is used across targets (with
	 * the exception of A430, which disables the RPTR shadow) - the
	 * calculation for the ringbuffer size and block size is moved to
	 * msm_gpu.h for the pre-processor to deal with, and the A430 variant
	 * is ORed in here.
	 */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT |
		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);

	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
		REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));

	return 0;
}
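/*
 * rbmemptr() lives in the adreno headers; it is assumed here to resolve to
 * the GPU iova of a per-ring field in the shared memptrs buffer, roughly:
 *
 *	#define rbmemptr(adreno_gpu, id, member) \
 *		((adreno_gpu)->memptrs_iova + \
 *		 offsetof(struct adreno_rbmemptrs, member[id]))
 *
 * This is a sketch of the idea, not the verbatim macro.  The CP writes its
 * read pointer to that address so the CPU can poll it without a register
 * read on every check.
 */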
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
		struct msm_ringbuffer *ring)
{
	if (adreno_is_a430(adreno_gpu)) {
		/*
		 * If index is anything but 0 this will probably break
		 * horribly, but I think that we have enough infrastructure
		 * in place to ensure that it won't be.  If not, then this is
		 * why your a430 stopped working.
		 */
		return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
	}

	return adreno_gpu->memptrs->rptr[ring->id];
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
	/* assumed default: ring 0; preemption-aware targets override this */
	return gpu->rb[0];
}
uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
		struct msm_ringbuffer *ring)
{
	if (!ring)
		return 0;

	return ring->submitted_fence;
}
uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	if (!ring)
		return 0;

	return adreno_gpu->memptrs->fence[ring->id];
}
void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring;
	int ret, i;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer(s): */

	FOR_EACH_RING(gpu, ring, i) {
		if (!ring)
			continue;

		/* No need for a lock here, nobody else is peeking in */
		ring->cur = ring->start;
		ring->next = ring->start;

		/* reset completed fence seqno, discard anything pending: */
		adreno_gpu->memptrs->fence[ring->id] =
			adreno_submitted_fence(gpu, ring);
		adreno_gpu->memptrs->rptr[ring->id] = 0;
	}

	gpu->funcs->pm_resume(gpu);

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (ret)
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
	enable_irq(gpu->irq);
}
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb[submit->ring];
	unsigned i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_PROFILE_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (adreno_is_a3xx(adreno_gpu))
		OUT_PKT2(ring);

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
	OUT_RING(ring, submit->fence);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
	 * root cause is a platform issue or some a306 quirk, but this
	 * keeps things humming along:
	 */
	if (adreno_is_a306(adreno_gpu)) {
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}

	gpu->funcs->flush(gpu, ring);

	return 0;
}
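/*
 * The OUT_PKT2/OUT_PKT3 helpers used above emit legacy (pre-a5xx) CP
 * packets.  A type-3 packet starts with a header dword encoding the opcode
 * and payload length; a minimal sketch of the encoding (the real inline
 * helpers live in the adreno headers):
 *
 *	static inline void OUT_PKT3(struct msm_ringbuffer *ring,
 *			uint8_t opcode, uint16_t cnt)
 *	{
 *		adreno_wait_ring(ring, cnt + 1);
 *		OUT_RING(ring, CP_TYPE3_PKT | ((cnt - 1) << 16) |
 *				((opcode & 0xff) << 8));
 *	}
 *
 * where CP_TYPE3_PKT is (3 << 30).  OUT_PKT2() emits a single type-2
 * (no-op) dword, which is why it serves as padding.
 */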
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr;

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/*
	 * Mask the wptr value that we calculate to fit in the HW range.  This
	 * is to account for the possibility that the last command fit exactly
	 * into the ringbuffer and rb->next hasn't wrapped to zero yet
	 */
	wptr = get_wptr(ring);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
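/*
 * get_wptr() is assumed to return the write pointer as a dword offset
 * already wrapped to the ringbuffer size, something like:
 *
 *	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
 *
 * (a sketch of the header helper, not the verbatim definition), which is
 * why no additional masking is needed before writing REG_ADRENO_CP_RB_WPTR
 * above.
 */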
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(ring);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

	return false;
}
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring;
	int i;

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	FOR_EACH_RING(gpu, ring, i) {
		if (!ring)
			continue;

		seq_printf(m, "rb %d: fence:    %d/%d\n", i,
			adreno_last_fence(gpu, ring),
			adreno_submitted_fence(gpu, ring));

		seq_printf(m, "      rptr:     %d\n",
			get_rptr(adreno_gpu, ring));
		seq_printf(m, "rb wptr:  %d\n", get_wptr(ring));
	}

	gpu->funcs->pm_resume(gpu);

	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
		}
	}

	gpu->funcs->pm_suspend(gpu);
}
#endif
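/*
 * adreno_gpu->registers is a flat table of inclusive [start, end] register
 * ranges in dword offsets, terminated by ~0.  A hypothetical per-target
 * table (values illustrative only) would look like:
 *
 *	static const unsigned int axxx_registers[] = {
 *		0x0000, 0x0021,
 *		0x0026, 0x0026,
 *		~0,
 *	};
 *
 * The dump shifts each offset left by 2 to print byte addresses, which is
 * the form the demsm tool expects.
 */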
/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring;
	int i;

	dev_err(dev->dev, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	FOR_EACH_RING(gpu, ring, i) {
		if (!ring)
			continue;

		dev_err(dev->dev, "  ring %d: fence %d/%d rptr/wptr %x/%x\n", i,
			adreno_last_fence(gpu, ring),
			adreno_submitted_fence(gpu, ring),
			get_rptr(adreno_gpu, ring),
			get_wptr(ring));
	}

	for (i = 0; i < 8; i++) {
		pr_err("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
	/* Use ring->next to calculate free size */
	uint32_t wptr = ring->next - ring->start;
	uint32_t rptr = get_rptr(adreno_gpu, ring);

	return (rptr + (size - 1) - wptr) % size;
}
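/*
 * Worked example for ring_freewords(): with size = 1024 dwords and
 * rptr == wptr == 16 (an empty ring), free = (16 + 1023 - 16) % 1024
 * = 1023.  One dword is always held back, so a completely full ring
 * (free == 0) is never mistaken for an empty one (rptr == wptr).
 */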
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_ERROR("%s: timeout waiting for space in ringbuffer %d\n",
			ring->gpu->name, ring->id);
}
/* Read the set of powerlevels */
static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
{
	struct device_node *child;

	gpu->active_level = 1;

	/* The device tree will tell us the best clock to initialize with */
	of_property_read_u32(node, "qcom,initial-pwrlevel", &gpu->active_level);

	if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
		gpu->active_level = 1;

	for_each_child_of_node(node, child) {
		unsigned int index;

		if (of_property_read_u32(child, "reg", &index))
			return -EINVAL;

		if (index >= ARRAY_SIZE(gpu->gpufreq))
			continue;

		gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);

		of_property_read_u32(child, "qcom,gpu-freq",
			&gpu->gpufreq[index]);
		of_property_read_u32(child, "qcom,bus-freq",
			&gpu->busfreq[index]);
	}

	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
		gpu->gpufreq[gpu->active_level],
		gpu->gpufreq[gpu->nr_pwrlevels - 1],
		gpu->busfreq[gpu->active_level]);

	return 0;
}
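/*
 * A hypothetical device-tree fragment matching what this parser expects
 * (node and property names as used above; values illustrative only):
 *
 *	qcom,gpu-pwrlevels {
 *		qcom,initial-pwrlevel = <2>;
 *		qcom,gpu-pwrlevel@0 {
 *			reg = <0>;
 *			qcom,gpu-freq = <600000000>;
 *			qcom,bus-freq = <12>;
 *		};
 *	};
 */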
/*
 * Escape valve for targets that don't define the binning nodes.  Get the
 * first powerlevel node and parse it
 */
static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
		struct device_node *parent)
{
	struct device_node *child;

	child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
	if (child)
		return _adreno_get_pwrlevels(gpu, child);

	dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
	return -EINVAL;
}
/* Get the powerlevels for the target */
static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct device_node *node, *child;

	/* See if the target has defined a number of power bins */
	node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
	if (!node)
		/* If not, look for the qcom,gpu-pwrlevels node */
		return adreno_get_legacy_pwrlevels(gpu, parent);

	for_each_child_of_node(node, child) {
		unsigned int bin;

		if (of_property_read_u32(child, "qcom,speed-bin", &bin))
			continue;

		/*
		 * If the bin matches the bin specified by the fuses, then we
		 * have a winner - parse it
		 */
		if (adreno_gpu->speed_bin == bin)
			return _adreno_get_pwrlevels(gpu, child);
	}

	return -ENODEV;
}
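/*
 * With speed binning, the tree instead carries one set of levels per bin,
 * selected at runtime against the fuse value in adreno_gpu->speed_bin.
 * Illustrative layout (names as parsed above, contents hypothetical):
 *
 *	qcom,gpu-pwrlevel-bins {
 *		qcom,gpu-pwrlevels-0 {
 *			qcom,speed-bin = <0>;
 *			qcom,initial-pwrlevel = <2>;
 *		};
 *	};
 */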
static const struct { const char *str; uint32_t flag; } quirks[] = {
	{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
	{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
};

/* Parse target configuration (powerlevels, quirks) from the device tree */
static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct device_node *node = pdev->dev.of_node;
	int i, ret;

	/* Probe the powerlevels */
	ret = adreno_get_pwrlevels(gpu, node);
	if (ret)
		return ret;

	/* Check to see if any quirks were specified in the device tree */
	for (i = 0; i < ARRAY_SIZE(quirks); i++)
		if (of_property_read_bool(node, quirks[i].str))
			adreno_gpu->quirks |= quirks[i].flag;

	return 0;
}
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu,
		const struct adreno_gpu_funcs *funcs,
		struct msm_gpu_config *gpu_config)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct msm_mmu *mmu;
	int ret;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	/* Get the rest of the target configuration from the device tree */
	adreno_of_parse(pdev, gpu);

	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, gpu_config);
	if (ret)
		return ret;

	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				adreno_gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				adreno_gpu->info->pfpfw, ret);
		return ret;
	}

	mmu = gpu->aspace->mmu;

	ret = mmu->funcs->attach(mmu, NULL, 0);
	if (ret)
		return ret;

	mutex_lock(&drm->struct_mutex);
	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
			MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(adreno_gpu->memptrs_bo)) {
		ret = PTR_ERR(adreno_gpu->memptrs_bo);
		adreno_gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
	if (!adreno_gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
			&adreno_gpu->memptrs_iova);
	if (ret)
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);

	return ret;
}
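/*
 * The memptrs buffer allocated above is assumed to be a small shared
 * structure with one rptr and one fence slot per ring, roughly:
 *
 *	struct adreno_rbmemptrs {
 *		volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
 *		volatile uint32_t fence[MSM_GPU_MAX_RINGS];
 *	};
 *
 * This is a sketch inferred from how memptrs->rptr[] and memptrs->fence[]
 * are indexed in this file; the real definition lives in adreno_gpu.h.
 */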
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	struct msm_gem_address_space *aspace = gpu->base.aspace;

	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, aspace);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}

	release_firmware(gpu->pm4);
	release_firmware(gpu->pfp);

	msm_gpu_cleanup(&gpu->base);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}
}
static void adreno_snapshot_os(struct msm_gpu *gpu,
		struct msm_snapshot *snapshot)
{
	struct msm_snapshot_linux header;

	memset(&header, 0, sizeof(header));

	header.osid = SNAPSHOT_OS_LINUX_V3;
	strlcpy(header.release, utsname()->release, sizeof(header.release));
	strlcpy(header.version, utsname()->version, sizeof(header.version));

	header.seconds = get_seconds();
	header.ctxtcount = 0;

	SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_OS, 0);
}
static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu,
		struct msm_snapshot *snapshot, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_snapshot_ringbuffer header;
	unsigned int i, end = 0;
	unsigned int *data = ring->start;

	memset(&header, 0, sizeof(header));

	/*
	 * We only want to copy the active contents of each ring, so find the
	 * last valid entry in the ringbuffer
	 */
	for (i = 0; i < MSM_GPU_RINGBUFFER_SZ >> 2; i++) {
		if (data[i])
			end = i;
	}

	/* The dump always starts at 0 */
	header.start = 0;

	/* This is the number of dwords being dumped */
	header.count = end + 1;

	/* This is the size of the actual ringbuffer */
	header.rbsize = MSM_GPU_RINGBUFFER_SZ >> 2;

	header.id = ring->id;
	header.gpuaddr = ring->iova;
	header.rptr = get_rptr(adreno_gpu, ring);
	header.wptr = get_wptr(ring);
	header.timestamp_queued = adreno_submitted_fence(gpu, ring);
	header.timestamp_retired = adreno_last_fence(gpu, ring);

	/* Write the header even if the ringbuffer data is empty */
	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2,
		header.count))
		return;

	SNAPSHOT_MEMCPY(snapshot, ring->start, header.count * sizeof(u32));
}
static void adreno_snapshot_ringbuffers(struct msm_gpu *gpu,
		struct msm_snapshot *snapshot)
{
	struct msm_ringbuffer *ring;
	int i;

	/* Write a new section for each ringbuffer */
	FOR_EACH_RING(gpu, ring, i)
		adreno_snapshot_ringbuffer(gpu, snapshot, ring);
}

void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
{
	adreno_snapshot_os(gpu, snapshot);
	adreno_snapshot_ringbuffers(gpu, snapshot);
}
/* Return the group struct associated with the counter id */
static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
		u32 groupid)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	if (!adreno_gpu->counter_groups)
		return ERR_PTR(-ENODEV);

	if (groupid >= adreno_gpu->nr_counter_groups)
		return ERR_PTR(-ENODEV);

	return (struct adreno_counter_group *)
		adreno_gpu->counter_groups[groupid];
}
int adreno_get_counter(struct msm_gpu *gpu, u32 groupid, u32 countable,
		u32 *lo, u32 *hi)
{
	struct adreno_counter_group *group =
		get_counter_group(gpu, groupid);

	if (!IS_ERR_OR_NULL(group) && group->funcs.get)
		return group->funcs.get(gpu, group, countable, lo, hi);

	return -ENODEV;
}

u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
{
	struct adreno_counter_group *group =
		get_counter_group(gpu, groupid);

	if (!IS_ERR_OR_NULL(group) && group->funcs.read)
		return group->funcs.read(gpu, group, counterid);

	return 0;
}

void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
{
	struct adreno_counter_group *group =
		get_counter_group(gpu, groupid);

	if (!IS_ERR_OR_NULL(group) && group->funcs.put)
		group->funcs.put(gpu, group, counterid);
}
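/*
 * Typical counter lifecycle, as an illustrative sketch.  This assumes
 * funcs.get returns a counter index on success and fills *lo/*hi with the
 * counter's register offsets; groupid/countable values are hypothetical:
 *
 *	u32 lo, hi;
 *	int id = adreno_get_counter(gpu, groupid, countable, &lo, &hi);
 *
 *	if (id >= 0) {
 *		u64 val = adreno_read_counter(gpu, groupid, id);
 *		adreno_put_counter(gpu, groupid, id);
 *	}
 */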