2 * PCM - Direct Stream Mixing
3 * Copyright (c) 2003 by Jaroslav Kysela <perex@perex.cz>
6 * This library is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU Lesser General Public License as
8 * published by the Free Software Foundation; either version 2.1 of
9 * the License, or (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include <sys/ioctl.h>
37 #include <sys/socket.h>
41 #include "pcm_direct.h"
/* Members of the SysV 'union semun' argument for semctl().
 * NOTE(review): the union's opening/closing lines are not visible in
 * this extract. */
48 int val; /* Value for SETVAL */
49 struct semid_ds *buf; /* Buffer for IPC_STAT, IPC_SET */
50 unsigned short *array; /* Array for GETALL, SETALL */
51 struct seminfo *__buf; /* Buffer for IPC_INFO (Linux specific) */
56 * add possibility to use futexes here
/*
 * Create (or attach to) the SysV semaphore set that serializes
 * direct-plugin clients, then apply the configured group id to each
 * semaphore.  NOTE(review): error paths and closing braces are missing
 * from this extract.
 */
59 int snd_pcm_direct_semaphore_create_or_connect(snd_pcm_direct_t *dmix)
65 dmix->semid = semget(dmix->ipc_key, DIRECT_IPC_SEMS,
66 IPC_CREAT | dmix->ipc_perm);
/* no explicit group requested -> nothing more to do */
69 if (dmix->ipc_gid < 0)
71 for (i = 0; i < DIRECT_IPC_SEMS; i++) {
/* IPC_STAT failure: tear the semaphore set down again */
73 if (semctl(dmix->semid, i, IPC_STAT, s) < 0) {
75 snd_pcm_direct_semaphore_discard(dmix);
/* stamp the configured group id onto this semaphore */
78 buf.sem_perm.gid = dmix->ipc_gid;
80 semctl(dmix->semid, i, IPC_SET, s);
/* Magic stamped into the shared control area by its creator; including
 * sizeof(snd_pcm_direct_share_t) ties the segment layout to this
 * library build, so incompatible layouts are rejected on attach. */
85 #define SND_PCM_DIRECT_MAGIC (0xa15ad300 + sizeof(snd_pcm_direct_share_t))
88 * global shared memory area
/*
 * Create the global shared-memory control area or attach to an existing
 * one.  The first instance zeroes the segment, optionally applies the
 * group id, and stamps the magic; later instances validate the magic.
 * NOTE(review): declarations, error returns and closing braces are
 * missing from this extract.
 */
91 int snd_pcm_direct_shm_create_or_connect(snd_pcm_direct_t *dmix)
94 int tmpid, err, first_instance = 0;
97 dmix->shmid = shmget(dmix->ipc_key, sizeof(snd_pcm_direct_share_t),
/* segment missing -> try to create it exclusively */
99 if (dmix->shmid < 0 && errno == ENOENT) {
100 if ((dmix->shmid = shmget(dmix->ipc_key, sizeof(snd_pcm_direct_share_t),
101 IPC_CREAT | IPC_EXCL | dmix->ipc_perm)) != -1)
/* lost the creation race against another process */
103 else if (errno == EEXIST)
107 if (dmix->shmid < 0) {
/* attach failed (e.g. size mismatch): probe the old segment and
 * remove it if it has no remaining users */
109 if ((tmpid = shmget(dmix->ipc_key, 0, dmix->ipc_perm)) != -1)
110 if (!shmctl(tmpid, IPC_STAT, &buf))
112 /* no users so destroy the segment */
113 if (!shmctl(tmpid, IPC_RMID, NULL))
117 dmix->shmptr = shmat(dmix->shmid, 0, 0);
118 if (dmix->shmptr == (void *) -1) {
120 snd_pcm_direct_shm_discard(dmix);
/* best effort: keep the control area resident (result ignored) */
123 mlock(dmix->shmptr, sizeof(snd_pcm_direct_share_t));
124 if (shmctl(dmix->shmid, IPC_STAT, &buf) < 0) {
126 snd_pcm_direct_shm_discard(dmix);
129 if (first_instance) { /* we're the first user, clear the segment */
130 memset(dmix->shmptr, 0, sizeof(snd_pcm_direct_share_t));
131 if (dmix->ipc_gid >= 0) {
132 buf.shm_perm.gid = dmix->ipc_gid;
133 shmctl(dmix->shmid, IPC_SET, &buf);
135 dmix->shmptr->magic = SND_PCM_DIRECT_MAGIC;
/* attached to an existing segment: verify its layout magic */
138 if (dmix->shmptr->magic != SND_PCM_DIRECT_MAGIC) {
139 snd_pcm_direct_shm_discard(dmix);
146 /* discard shared memory */
148 * Define snd_* functions to be used in server.
149 * Since objects referred in a plugin can be released dynamically, a forked
150 * server should have statically linked functions.
151 * (e.g. Novell bugzilla #105772)
/* Detach from the shared control area and, if no process remains
 * attached, remove the segment.  Statically linked so the forked
 * server can call it safely (see comment above).
 * NOTE(review): error returns and braces are missing from this view. */
153 static int _snd_pcm_direct_shm_discard(snd_pcm_direct_t *dmix)
160 if (dmix->shmptr != (void *) -1 && shmdt(dmix->shmptr) < 0)
162 dmix->shmptr = (void *) -1;
163 if (shmctl(dmix->shmid, IPC_STAT, &buf) < 0)
165 if (buf.shm_nattch == 0) { /* we're the last user, destroy the segment */
166 if (shmctl(dmix->shmid, IPC_RMID, NULL) < 0)
174 /* ... and an exported version */
/* Public entry point; simply forwards to the static implementation so
 * plugins and the forked server share one code path. */
175 int snd_pcm_direct_shm_discard(snd_pcm_direct_t *dmix)
177 return _snd_pcm_direct_shm_discard(dmix);
/* Build a unique socket path in TMPDIR from pid + current time.
 * snprintf bounds the write; the explicit terminator is belt and
 * braces.  NOTE(review): declarations/return are missing from view. */
184 static int get_tmp_name(char *filename, size_t size)
188 gettimeofday(&tv, NULL);
189 snprintf(filename, size, TMPDIR "/alsa-dmix-%i-%li-%li", (int)getpid(), (long)tv.tv_sec, (long)tv.tv_usec);
190 filename[size-1] = '\0';
/*
 * Create a PF_LOCAL stream socket bound to 'filename'.  As server:
 * bind, then chmod/chown the socket node; as client: connect.
 * Returns the socket fd (error paths are missing from this extract).
 */
194 static int make_local_socket(const char *filename, int server, mode_t ipc_perm, int ipc_gid)
196 size_t l = strlen(filename);
/* sockaddr sized for exactly 'l' path bytes (no trailing NUL needed) */
197 size_t size = offsetof(struct sockaddr_un, sun_path) + l;
198 struct sockaddr_un *addr = alloca(size);
201 sock = socket(PF_LOCAL, SOCK_STREAM, 0);
204 SYSERR("socket failed");
210 memset(addr, 0, size); /* make valgrind happy */
211 addr->sun_family = AF_LOCAL;
212 memcpy(addr->sun_path, filename, l);
/* server side: publish the socket and fix its permissions */
215 if (bind(sock, (struct sockaddr *) addr, size) < 0) {
217 SYSERR("bind failed: %s", filename);
221 if (chmod(filename, ipc_perm) < 0) {
223 SYSERR("chmod failed: %s", filename);
/* group change is best-effort; failure is tolerated (see below) */
228 if (chown(filename, -1, ipc_gid) < 0) {
229 #if 0 /* it's not fatal */
231 SYSERR("chown failed: %s", filename);
/* client side: connect to the already-published socket */
239 if (connect(sock, (struct sockaddr *) addr, size) < 0) {
241 SYSERR("connect failed: %s", filename);
/* Debug toggle for the forked server: defining SERVER_JOB_DEBUG routes
 * server_printf to printf, the #undef below disables it for release
 * builds (the matching #if/#else lines are missing from this view). */
250 #define SERVER_JOB_DEBUG
251 #define server_printf(fmt, args...) printf(fmt, ##args)
253 #undef SERVER_JOB_DEBUG
254 #define server_printf(fmt, args...) /* nothing */
/* context pointer for the signal handler below (single server per
 * process) */
257 static snd_pcm_direct_t *server_job_dmix;
/* Release all server resources: listening socket, plugin-specific
 * state, the socket node on disk, the shared memory and the semaphore
 * set. */
259 static void server_cleanup(snd_pcm_direct_t *dmix)
261 close(dmix->server_fd);
263 if (dmix->server_free)
264 dmix->server_free(dmix);
265 unlink(dmix->shmptr->socket_name);
266 _snd_pcm_direct_shm_discard(dmix);
267 snd_pcm_direct_semaphore_discard(dmix);
/* Termination-signal handler for the server: serialize against
 * clients, tear everything down, then exit (exit call missing from
 * this view). */
270 static void server_job_signal(int sig ATTRIBUTE_UNUSED)
272 snd_pcm_direct_semaphore_down(server_job_dmix, DIRECT_IPC_SEM_CLIENT);
273 server_cleanup(server_job_dmix);
274 server_printf("DIRECT SERVER EXIT - SIGNAL\n");
278 /* This is a copy from ../socket.c, provided here only for a server job
279 * (see the comment above)
/*
 * Send 'len' bytes plus the file descriptor 'fd' over a local socket
 * using SCM_RIGHTS ancillary data.  NOTE(review): the line storing the
 * fd into the cmsg payload (*fds = fd) and the iovec declaration are
 * missing from this extract.
 */
281 static int _snd_send_fd(int sock, void *data, size_t len, int fd)
284 size_t cmsg_len = CMSG_LEN(sizeof(int));
285 struct cmsghdr *cmsg = alloca(cmsg_len);
286 int *fds = (int *) CMSG_DATA(cmsg);
287 struct msghdr msghdr;
/* NOTE(review): '&data' points at the pointer variable itself, not the
 * payload — looks suspicious; verify against socket.c's snd_send_fd. */
290 vec.iov_base = (void *)&data;
293 cmsg->cmsg_len = cmsg_len;
294 cmsg->cmsg_level = SOL_SOCKET;
295 cmsg->cmsg_type = SCM_RIGHTS;
298 msghdr.msg_name = NULL;
299 msghdr.msg_namelen = 0;
300 msghdr.msg_iov = &vec;
301 msghdr.msg_iovlen = 1;
302 msghdr.msg_control = cmsg;
303 msghdr.msg_controllen = cmsg_len;
304 msghdr.msg_flags = 0;
306 ret = sendmsg(sock, &msghdr, 0 );
/*
 * Forked server main loop: accepts client connections on the local
 * socket, hands each one the hardware fd, and exits once it is the
 * last user of the shared memory segment.  NOTE(review): large parts
 * of the body (loop headers, break/continue, braces) are missing from
 * this extract.
 */
312 static void server_job(snd_pcm_direct_t *dmix)
315 int max = 128, current = 0;
316 struct pollfd pfds[max + 1];
318 server_job_dmix = dmix;
319 /* don't allow to be killed */
320 signal(SIGHUP, server_job_signal);
321 signal(SIGQUIT, server_job_signal);
322 signal(SIGTERM, server_job_signal);
/* NOTE(review): SIGKILL cannot be caught; this call is a no-op */
323 signal(SIGKILL, server_job_signal);
324 /* close all files to free resources */
325 i = sysconf(_SC_OPEN_MAX);
326 #ifdef SERVER_JOB_DEBUG
331 if (i != dmix->server_fd && i != dmix->hw_fd)
335 /* detach from parent */
/* slot 0 is the listening socket; slots 1..current are clients */
338 pfds[0].fd = dmix->server_fd;
339 pfds[0].events = POLLIN | POLLERR | POLLHUP;
341 server_printf("DIRECT SERVER STARTED\n");
/* wake every 500 ms to re-check whether we are the last shm user */
343 ret = poll(pfds, current + 1, 500);
344 server_printf("DIRECT SERVER: poll ret = %i, revents[0] = 0x%x, errno = %i\n", ret, pfds[0].revents, errno);
351 if (ret == 0 || (pfds[0].revents & (POLLERR | POLLHUP))) { /* timeout or error? */
353 snd_pcm_direct_semaphore_down(dmix, DIRECT_IPC_SEM_CLIENT);
354 if (shmctl(dmix->shmid, IPC_STAT, &buf) < 0) {
355 _snd_pcm_direct_shm_discard(dmix);
356 snd_pcm_direct_semaphore_up(dmix, DIRECT_IPC_SEM_CLIENT);
359 server_printf("DIRECT SERVER: nattch = %i\n", (int)buf.shm_nattch);
360 if (buf.shm_nattch == 1) /* server is the last user, exit */
362 snd_pcm_direct_semaphore_up(dmix, DIRECT_IPC_SEM_CLIENT);
/* new client connection: register it and pass it the hardware fd */
365 if (pfds[0].revents & POLLIN) {
367 sck = accept(dmix->server_fd, 0, 0);
369 server_printf("DIRECT SERVER: new connection %i\n", sck);
370 if (current == max) {
373 unsigned char buf = 'A';
374 pfds[current+1].fd = sck;
375 pfds[current+1].events = POLLIN | POLLERR | POLLHUP;
376 _snd_send_fd(sck, &buf, 1, dmix->hw_fd);
377 server_printf("DIRECT SERVER: fd sent ok\n");
/* service existing client connections */
382 for (i = 0; i < current && ret > 0; i++) {
383 struct pollfd *pfd = &pfds[i+1];
385 server_printf("client %i revents = 0x%x\n", pfd->fd, pfd->revents);
386 if (pfd->revents & (POLLERR | POLLHUP)) {
392 if (!(pfd->revents & POLLIN))
395 if (read(pfd->fd, &cmd, 1) == 1)
396 cmd = 0 /*process command */;
/* compact the pollfd array, dropping closed entries (fd < 0);
 * NOTE(review): source/destination overlap — memmove would be the
 * strictly correct call here */
398 for (i = 0; i < current; i++) {
399 if (pfds[i+1].fd < 0) {
401 memcpy(&pfds[i+1], &pfds[i+2], sizeof(struct pollfd) * (max - i - 1));
406 server_cleanup(dmix);
407 server_printf("DIRECT SERVER EXIT\n");
408 #ifdef SERVER_JOB_DEBUG
409 close(0); close(1); close(2);
/*
 * Create the server: pick a unique socket path, bind and listen on it,
 * then fork off the server job.  NOTE(review): the fork call(s) and
 * error returns are missing from this extract; the waitpid below
 * suggests a double-fork so the server is reparented to init.
 */
414 int snd_pcm_direct_server_create(snd_pcm_direct_t *dmix)
418 dmix->server_fd = -1;
420 ret = get_tmp_name(dmix->shmptr->socket_name, sizeof(dmix->shmptr->socket_name));
424 ret = make_local_socket(dmix->shmptr->socket_name, 1, dmix->ipc_perm, dmix->ipc_gid);
427 dmix->server_fd = ret;
429 ret = listen(dmix->server_fd, 4);
431 close(dmix->server_fd);
437 close(dmix->server_fd);
439 } else if (ret == 0) {
/* parent: reap the intermediate child and remember the server pid */
445 waitpid(ret, NULL, 0);
447 dmix->server_pid = ret;
/* Forget the server: clear the cached pid and close the listening
 * socket.  The kill/waitpid are deliberately disabled — the server
 * exits on its own when it becomes the last shm user.
 * NOTE(review): 'server_fd > 0' would skip a (legal) fd 0; likely
 * harmless here but worth confirming. */
452 int snd_pcm_direct_server_discard(snd_pcm_direct_t *dmix)
455 //kill(dmix->server_pid, SIGTERM);
456 //waitpid(dmix->server_pid, NULL, 0);
457 dmix->server_pid = (pid_t)-1;
459 if (dmix->server_fd > 0) {
460 close(dmix->server_fd);
461 dmix->server_fd = -1;
/* Client side: connect to the server socket and receive the hardware
 * fd; the one-byte 'A' payload doubles as a protocol sanity check.
 * NOTE(review): assignment of the socket to dmix->comm_fd is missing
 * from this extract. */
471 int snd_pcm_direct_client_connect(snd_pcm_direct_t *dmix)
476 ret = make_local_socket(dmix->shmptr->socket_name, 0, -1, -1);
481 ret = snd_receive_fd(dmix->comm_fd, &buf, 1, &dmix->hw_fd);
482 if (ret < 1 || buf != 'A') {
483 close(dmix->comm_fd);
/* Drop the client's communication socket to the server. */
492 int snd_pcm_direct_client_discard(snd_pcm_direct_t *dmix)
495 close(dmix->comm_fd);
/* Nonblock mode needs no plugin action; the core caches the flag. */
505 int snd_pcm_direct_nonblock(snd_pcm_t *pcm ATTRIBUTE_UNUSED, int nonblock ATTRIBUTE_UNUSED)
507 /* value is cached for us in pcm->mode (SND_PCM_NONBLOCK flag) */
/* Async notification is delegated to the per-client slave timer. */
511 int snd_pcm_direct_async(snd_pcm_t *pcm, int sig, pid_t pid)
513 snd_pcm_direct_t *dmix = pcm->private_data;
514 return snd_timer_async(dmix->timer, sig, pid);
517 /* empty the timer read queue */
/*
 * Drain pending timer events so stale wakeups do not fire.  Older
 * kernels (timer_need_poll) require a poll() gate before each read.
 * NOTE(review): the #if/#else preprocessor lines selecting the tread
 * vs. plain read structure are missing from this extract; values read
 * are intentionally discarded.
 */
518 int snd_pcm_direct_clear_timer_queue(snd_pcm_direct_t *dmix)
521 if (dmix->timer_need_poll) {
522 while (poll(&dmix->timer_fd, 1, 0) > 0) {
524 /* we don't need the value */
526 snd_timer_tread_t rbuf[4];
527 snd_timer_read(dmix->timer, rbuf, sizeof(rbuf));
529 snd_timer_read_t rbuf;
530 snd_timer_read(dmix->timer, &rbuf, sizeof(rbuf));
/* newer kernels: read until the queue is empty */
535 snd_timer_tread_t rbuf[4];
537 while ((len = snd_timer_read(dmix->timer, rbuf,
540 len != sizeof(rbuf[0]))
543 snd_timer_read_t rbuf;
544 while (snd_timer_read(dmix->timer, &rbuf, sizeof(rbuf)) > 0)
/* Stop the client's slave timer (return missing from this view). */
551 int snd_pcm_direct_timer_stop(snd_pcm_direct_t *dmix)
553 snd_timer_stop(dmix->timer);
558 * Recover slave on XRUN.
559 * Even if direct plugins disable xrun detection, there might be an xrun
560 * raised directly by some drivers.
561 * The first client recovers slave pcm.
562 * Each client needs to execute sw xrun handling afterwards
/* Serialized via the client semaphore so only one client performs the
 * prepare/start sequence; the recoveries counter tells the others.
 * NOTE(review): return statements and closing braces are missing from
 * this extract. */
564 int snd_pcm_direct_slave_recover(snd_pcm_direct_t *direct)
569 semerr = snd_pcm_direct_semaphore_down(direct,
570 DIRECT_IPC_SEM_CLIENT);
572 SNDERR("SEMDOWN FAILED with err %d", semerr);
576 if (snd_pcm_state(direct->spcm) != SND_PCM_STATE_XRUN) {
577 /* ignore... someone else already did recovery */
578 semerr = snd_pcm_direct_semaphore_up(direct,
579 DIRECT_IPC_SEM_CLIENT);
581 SNDERR("SEMUP FAILED with err %d", semerr);
587 ret = snd_pcm_prepare(direct->spcm);
589 SNDERR("recover: unable to prepare slave");
590 semerr = snd_pcm_direct_semaphore_up(direct,
591 DIRECT_IPC_SEM_CLIENT);
593 SNDERR("SEMUP FAILED with err %d", semerr);
/* dshare writes directly into the slave buffer: silence it so stale
 * data is not replayed after recovery */
599 if (direct->type == SND_PCM_TYPE_DSHARE) {
600 const snd_pcm_channel_area_t *dst_areas;
601 dst_areas = snd_pcm_mmap_areas(direct->spcm);
602 snd_pcm_areas_silence(dst_areas, 0, direct->spcm->channels,
603 direct->spcm->buffer_size,
604 direct->spcm->format);
607 ret = snd_pcm_start(direct->spcm);
609 SNDERR("recover: unable to start slave");
610 semerr = snd_pcm_direct_semaphore_up(direct,
611 DIRECT_IPC_SEM_CLIENT);
613 SNDERR("SEMUP FAILED with err %d", semerr);
/* publish the recovery so every client runs its sw xrun handling */
618 direct->shmptr->s.recoveries++;
619 semerr = snd_pcm_direct_semaphore_up(direct,
620 DIRECT_IPC_SEM_CLIENT);
622 SNDERR("SEMUP FAILED with err %d", semerr);
629 * enter xrun state, if slave xrun occurred
630 * @return: 0 - no xrun >0: xrun happened
/* Compares the client's cached recovery counter with the shared one;
 * a mismatch means the slave xrun-recovered since we last looked. */
632 int snd_pcm_direct_client_chk_xrun(snd_pcm_direct_t *direct, snd_pcm_t *pcm)
634 if (direct->shmptr->s.recoveries != direct->recoveries) {
635 /* no matter how many xruns we missed -
636 * so don't increment but just update to actual counter
638 direct->recoveries = direct->shmptr->s.recoveries;
639 pcm->fast_ops->drop(pcm);
640 /* trigger_tstamp update is missing in drop callbacks */
641 gettimestamp(&direct->trigger_tstamp, pcm->tstamp_type);
643 * if slave already entered xrun again the event is lost.
644 * snd_pcm_direct_clear_timer_queue(direct);
646 direct->state = SND_PCM_STATE_XRUN;
653 * This is the only operation guaranteed to be called before entering poll().
654 * Direct plugins use fd of snd_timer to poll on, these timers do NOT check
655 * state of substream in kernel by intention.
656 * Only the enter to xrun might be notified once (SND_TIMER_EVENT_MSTOP).
657 * If xrun event was not correctly handled or was ignored it will never be
658 * evaluated again afterwards.
659 * This will result in snd_pcm_wait() always returning timeout.
660 * In contrast poll() on pcm hardware checks ALSA state and will immediately
661 * return POLLERR on XRUN.
663 * To prevent timeout and applications endlessly spinning without xrun
664 * detected we add a state check here which may trigger the xrun sequence.
666 * return count of filled descriptors or negative error code
668 int snd_pcm_direct_poll_descriptors(snd_pcm_t *pcm, struct pollfd *pfds,
671 if (pcm->poll_fd < 0) {
672 SNDMSG("poll_fd < 0");
/* expose the timer fd to the caller (space permitting) */
675 if (space >= 1 && pfds) {
676 pfds->fd = pcm->poll_fd;
677 pfds->events = pcm->poll_events | POLLERR | POLLNVAL;
682 /* this will also evaluate slave state and enter xrun if necessary */
683 /* using __snd_pcm_state() since this function is called inside lock */
684 switch (__snd_pcm_state(pcm)) {
685 case SND_PCM_STATE_XRUN:
/*
 * Translate timer-fd poll results into pcm events, handling slave
 * xrun/suspend and the period-event race with the timer queue.
 * NOTE(review): several branch bodies, the empty/POLLOUT translation
 * and the final return are missing from this extract.
 */
693 int snd_pcm_direct_poll_revents(snd_pcm_t *pcm, struct pollfd *pfds, unsigned int nfds, unsigned short *revents)
695 snd_pcm_direct_t *dmix = pcm->private_data;
696 unsigned short events;
699 assert(pfds && nfds == 1 && revents);
702 events = pfds[0].revents;
703 if (events & POLLIN) {
704 snd_pcm_uframes_t avail;
705 __snd_pcm_avail_update(pcm);
706 if (pcm->stream == SND_PCM_STREAM_PLAYBACK) {
709 avail = snd_pcm_mmap_playback_avail(pcm);
711 avail = snd_pcm_mmap_capture_avail(pcm);
/* fewer frames than avail_min means the wakeup was premature */
713 empty = avail < pcm->avail_min;
715 switch (snd_pcm_state(dmix->spcm)) {
716 case SND_PCM_STATE_XRUN:
717 /* recover slave and update client state to xrun
718 * before returning POLLERR
720 snd_pcm_direct_slave_recover(dmix);
721 snd_pcm_direct_client_chk_xrun(dmix, pcm);
723 case SND_PCM_STATE_SUSPENDED:
724 case SND_PCM_STATE_SETUP:
729 /* here we have a race condition:
730 * if period event arrived after the avail_update call
731 * above we might clear this event with the following
733 * There is no way to do this in atomic manner, so we
734 * need to recheck avail_update if we successfully
735 * cleared a poll event.
737 if (snd_pcm_direct_clear_timer_queue(dmix))
739 events &= ~(POLLOUT|POLLIN);
740 /* additional check */
741 switch (__snd_pcm_state(pcm)) {
742 case SND_PCM_STATE_XRUN:
743 case SND_PCM_STATE_SUSPENDED:
744 case SND_PCM_STATE_SETUP:
/* Fill pcm info: forward to the slave when possible, otherwise
 * synthesize a minimal record from the client pcm's name. */
757 int snd_pcm_direct_info(snd_pcm_t *pcm, snd_pcm_info_t * info)
759 snd_pcm_direct_t *dmix = pcm->private_data;
761 if (dmix->spcm && !dmix->shmptr->use_server)
762 return snd_pcm_info(dmix->spcm, info);
764 memset(info, 0, sizeof(*info));
765 info->stream = pcm->stream;
767 /* FIXME: fill this with something more useful: we know the hardware name */
769 snd_strlcpy((char *)info->id, pcm->name, sizeof(info->id));
770 snd_strlcpy((char *)info->name, pcm->name, sizeof(info->name));
771 snd_strlcpy((char *)info->subname, pcm->name, sizeof(info->subname));
773 info->subdevices_count = 1;
777 static inline snd_mask_t *hw_param_mask(snd_pcm_hw_params_t *params,
778 snd_pcm_hw_param_t var)
780 return ¶ms->masks[var - SND_PCM_HW_PARAM_FIRST_MASK];
783 static inline snd_interval_t *hw_param_interval(snd_pcm_hw_params_t *params,
784 snd_pcm_hw_param_t var)
786 return ¶ms->intervals[var - SND_PCM_HW_PARAM_FIRST_INTERVAL];
/* Refine one requested interval parameter against 'src'; sets the
 * change mask bit when the interval actually narrowed.
 * NOTE(review): the third parameter, returns and braces are missing
 * from this extract. */
789 static int hw_param_interval_refine_one(snd_pcm_hw_params_t *params,
790 snd_pcm_hw_param_t var,
795 if (!(params->rmask & (1<<var))) /* nothing to do? */
797 i = hw_param_interval(params, var);
798 if (snd_interval_empty(i)) {
799 SNDERR("dshare interval %i empty?", (int)var);
802 if (snd_interval_refine(i, src))
803 params->cmask |= 1<<var;
/* Convenience wrapper: refine a parameter against an [imin, imax]
 * interval built on the stack.  NOTE(review): the min/max parameters
 * and the temporary's declaration are missing from this extract. */
807 static int hw_param_interval_refine_minmax(snd_pcm_hw_params_t *params,
808 snd_pcm_hw_param_t var,
814 memset(&t, 0, sizeof(t));
815 snd_interval_set_minmax(&t, imin, imax);
817 return hw_param_interval_refine_one(params, var, &t);
820 /* this code is used 'as-is' from the alsa kernel code */
/* Constrain interval 'i' so min and max fall on 'step' multiples above
 * 'min'; open endpoints are rounded inward.  NOTE(review): the
 * adjustment statements between the checks are missing from this
 * extract. */
821 static int snd_interval_step(struct snd_interval *i, unsigned int min,
/* distance of i->min from the step grid */
826 n = (i->min - min) % step;
827 if (n != 0 || i->openmin) {
/* distance of i->max from the step grid */
831 n = (i->max - min) % step;
832 if (n != 0 || i->openmax) {
836 if (snd_interval_checkempty(i)) {
/*
 * Refine client hw params against the slave's fixed configuration:
 * access/format/channels/rate are pinned to the slave; period/buffer
 * geometry is constrained to fit inside the slave buffer, optionally
 * relaxed (var_periodsize) or locked 1:1 (max_periods < 0).
 * NOTE(review): error returns, some braces and the debug #ifdef lines
 * are missing from this extract.
 */
845 int snd_pcm_direct_hw_refine(snd_pcm_t *pcm, snd_pcm_hw_params_t *params)
847 snd_pcm_direct_t *dshare = pcm->private_data;
/* clients may use mmap or rw access, interleaved or not */
848 static const snd_mask_t access = { .bits = {
849 (1<<SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) |
850 (1<<SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED) |
851 (1<<SNDRV_PCM_ACCESS_RW_INTERLEAVED) |
852 (1<<SNDRV_PCM_ACCESS_RW_NONINTERLEAVED),
858 snd_output_stdio_attach(&log, stderr, 0);
859 snd_output_puts(log, "DMIX REFINE (begin):\n");
860 snd_pcm_hw_params_dump(params, log);
862 if (params->rmask & (1<<SND_PCM_HW_PARAM_ACCESS)) {
863 if (snd_mask_empty(hw_param_mask(params, SND_PCM_HW_PARAM_ACCESS))) {
864 SNDERR("dshare access mask empty?");
867 if (snd_mask_refine(hw_param_mask(params, SND_PCM_HW_PARAM_ACCESS), &access))
868 params->cmask |= 1<<SND_PCM_HW_PARAM_ACCESS;
/* format is fixed to whatever the slave was configured with */
870 if (params->rmask & (1<<SND_PCM_HW_PARAM_FORMAT)) {
871 if (snd_mask_empty(hw_param_mask(params, SND_PCM_HW_PARAM_FORMAT))) {
872 SNDERR("dshare format mask empty?");
875 if (snd_mask_refine_set(hw_param_mask(params, SND_PCM_HW_PARAM_FORMAT),
876 dshare->shmptr->hw.format))
877 params->cmask |= 1<<SND_PCM_HW_PARAM_FORMAT;
879 //snd_mask_none(hw_param_mask(params, SND_PCM_HW_PARAM_SUBFORMAT));
880 if (params->rmask & (1<<SND_PCM_HW_PARAM_CHANNELS)) {
881 if (snd_interval_empty(hw_param_interval(params, SND_PCM_HW_PARAM_CHANNELS))) {
882 SNDERR("dshare channels mask empty?");
885 err = snd_interval_refine_set(hw_param_interval(params, SND_PCM_HW_PARAM_CHANNELS), dshare->channels);
/* rate is always pinned to the slave rate */
889 err = hw_param_interval_refine_one(params, SND_PCM_HW_PARAM_RATE,
890 &dshare->shmptr->hw.rate);
/* max_periods < 0: lock period/buffer geometry 1:1 to the slave */
894 if (dshare->max_periods < 0) {
895 err = hw_param_interval_refine_one(params, SND_PCM_HW_PARAM_PERIOD_SIZE,
896 &dshare->shmptr->hw.period_size);
899 err = hw_param_interval_refine_one(params, SND_PCM_HW_PARAM_PERIOD_TIME,
900 &dshare->shmptr->hw.period_time);
903 err = hw_param_interval_refine_one(params, SND_PCM_HW_PARAM_BUFFER_SIZE,
904 &dshare->shmptr->hw.buffer_size);
907 err = hw_param_interval_refine_one(params, SND_PCM_HW_PARAM_BUFFER_TIME,
908 &dshare->shmptr->hw.buffer_time);
/* otherwise only constrain when the caller touched geometry params */
911 } else if (params->rmask & ((1<<SND_PCM_HW_PARAM_PERIODS)|
912 (1<<SND_PCM_HW_PARAM_BUFFER_BYTES)|
913 (1<<SND_PCM_HW_PARAM_BUFFER_SIZE)|
914 (1<<SND_PCM_HW_PARAM_BUFFER_TIME)|
915 (1<<SND_PCM_HW_PARAM_PERIOD_TIME)|
916 (1<<SND_PCM_HW_PARAM_PERIOD_SIZE)|
917 (1<<SND_PCM_HW_PARAM_PERIOD_BYTES))) {
918 snd_interval_t period_size = dshare->shmptr->hw.period_size;
919 snd_interval_t period_time = dshare->shmptr->hw.period_time;
921 unsigned int max_periods = dshare->max_periods;
923 max_periods = dshare->slave_buffer_size / dshare->slave_period_size;
925 /* make sure buffer size does not exceed slave buffer size */
926 err = hw_param_interval_refine_minmax(params, SND_PCM_HW_PARAM_BUFFER_SIZE,
927 2 * dshare->slave_period_size, dshare->slave_buffer_size);
930 if (dshare->var_periodsize) {
931 /* more tolerant settings... */
932 if (dshare->shmptr->hw.buffer_size.max / 2 > period_size.max)
933 period_size.max = dshare->shmptr->hw.buffer_size.max / 2;
934 if (dshare->shmptr->hw.buffer_time.max / 2 > period_time.max)
935 period_time.max = dshare->shmptr->hw.buffer_time.max / 2;
938 err = hw_param_interval_refine_one(params, SND_PCM_HW_PARAM_PERIOD_SIZE,
942 err = hw_param_interval_refine_one(params, SND_PCM_HW_PARAM_PERIOD_TIME,
948 err = hw_param_interval_refine_minmax(params, SND_PCM_HW_PARAM_PERIODS,
953 err = snd_pcm_hw_refine_soft(pcm, params);
/* snap period size to a multiple of the slave period size */
957 err = snd_interval_step(hw_param_interval(params, SND_PCM_HW_PARAM_PERIOD_SIZE),
958 0, dshare->slave_period_size);
963 params->rmask |= (1 << SND_PCM_HW_PARAM_PERIOD_SIZE);
/* one timer tick per slave period contained in the client period */
966 dshare->timer_ticks = hw_param_interval(params, SND_PCM_HW_PARAM_PERIOD_SIZE)->max / dshare->slave_period_size;
967 params->info = dshare->shmptr->s.info;
969 snd_output_puts(log, "DMIX REFINE (end):\n");
970 snd_pcm_hw_params_dump(params, log);
971 snd_output_close(log);
/* Install hw params on the client side: just copy the slave-derived
 * values cached in shared memory (return missing from this view). */
976 int snd_pcm_direct_hw_params(snd_pcm_t *pcm, snd_pcm_hw_params_t * params)
978 snd_pcm_direct_t *dmix = pcm->private_data;
980 params->info = dmix->shmptr->s.info;
981 params->rate_num = dmix->shmptr->s.rate;
982 params->rate_den = 1;
983 params->fifo_size = 0;
984 params->msbits = dmix->shmptr->s.msbits;
/* Nothing to release; hw state lives in the pcm structure. */
988 int snd_pcm_direct_hw_free(snd_pcm_t *pcm ATTRIBUTE_UNUSED)
990 /* values are cached in the pcm structure */
/* Nothing to install; sw params live in the pcm structure. */
994 int snd_pcm_direct_sw_params(snd_pcm_t *pcm ATTRIBUTE_UNUSED, snd_pcm_sw_params_t * params ATTRIBUTE_UNUSED)
996 /* values are cached in the pcm structure */
/* Channel layout comes from the per-client shm mapping (-1 = no fd). */
1000 int snd_pcm_direct_channel_info(snd_pcm_t *pcm, snd_pcm_channel_info_t * info)
1002 return snd_pcm_channel_info_shm(pcm, info, -1);
/* mmap/munmap are no-ops for direct plugins: the client area is the
 * shared/intermediate buffer managed elsewhere (bodies not visible in
 * this extract). */
1005 int snd_pcm_direct_mmap(snd_pcm_t *pcm ATTRIBUTE_UNUSED)
1010 int snd_pcm_direct_munmap(snd_pcm_t *pcm ATTRIBUTE_UNUSED)
/* Channel-map operations are forwarded verbatim to the slave pcm. */
1015 snd_pcm_chmap_query_t **snd_pcm_direct_query_chmaps(snd_pcm_t *pcm)
1017 snd_pcm_direct_t *dmix = pcm->private_data;
1018 return snd_pcm_query_chmaps(dmix->spcm);
1021 snd_pcm_chmap_t *snd_pcm_direct_get_chmap(snd_pcm_t *pcm)
1023 snd_pcm_direct_t *dmix = pcm->private_data;
1024 return snd_pcm_get_chmap(dmix->spcm);
1027 int snd_pcm_direct_set_chmap(snd_pcm_t *pcm, const snd_pcm_chmap_t *map)
1029 snd_pcm_direct_t *dmix = pcm->private_data;
1030 return snd_pcm_set_chmap(dmix->spcm, map);
/*
 * Prepare the client: bring the slave back to RUNNING if needed,
 * reset client pointers and (re)program the timer.
 * NOTE(review): error returns and some branch bodies are missing from
 * this extract.
 */
1033 int snd_pcm_direct_prepare(snd_pcm_t *pcm)
1035 snd_pcm_direct_t *dmix = pcm->private_data;
1038 switch (snd_pcm_state(dmix->spcm)) {
1039 case SND_PCM_STATE_SETUP:
1040 case SND_PCM_STATE_XRUN:
1041 case SND_PCM_STATE_SUSPENDED:
1042 err = snd_pcm_prepare(dmix->spcm);
/* slave runs free; start it right after prepare */
1045 snd_pcm_start(dmix->spcm);
1047 case SND_PCM_STATE_OPEN:
1048 case SND_PCM_STATE_DISCONNECTED:
1053 snd_pcm_direct_check_interleave(dmix, pcm);
1054 dmix->state = SND_PCM_STATE_PREPARED;
1055 dmix->appl_ptr = dmix->last_appl_ptr = 0;
1057 return snd_pcm_direct_set_timer_params(dmix);
/*
 * Resume after system suspend, serialized via the client semaphore.
 * NOTE(review): the return statement is missing from this extract.
 */
1060 int snd_pcm_direct_resume(snd_pcm_t *pcm)
1062 snd_pcm_direct_t *dmix = pcm->private_data;
1063 snd_pcm_t *spcm = dmix->spcm;
1065 snd_pcm_direct_semaphore_down(dmix, DIRECT_IPC_SEM_CLIENT);
1066 /* some buggy drivers require the device resumed before prepared;
1067 * when a device has RESUME flag and is in SUSPENDED state, resume
1068 * here but immediately drop to bring it to a sane active state.
1070 if ((spcm->info & SND_PCM_INFO_RESUME) &&
1071 snd_pcm_state(spcm) == SND_PCM_STATE_SUSPENDED) {
1072 snd_pcm_resume(spcm);
1074 snd_pcm_direct_timer_stop(dmix);
1075 snd_pcm_direct_clear_timer_queue(dmix);
/* wipe stale audio before restarting the slave */
1076 snd_pcm_areas_silence(snd_pcm_mmap_areas(spcm), 0,
1077 spcm->channels, spcm->buffer_size,
1079 snd_pcm_prepare(spcm);
1080 snd_pcm_start(spcm);
1082 snd_pcm_direct_semaphore_up(dmix, DIRECT_IPC_SEM_CLIENT);
/* Copy one slave pcm field into the shared control area. */
1086 #define COPY_SLAVE(field) (dmix->shmptr->s.field = spcm->field)
1088 /* copy the slave setting */
/* Publish the slave's negotiated configuration into shared memory so
 * every client sees identical settings.  PAUSE is masked out first:
 * one client pausing would stall all of them. */
1089 static void save_slave_setting(snd_pcm_direct_t *dmix, snd_pcm_t *spcm)
1091 spcm->info &= ~SND_PCM_INFO_PAUSE;
1095 COPY_SLAVE(subformat);
1096 COPY_SLAVE(channels);
1098 COPY_SLAVE(period_size);
1099 COPY_SLAVE(period_time);
1100 COPY_SLAVE(periods);
1101 COPY_SLAVE(tstamp_mode);
1102 COPY_SLAVE(tstamp_type);
1103 COPY_SLAVE(period_step);
1104 COPY_SLAVE(avail_min);
1105 COPY_SLAVE(start_threshold);
1106 COPY_SLAVE(stop_threshold);
1107 COPY_SLAVE(silence_threshold);
1108 COPY_SLAVE(silence_size);
1109 COPY_SLAVE(boundary);
1112 COPY_SLAVE(rate_num);
1113 COPY_SLAVE(rate_den);
1114 COPY_SLAVE(hw_flags);
1115 COPY_SLAVE(fifo_size);
1116 COPY_SLAVE(buffer_size);
1117 COPY_SLAVE(buffer_time);
1118 COPY_SLAVE(sample_bits);
1119 COPY_SLAVE(frame_bits);
/* direct clients must not see RESUME; resume is handled centrally */
1121 dmix->shmptr->s.info &= ~SND_PCM_INFO_RESUME;
1127 * this function initializes hardware and starts playback operation with
1128 * no stop threshold (it operates all time without xrun checking)
1129 * also, the driver silences the unused ring buffer areas for us
/*
 * Negotiate hw/sw params on the slave pcm, start it free-running, and
 * cache the negotiated values for clients.
 * NOTE(review): this extract is missing many lines (error returns,
 * braces, #if directives).  Also, every '¶ms' below is extraction
 * mojibake for '&params' — repair when the file is re-encoded.
 */
1131 int snd_pcm_direct_initialize_slave(snd_pcm_direct_t *dmix, snd_pcm_t *spcm, struct slave_params *params)
1133 snd_pcm_hw_params_t hw_params = {0};
1134 snd_pcm_sw_params_t sw_params = {0};
1135 int ret, buffer_is_not_initialized;
1136 snd_pcm_uframes_t boundary;
1142 SNDERR("unable to find a valid configuration for slave");
1145 ret = snd_pcm_hw_params_any(spcm, &hw_params);
1147 SNDERR("snd_pcm_hw_params_any failed");
/* mmap access is mandatory for the mixing/sharing engine */
1150 ret = snd_pcm_hw_params_set_access(spcm, &hw_params,
1151 SND_PCM_ACCESS_MMAP_INTERLEAVED);
1153 ret = snd_pcm_hw_params_set_access(spcm, &hw_params,
1154 SND_PCM_ACCESS_MMAP_NONINTERLEAVED);
1156 SNDERR("slave plugin does not support mmap interleaved or mmap noninterleaved access");
1160 if (params->format == SND_PCM_FORMAT_UNKNOWN)
1163 ret = snd_pcm_hw_params_set_format(spcm, &hw_params,
/* candidate formats for dmix; the XOR trick yields the opposite-endian
 * sibling of the native S32/S16 format (presumably for byte-swapped
 * hardware — TODO confirm) */
1166 static const snd_pcm_format_t dmix_formats[] = {
1168 SND_PCM_FORMAT_S32 ^ SND_PCM_FORMAT_S32_LE ^
1169 SND_PCM_FORMAT_S32_BE,
1171 SND_PCM_FORMAT_S16 ^ SND_PCM_FORMAT_S16_LE ^
1172 SND_PCM_FORMAT_S16_BE,
1173 SND_PCM_FORMAT_S24_LE,
1174 SND_PCM_FORMAT_S24_3LE,
1177 snd_pcm_format_t format;
1180 for (i = 0; i < ARRAY_SIZE(dmix_formats); ++i) {
1181 format = dmix_formats[i];
1182 ret = snd_pcm_hw_params_set_format(spcm, &hw_params,
/* non-dmix plugins may fall back to any first available format */
1187 if (ret < 0 && dmix->type != SND_PCM_TYPE_DMIX) {
1188 /* TODO: try to choose a good format */
1189 ret = INTERNAL(snd_pcm_hw_params_set_format_first)(spcm,
1190 &hw_params, &format);
1193 SNDERR("requested or auto-format is not available");
1196 params->format = format;
1198 ret = INTERNAL(snd_pcm_hw_params_set_channels_near)(spcm, &hw_params,
1199 (unsigned int *)¶ms->channels);
1201 SNDERR("requested count of channels is not available");
1204 ret = INTERNAL(snd_pcm_hw_params_set_rate_near)(spcm, &hw_params,
1205 (unsigned int *)¶ms->rate, 0);
1207 SNDERR("requested rate is not available");
/* buffer geometry: prefer time, then size, else derive from periods */
1211 buffer_is_not_initialized = 0;
1212 if (params->buffer_time > 0) {
1213 ret = INTERNAL(snd_pcm_hw_params_set_buffer_time_near)(spcm,
1214 &hw_params, (unsigned int *)¶ms->buffer_time, 0);
1216 SNDERR("unable to set buffer time");
1219 } else if (params->buffer_size > 0) {
1220 ret = INTERNAL(snd_pcm_hw_params_set_buffer_size_near)(spcm,
1221 &hw_params, (snd_pcm_uframes_t *)¶ms->buffer_size);
1223 SNDERR("unable to set buffer size");
1227 buffer_is_not_initialized = 1;
1230 if (params->period_time > 0) {
1231 ret = INTERNAL(snd_pcm_hw_params_set_period_time_near)(spcm,
1232 &hw_params, (unsigned int *)¶ms->period_time, 0);
1234 SNDERR("unable to set period_time");
1237 } else if (params->period_size > 0) {
1238 ret = INTERNAL(snd_pcm_hw_params_set_period_size_near)(spcm,
1239 &hw_params, (snd_pcm_uframes_t *)¶ms->period_size,
1242 SNDERR("unable to set period_size");
1247 if (buffer_is_not_initialized && params->periods > 0) {
1248 unsigned int periods = params->periods;
1249 ret = INTERNAL(snd_pcm_hw_params_set_periods_near)(spcm,
1250 &hw_params, ¶ms->periods, 0);
1252 SNDERR("unable to set requested periods");
/* a single period cannot work: halve the period and retry */
1255 if (params->periods == 1) {
1256 params->periods = periods;
1257 if (params->period_time > 0) {
1258 params->period_time /= 2;
1260 } else if (params->period_size > 0) {
1261 params->period_size /= 2;
1264 SNDERR("unable to use stream with periods == 1");
1269 ret = snd_pcm_hw_params(spcm, &hw_params);
1271 SNDERR("unable to install hw params");
1275 /* store some hw_params values to shared info */
1276 dmix->shmptr->hw.format =
1277 snd_mask_value(hw_param_mask(&hw_params,
1278 SND_PCM_HW_PARAM_FORMAT));
1279 dmix->shmptr->hw.rate =
1280 *hw_param_interval(&hw_params, SND_PCM_HW_PARAM_RATE);
1281 dmix->shmptr->hw.buffer_size =
1282 *hw_param_interval(&hw_params, SND_PCM_HW_PARAM_BUFFER_SIZE);
1283 dmix->shmptr->hw.buffer_time =
1284 *hw_param_interval(&hw_params, SND_PCM_HW_PARAM_BUFFER_TIME);
1285 dmix->shmptr->hw.period_size =
1286 *hw_param_interval(&hw_params, SND_PCM_HW_PARAM_PERIOD_SIZE);
1287 dmix->shmptr->hw.period_time =
1288 *hw_param_interval(&hw_params, SND_PCM_HW_PARAM_PERIOD_TIME);
1289 dmix->shmptr->hw.periods =
1290 *hw_param_interval(&hw_params, SND_PCM_HW_PARAM_PERIODS);
1293 ret = snd_pcm_sw_params_current(spcm, &sw_params);
1295 SNDERR("unable to get current sw_params");
1299 ret = snd_pcm_sw_params_get_boundary(&sw_params, &boundary);
1301 SNDERR("unable to get boundary");
/* stop threshold = boundary -> the slave never xrun-stops on its own */
1304 ret = snd_pcm_sw_params_set_stop_threshold(spcm, &sw_params, boundary);
1306 SNDERR("unable to set stop threshold");
1310 /* set timestamp mode to MMAP
1311 * the slave timestamp is copied appropriately in dsnoop/dmix/dshare
1312 * based on the tstamp_mode of each client
1314 ret = snd_pcm_sw_params_set_tstamp_mode(spcm, &sw_params,
1315 SND_PCM_TSTAMP_ENABLE);
1317 SNDERR("unable to tstamp mode MMAP");
/* only the writing plugins need driver-side silencing */
1321 if (dmix->type != SND_PCM_TYPE_DMIX &&
1322 dmix->type != SND_PCM_TYPE_DSHARE)
1323 goto __skip_silencing;
1325 ret = snd_pcm_sw_params_set_silence_threshold(spcm, &sw_params, 0);
1327 SNDERR("unable to set silence threshold");
1330 ret = snd_pcm_sw_params_set_silence_size(spcm, &sw_params, boundary);
1332 SNDERR("unable to set silence threshold (please upgrade to 0.9.0rc8+ driver)");
1338 ret = snd_pcm_sw_params(spcm, &sw_params);
1340 SNDERR("unable to install sw params (please upgrade to 0.9.0rc8+ driver)");
1344 if (dmix->type == SND_PCM_TYPE_DSHARE) {
1345 const snd_pcm_channel_area_t *dst_areas;
1346 dst_areas = snd_pcm_mmap_areas(spcm);
1347 snd_pcm_areas_silence(dst_areas, 0, spcm->channels,
1348 spcm->buffer_size, spcm->format);
1351 ret = snd_pcm_start(spcm);
1353 SNDERR("unable to start PCM stream");
/* exactly one poll fd is required so it can be handed to clients */
1357 if (snd_pcm_poll_descriptors_count(spcm) != 1) {
1358 SNDERR("unable to use hardware pcm with fd more than one!!!");
1361 snd_pcm_poll_descriptors(spcm, &fd, 1);
1362 dmix->hw_fd = fd.fd;
1364 save_slave_setting(dmix, spcm);
1366 /* Currently, we assume that each dmix client has the same
1367 * hw_params setting.
1368 * If the arbitrary hw_parmas is supported in future,
1369 * boundary has to be taken from the slave config but
1370 * recalculated for the native boundary size (for 32bit
1371 * emulation on 64bit arch).
1373 dmix->slave_buffer_size = spcm->buffer_size;
1374 dmix->slave_period_size = spcm->period_size;
1375 dmix->slave_boundary = spcm->boundary;
/* the slave outlives individual clients; never close its handle */
1377 spcm->donot_close = 1;
/* pre-2.0.8 drivers need the fd-passing server instead of direct open */
1381 ioctl(spcm->poll_fd, SNDRV_PCM_IOCTL_PVERSION, &ver);
1382 if (ver < SNDRV_PROTOCOL_VERSION(2, 0, 8))
1383 dmix->shmptr->use_server = 1;
1390 * the trick is used here; we cannot use effectively the hardware handle because
1391 * we cannot drive multiple accesses to appl_ptr; so we use slave timer of given
1392 * PCM hardware handle; it's not this easy and cheap?
/*
 * Open the per-client slave PCM timer used for poll wakeups and select
 * the timer-event mask, with fallbacks for older kernel timer APIs.
 * NOTE(review): the 'name' buffer declaration, error returns and
 * braces are missing from this extract.
 */
1394 int snd_pcm_direct_initialize_poll_fd(snd_pcm_direct_t *dmix)
1397 snd_pcm_info_t info = {0};
1399 int capture = dmix->type == SND_PCM_TYPE_DSNOOP ? 1 : 0;
1402 dmix->timer_need_poll = 0;
1403 dmix->timer_ticks = 1;
1404 ret = snd_pcm_info(dmix->spcm, &info);
1406 SNDERR("unable to info for slave pcm");
/* PCM timers come in playback/capture pairs per subdevice */
1409 sprintf(name, "hw:CLASS=%i,SCLASS=0,CARD=%i,DEV=%i,SUBDEV=%i",
1410 (int)SND_TIMER_CLASS_PCM,
1411 snd_pcm_info_get_card(&info),
1412 snd_pcm_info_get_device(&info),
1413 snd_pcm_info_get_subdevice(&info) * 2 + capture);
/* prefer the enhanced-read (TREAD) interface, fall back to plain */
1414 ret = snd_timer_open(&dmix->timer, name,
1415 SND_TIMER_OPEN_NONBLOCK | SND_TIMER_OPEN_TREAD);
1418 ret = snd_timer_open(&dmix->timer, name,
1419 SND_TIMER_OPEN_NONBLOCK);
1421 SNDERR("unable to open timer '%s'", name);
1426 if (snd_timer_poll_descriptors_count(dmix->timer) != 1) {
1427 SNDERR("unable to use timer '%s' with more than one fd!", name);
1430 snd_timer_poll_descriptors(dmix->timer, &dmix->timer_fd, 1);
1431 dmix->poll_fd = dmix->timer_fd.fd;
/* events that terminate a wait: suspend/resume/stop variants */
1433 dmix->timer_events = (1<<SND_TIMER_EVENT_MSUSPEND) |
1434 (1<<SND_TIMER_EVENT_MRESUME) |
1435 (1<<SND_TIMER_EVENT_MSTOP) |
1436 (1<<SND_TIMER_EVENT_STOP);
1439 * Some hacks for older kernel drivers
1443 ioctl(dmix->poll_fd, SNDRV_TIMER_IOCTL_PVERSION, &ver);
1444 /* In older versions, check via poll before read() is needed
1445 * because of the confliction between TIMER_START and
1448 if (ver < SNDRV_PROTOCOL_VERSION(2, 0, 4))
1449 dmix->timer_need_poll = 1;
1451 * In older versions, timer uses pause events instead
1452 * suspend/resume events.
1454 if (ver < SNDRV_PROTOCOL_VERSION(2, 0, 5)) {
1455 dmix->timer_events &= ~((1<<SND_TIMER_EVENT_MSUSPEND) |
1456 (1<<SND_TIMER_EVENT_MRESUME));
1457 dmix->timer_events |= (1<<SND_TIMER_EVENT_MPAUSE) |
1458 (1<<SND_TIMER_EVENT_MCONTINUE);
1460 /* In older versions, use SND_TIMER_EVENT_START too.
1462 if (ver < SNDRV_PROTOCOL_VERSION(2, 0, 6))
1463 dmix->timer_events |= 1<<SND_TIMER_EVENT_START;
1468 static snd_pcm_uframes_t recalc_boundary_size(unsigned long long bsize, snd_pcm_uframes_t buffer_size)
1470 if (bsize > LONG_MAX) {
1471 bsize = buffer_size;
1472 while (bsize * 2 <= LONG_MAX - buffer_size)
1475 return (snd_pcm_uframes_t)bsize;
1478 #define COPY_SLAVE(field) (spcm->field = dmix->shmptr->s.field)
1480 /* copy the slave setting */
/* Mirror the slave PCM configuration published in the shared-memory
 * area (dmix->shmptr->s) into this client's slave handle, field by
 * field, so a secondary client sees the same setup as the instance
 * that originally configured the slave. */
1481 static void copy_slave_setting(snd_pcm_direct_t *dmix, snd_pcm_t *spcm)
1485 COPY_SLAVE(subformat);
1486 COPY_SLAVE(channels);
1488 COPY_SLAVE(period_size);
1489 COPY_SLAVE(period_time);
1490 COPY_SLAVE(periods);
1491 COPY_SLAVE(tstamp_mode);
1492 COPY_SLAVE(tstamp_type);
1493 COPY_SLAVE(period_step);
1494 COPY_SLAVE(avail_min);
1495 COPY_SLAVE(start_threshold);
1496 COPY_SLAVE(stop_threshold);
1497 COPY_SLAVE(silence_threshold);
1498 COPY_SLAVE(silence_size);
1499 COPY_SLAVE(boundary);
1502 COPY_SLAVE(rate_num);
1503 COPY_SLAVE(rate_den);
1504 COPY_SLAVE(hw_flags);
1505 COPY_SLAVE(fifo_size);
1506 COPY_SLAVE(buffer_size);
1507 COPY_SLAVE(buffer_time);
1508 COPY_SLAVE(sample_bits);
1509 COPY_SLAVE(frame_bits);
/* pause cannot be coordinated between several direct clients, so
 * never advertise it */
1511 spcm->info &= ~SND_PCM_INFO_PAUSE;
/* the published boundary may come from an arch with a different word
 * size; rescale it for the native long */
1512 spcm->boundary = recalc_boundary_size(dmix->shmptr->s.boundary, spcm->buffer_size);
1519 * open a slave PCM as secondary client (dup'ed fd)
/* Open a second client handle on the already-open slave hw PCM via its
 * dup'ed fd, adopt the settings published in shared memory and mmap the
 * channel areas. Returns 0 on success or a negative error code. */
1521 int snd_pcm_direct_open_secondary_client(snd_pcm_t **spcmp, snd_pcm_direct_t *dmix, const char *client_name)
1526 ret = snd_pcm_hw_open_fd(spcmp, client_name, dmix->hw_fd, 0);
1528 SNDERR("unable to open hardware");
/* the fd is owned by the first instance; never close it from here */
1533 spcm->donot_close = 1;
1536 copy_slave_setting(dmix, spcm);
1538 /* Use the slave setting as SPCM, so far */
1539 dmix->slave_buffer_size = spcm->buffer_size;
1540 dmix->slave_period_size = dmix->shmptr->s.period_size;
1541 dmix->slave_boundary = spcm->boundary;
1542 dmix->recoveries = dmix->shmptr->s.recoveries;
1544 ret = snd_pcm_mmap(spcm);
1546 SNDERR("unable to mmap channels");
1553 * open a slave PCM as secondary client (dup'ed fd)
/* Initialize an already-open slave handle for a secondary client:
 * adopt the settings published in shared memory and mmap the channel
 * areas. The 'params' argument is unused (ATTRIBUTE_UNUSED).
 * Returns 0 on success or a negative error code. */
1555 int snd_pcm_direct_initialize_secondary_slave(snd_pcm_direct_t *dmix,
1557 struct slave_params *params ATTRIBUTE_UNUSED)
/* the slave fd belongs to the first instance; do not close it here */
1561 spcm->donot_close = 1;
1564 copy_slave_setting(dmix, spcm);
1566 /* Use the slave setting as SPCM, so far */
1567 dmix->slave_buffer_size = spcm->buffer_size;
1568 dmix->slave_period_size = dmix->shmptr->s.period_size;
1569 dmix->slave_boundary = spcm->boundary;
1571 ret = snd_pcm_mmap(spcm);
1573 SNDERR("unable to mmap channels");
1579 int snd_pcm_direct_set_timer_params(snd_pcm_direct_t *dmix)
1581 snd_timer_params_t params = {0};
1582 unsigned int filter;
1585 snd_timer_params_set_auto_start(¶ms, 1);
1586 if (dmix->type != SND_PCM_TYPE_DSNOOP)
1587 snd_timer_params_set_early_event(¶ms, 1);
1588 snd_timer_params_set_ticks(¶ms, dmix->timer_ticks);
1590 filter = (1<<SND_TIMER_EVENT_TICK) |
1592 INTERNAL(snd_timer_params_set_filter)(¶ms, filter);
1594 ret = snd_timer_params(dmix->timer, ¶ms);
1596 SNDERR("unable to set timer parameters");
1603 * ring buffer operation
/* Decide whether both the client and the slave mmap areas form one
 * plain interleaved buffer, so frames can be mixed/copied flat.
 * Caches and returns the result in dmix->interleaved:
 * 1 = fully interleaved, 0 = not. */
1605 int snd_pcm_direct_check_interleave(snd_pcm_direct_t *dmix, snd_pcm_t *pcm)
1607 unsigned int chn, channels;
1608 int bits, interleaved = 1;
1609 const snd_pcm_channel_area_t *dst_areas;
1610 const snd_pcm_channel_area_t *src_areas;
/* only byte-aligned sample widths can be treated as interleaved */
1612 bits = snd_pcm_format_physical_width(pcm->format);
1613 if ((bits % 8) != 0)
1615 channels = dmix->channels;
1616 dst_areas = snd_pcm_mmap_areas(dmix->spcm);
1617 src_areas = snd_pcm_mmap_areas(pcm);
/* all channels must share one base address on both sides */
1618 for (chn = 1; chn < channels; chn++) {
1619 if (dst_areas[chn-1].addr != dst_areas[chn].addr) {
1623 if (src_areas[chn-1].addr != src_areas[chn].addr) {
/* channel n must sit at bit offset n*bits with stride channels*bits,
 * and any bindings must be the identity mapping */
1628 for (chn = 0; chn < channels; chn++) {
1629 if (dmix->bindings && dmix->bindings[chn] != chn) {
1633 if (dst_areas[chn].first != chn * bits ||
1634 dst_areas[chn].step != channels * bits) {
1638 if (src_areas[chn].first != chn * bits ||
1639 src_areas[chn].step != channels * bits) {
1644 return dmix->interleaved = interleaved;
1648 * parse the channel map
1649 * id == client channel
1650 * value == slave's channel
/* Parse the 'bindings' compound configuration: builds dmix->bindings,
 * an array indexed by client channel holding the target slave channel
 * (UINT_MAX = not routed), and sets dmix->channels to the client
 * channel count. Returns 0 on success or a negative error code. */
1652 int snd_pcm_direct_parse_bindings(snd_pcm_direct_t *dmix,
1653 struct slave_params *params,
1656 snd_config_iterator_t i, next;
1657 unsigned int chn, chn1, count = 0;
1658 unsigned int *bindings;
1661 dmix->channels = UINT_MAX;
1664 if (snd_config_get_type(cfg) != SND_CONFIG_TYPE_COMPOUND) {
1665 SNDERR("invalid type for bindings");
/* first pass: find the highest client channel to size the array */
1668 snd_config_for_each(i, next, cfg) {
1669 snd_config_t *n = snd_config_iterator_entry(i);
1672 if (snd_config_get_id(n, &id) < 0)
1674 err = safe_strtol(id, &cchannel);
1675 if (err < 0 || cchannel < 0) {
1676 SNDERR("invalid client channel in binding: %s\n", id);
1679 if ((unsigned)cchannel >= count)
1680 count = cchannel + 1;
1685 SNDERR("client channel out of range");
1688 bindings = malloc(count * sizeof(unsigned int));
1689 if (bindings == NULL)
1691 for (chn = 0; chn < count; chn++)
1692 bindings[chn] = UINT_MAX; /* don't route */
/* second pass: fill in the client -> slave channel routes */
1693 snd_config_for_each(i, next, cfg) {
1694 snd_config_t *n = snd_config_iterator_entry(i);
1696 long cchannel, schannel;
1697 if (snd_config_get_id(n, &id) < 0)
/* id already validated in the first pass */
1699 safe_strtol(id, &cchannel);
1700 if (snd_config_get_integer(n, &schannel) < 0) {
1701 SNDERR("unable to get slave channel (should be integer type) in binding: %s\n", id);
1705 if (schannel < 0 || schannel >= params->channels) {
1706 SNDERR("invalid slave channel number %ld in binding to %ld",
1707 schannel, cchannel);
1711 bindings[cchannel] = schannel;
/* mixing clients (dmix/dshare) must not route two client channels to
 * the same slave destination; snoop-type clients may duplicate */
1713 if (dmix->type == SND_PCM_TYPE_DSNOOP ||
1715 goto __skip_same_dst;
1716 for (chn = 0; chn < count; chn++) {
1717 for (chn1 = 0; chn1 < count; chn1++) {
/* NOTE(review): this compares against dmix->bindings, which has not
 * been assigned the new map yet at this point - confirm intended */
1720 if (bindings[chn] == dmix->bindings[chn1]) {
1721 SNDERR("unable to route channels %d,%d to same destination %d", chn, chn1, bindings[chn]);
1728 dmix->bindings = bindings;
1729 dmix->channels = count;
1734 * parse slave config and calculate the ipc_key offset
/* Recursively resolve the slave PCM definition down to a type-"hw"
 * node and derive an IPC key offset from its card/device/subdevice and
 * the stream direction, so that different slaves get distinct
 * shared-memory/semaphore keys. 'hop' guards against definition loops.
 * Returns the non-negative offset or a negative error code. */
1737 static int _snd_pcm_direct_get_slave_ipc_offset(snd_config_t *root,
1738 snd_config_t *sconf,
1742 snd_config_iterator_t i, next;
1743 snd_config_t *pcm_conf, *pcm_conf2;
1745 long card = 0, device = 0, subdevice = 0;
/* a plain string is a reference to another PCM definition:
 * look it up and recurse */
1748 if (snd_config_get_string(sconf, &str) >= 0) {
1749 if (hop > SND_CONF_MAX_HOPS) {
1750 SNDERR("Too many definition levels (looped?)");
1753 err = snd_config_search_definition(root, "pcm", str, &pcm_conf);
1755 SNDERR("Unknown slave PCM %s", str);
1758 err = _snd_pcm_direct_get_slave_ipc_offset(root, pcm_conf,
1761 snd_config_delete(pcm_conf);
1765 #if 0 /* for debug purposes */
1768 snd_output_stdio_attach(&out, stderr, 0);
1769 snd_config_save(sconf, out);
1770 snd_output_close(out);
/* follow an embedded slave.pcm node or a named pcm_slave reference */
1774 if (snd_config_search(sconf, "slave", &pcm_conf) >= 0) {
1775 if (snd_config_search(pcm_conf, "pcm", &pcm_conf) >= 0) {
1776 return _snd_pcm_direct_get_slave_ipc_offset(root,
1781 if (snd_config_get_string(pcm_conf, &str) >= 0 &&
1782 snd_config_search_definition(root, "pcm_slave",
1783 str, &pcm_conf) >= 0) {
1784 if (snd_config_search(pcm_conf, "pcm",
1787 _snd_pcm_direct_get_slave_ipc_offset(
1788 root, pcm_conf2, direction, hop + 1);
1789 snd_config_delete(pcm_conf);
1792 snd_config_delete(pcm_conf);
/* leaf level: this should be a type-"hw" PCM; collect its address */
1797 snd_config_for_each(i, next, sconf) {
1798 snd_config_t *n = snd_config_iterator_entry(i);
1799 const char *id, *str;
1800 if (snd_config_get_id(n, &id) < 0)
1802 if (strcmp(id, "type") == 0) {
1803 err = snd_config_get_string(n, &str);
1805 SNDERR("Invalid value for PCM type definition\n");
1808 if (strcmp(str, "hw")) {
1809 SNDERR("Invalid type '%s' for slave PCM\n", str);
/* card may be given as a numeric index or as a card name string */
1814 if (strcmp(id, "card") == 0) {
1815 err = snd_config_get_integer(n, &card);
1817 err = snd_config_get_string(n, &str);
1819 SNDERR("Invalid type for %s", id);
1822 card = snd_card_get_index(str);
1824 SNDERR("Invalid value for %s", id);
1830 if (strcmp(id, "device") == 0) {
1831 err = snd_config_get_integer(n, &device);
1833 SNDERR("Invalid type for %s", id);
1838 if (strcmp(id, "subdevice") == 0) {
1839 err = snd_config_get_integer(n, &subdevice);
1841 SNDERR("Invalid type for %s", id);
/* pack direction/device/subdevice/card into disjoint bit ranges */
1853 return (direction << 1) + (device << 2) + (subdevice << 8) + (card << 12);
/* Convenience wrapper: start the recursive slave lookup above at hop
 * level 0. */
1856 static int snd_pcm_direct_get_slave_ipc_offset(snd_config_t *root,
1857 snd_config_t *sconf,
1860 return _snd_pcm_direct_get_slave_ipc_offset(root, sconf, direction, 0);
/* Parse the dmix/dshare/dsnoop plugin configuration 'conf' into 'rec':
 * IPC identity (ipc_key/ipc_perm/ipc_gid), the slave and bindings
 * nodes, and the tuning options (slowptr, max_periods, var_periodsize,
 * direct_memory_access, hw_ptr_alignment).
 * Returns 0 on success or a negative error code. */
1863 int snd_pcm_direct_parse_open_conf(snd_config_t *root, snd_config_t *conf,
1864 int stream, struct snd_pcm_direct_open_conf *rec)
1866 snd_config_iterator_t i, next;
1867 int ipc_key_add_uid = 0;
/* defaults */
1872 rec->bindings = NULL;
1874 rec->ipc_perm = 0600;
1877 rec->max_periods = 0;
1878 rec->var_periodsize = 0;
1879 rec->direct_memory_access = 1;
1880 rec->hw_ptr_alignment = SND_PCM_HW_PTR_ALIGNMENT_AUTO;
/* a global default may pre-set max_periods */
1883 if (snd_config_search(root, "defaults.pcm.dmix_max_periods", &n) >= 0) {
1885 err = snd_config_get_integer(n, &val);
1887 rec->max_periods = val;
1890 snd_config_for_each(i, next, conf) {
1892 n = snd_config_iterator_entry(i);
1893 if (snd_config_get_id(n, &id) < 0)
1895 if (snd_pcm_conf_generic_id(id))
/* ipc_key: integer key shared by all clients of the same slave */
1897 if (strcmp(id, "ipc_key") == 0) {
1899 err = snd_config_get_integer(n, &key);
1901 SNDERR("The field ipc_key must be an integer type");
/* ipc_perm: access mode for the IPC objects, rwx bits only */
1908 if (strcmp(id, "ipc_perm") == 0) {
1910 err = snd_config_get_integer(n, &perm);
1912 SNDERR("Invalid type for %s", id);
1915 if ((perm & ~0777) != 0) {
1916 SNDERR("The field ipc_perm must be a valid file permission");
1919 rec->ipc_perm = perm;
/* hw_ptr_alignment: no | roundup | rounddown | auto */
1922 if (strcmp(id, "hw_ptr_alignment") == 0) {
1924 err = snd_config_get_string(n, &str);
1926 SNDERR("Invalid type for %s", id);
1929 if (strcmp(str, "no") == 0)
1930 rec->hw_ptr_alignment = SND_PCM_HW_PTR_ALIGNMENT_NO;
1931 else if (strcmp(str, "roundup") == 0)
1932 rec->hw_ptr_alignment = SND_PCM_HW_PTR_ALIGNMENT_ROUNDUP;
1933 else if (strcmp(str, "rounddown") == 0)
1934 rec->hw_ptr_alignment = SND_PCM_HW_PTR_ALIGNMENT_ROUNDDOWN;
1935 else if (strcmp(str, "auto") == 0)
1936 rec->hw_ptr_alignment = SND_PCM_HW_PTR_ALIGNMENT_AUTO;
1938 SNDERR("The field hw_ptr_alignment is invalid : %s", str);
/* ipc_gid: group owning the IPC objects, by name or numeric id */
1944 if (strcmp(id, "ipc_gid") == 0) {
1947 err = snd_config_get_ascii(n, &group);
1949 SNDERR("The field ipc_gid must be a valid group");
/* non-numeric value: resolve the group name via getgrnam_r() */
1957 if (isdigit(*group) == 0) {
1958 long clen = sysconf(_SC_GETGR_R_SIZE_MAX);
1959 size_t len = (clen == -1) ? 1024 : (size_t)clen;
1960 struct group grp, *pgrp;
1961 char *buffer = (char *)malloc(len);
1964 int st = getgrnam_r(group, &grp, buffer, len, &pgrp);
1965 if (st != 0 || !pgrp) {
1966 SNDERR("The field ipc_gid must be a valid group (create group %s)", group);
1970 rec->ipc_gid = pgrp->gr_gid;
1973 rec->ipc_gid = strtol(group, &endp, 10);
/* ipc_key_add_uid: make the key unique per user */
1978 if (strcmp(id, "ipc_key_add_uid") == 0) {
1979 if ((err = snd_config_get_bool(n)) < 0) {
1980 SNDERR("The field ipc_key_add_uid must be a boolean type");
1983 ipc_key_add_uid = err;
1986 if (strcmp(id, "slave") == 0) {
1990 if (strcmp(id, "bindings") == 0) {
1994 if (strcmp(id, "slowptr") == 0) {
1995 err = snd_config_get_bool(n);
2001 if (strcmp(id, "max_periods") == 0) {
2003 err = snd_config_get_integer(n, &val);
2006 rec->max_periods = val;
2009 if (strcmp(id, "var_periodsize") == 0) {
2010 err = snd_config_get_bool(n);
2013 rec->var_periodsize = err;
2016 if (strcmp(id, "direct_memory_access") == 0) {
2017 err = snd_config_get_bool(n);
2020 rec->direct_memory_access = err;
2023 SNDERR("Unknown field %s", id);
2027 SNDERR("slave is not defined");
2030 if (!rec->ipc_key) {
2031 SNDERR("Unique IPC key is not defined");
/* finalize the key: optional per-user offset plus the slave device
 * offset so different slaves never share IPC objects */
2034 if (ipc_key_add_uid)
2035 rec->ipc_key += getuid();
2036 err = snd_pcm_direct_get_slave_ipc_offset(root, conf, stream);
2039 rec->ipc_key += err;
/* Re-align the cached slave pointers after a reset according to the
 * configured hw_ptr_alignment policy:
 * - roundup, or auto with a small buffer (<= 2 periods): round
 *   slave_appl_ptr up to the next period boundary;
 * - rounddown, or auto with a low-latency period time (period shorter
 *   than LOW_LATENCY_PERIOD_TIME ms): snap both slave_appl_ptr and
 *   slave_hw_ptr down to the last period boundary of slave_hw_ptr. */
2044 void snd_pcm_direct_reset_slave_ptr(snd_pcm_t *pcm, snd_pcm_direct_t *dmix)
2047 if (dmix->hw_ptr_alignment == SND_PCM_HW_PTR_ALIGNMENT_ROUNDUP ||
2048 (dmix->hw_ptr_alignment == SND_PCM_HW_PTR_ALIGNMENT_AUTO &&
2049 pcm->buffer_size <= pcm->period_size * 2))
2050 dmix->slave_appl_ptr =
2051 ((dmix->slave_appl_ptr + dmix->slave_period_size - 1) /
2052 dmix->slave_period_size) * dmix->slave_period_size;
2053 else if (dmix->hw_ptr_alignment == SND_PCM_HW_PTR_ALIGNMENT_ROUNDDOWN ||
2054 (dmix->hw_ptr_alignment == SND_PCM_HW_PTR_ALIGNMENT_AUTO &&
2055 (dmix->slave_period_size * SEC_TO_MS) /
2056 pcm->rate < LOW_LATENCY_PERIOD_TIME))
2057 dmix->slave_appl_ptr = dmix->slave_hw_ptr =
2058 ((dmix->slave_hw_ptr / dmix->slave_period_size) *
2059 dmix->slave_period_size);