1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * core.c - Implementation of core module of MOST Linux driver stack
4  *
5  * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
6  */
7
8 #include <linux/module.h>
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/init.h>
12 #include <linux/device.h>
13 #include <linux/list.h>
14 #include <linux/poll.h>
15 #include <linux/wait.h>
16 #include <linux/kobject.h>
17 #include <linux/mutex.h>
18 #include <linux/completion.h>
19 #include <linux/sysfs.h>
20 #include <linux/kthread.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/idr.h>
23
24 #include "most.h"
25
26 #define MAX_CHANNELS    64
27 #define STRING_SIZE     80
28
29 static struct ida mdev_id;
30 static int dummy_num_buffers;
31
32 static struct mostcore {
33         struct device dev;
34         struct device_driver drv;
35         struct bus_type bus;
36         struct list_head comp_list;
37 } mc;
38
39 #define to_driver(d) container_of(d, struct mostcore, drv)
40
41 struct pipe {
42         struct most_component *comp;
43         int refs;
44         int num_buffers;
45 };
46
47 struct most_channel {
48         struct device dev;
49         struct completion cleanup;
50         atomic_t mbo_ref;
51         atomic_t mbo_nq_level;
52         u16 channel_id;
53         char name[STRING_SIZE];
54         bool is_poisoned;
55         struct mutex start_mutex; /* channel activation synchronization */
56         struct mutex nq_mutex; /* nq thread synchronization */
57         int is_starving;
58         struct most_interface *iface;
59         struct most_channel_config cfg;
60         bool keep_mbo;
61         bool enqueue_halt;
62         struct list_head fifo;
63         spinlock_t fifo_lock; /* fifo access synchronization */
64         struct list_head halt_fifo;
65         struct list_head list;
66         struct pipe pipe0;
67         struct pipe pipe1;
68         struct list_head trash_fifo;
69         struct task_struct *hdm_enqueue_task;
70         wait_queue_head_t hdm_fifo_wq;
71
72 };
73
74 #define to_channel(d) container_of(d, struct most_channel, dev)
75
76 struct interface_private {
77         int dev_id;
78         char name[STRING_SIZE];
79         struct most_channel *channel[MAX_CHANNELS];
80         struct list_head channel_list;
81 };
82
83 static const struct {
84         int most_ch_data_type;
85         const char *name;
86 } ch_data_type[] = {
87         { MOST_CH_CONTROL, "control" },
88         { MOST_CH_ASYNC, "async" },
89         { MOST_CH_SYNC, "sync" },
90         { MOST_CH_ISOC, "isoc"},
91         { MOST_CH_ISOC, "isoc_avp"},
92 };
93
94 /**
95  * list_pop_mbo - retrieves the first MBO of the list and removes it
96  * @ptr: the list head to grab the MBO from.
97  */
98 #define list_pop_mbo(ptr)                                               \
99 ({                                                                      \
100         struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);     \
101         list_del(&_mbo->list);                                          \
102         _mbo;                                                           \
103 })
104
105 /**
106  * most_free_mbo_coherent - free an MBO and its coherent buffer
107  * @mbo: most buffer
108  */
109 static void most_free_mbo_coherent(struct mbo *mbo)
110 {
111         struct most_channel *c = mbo->context;
112         u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
113
114         if (c->iface->dma_free)
115                 c->iface->dma_free(mbo, coherent_buf_size);
116         else
117                 kfree(mbo->virt_address);
118         kfree(mbo);
119         if (atomic_sub_and_test(1, &c->mbo_ref))
120                 complete(&c->cleanup);
121 }
122
123 /**
124  * flush_channel_fifos - clear the channel fifos
125  * @c: pointer to channel object
126  */
127 static void flush_channel_fifos(struct most_channel *c)
128 {
129         unsigned long flags, hf_flags;
130         struct mbo *mbo, *tmp;
131
132         if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
133                 return;
134
135         spin_lock_irqsave(&c->fifo_lock, flags);
136         list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
137                 list_del(&mbo->list);
138                 spin_unlock_irqrestore(&c->fifo_lock, flags);
139                 most_free_mbo_coherent(mbo);
140                 spin_lock_irqsave(&c->fifo_lock, flags);
141         }
142         spin_unlock_irqrestore(&c->fifo_lock, flags);
143
144         spin_lock_irqsave(&c->fifo_lock, hf_flags);
145         list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
146                 list_del(&mbo->list);
147                 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
148                 most_free_mbo_coherent(mbo);
149                 spin_lock_irqsave(&c->fifo_lock, hf_flags);
150         }
151         spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
152
153         if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
154                 dev_warn(&mc.dev, "fifo | trash fifo not empty\n");
155 }
156
157 /**
158  * flush_trash_fifo - clear the trash fifo
159  * @c: pointer to channel object
160  */
161 static int flush_trash_fifo(struct most_channel *c)
162 {
163         struct mbo *mbo, *tmp;
164         unsigned long flags;
165
166         spin_lock_irqsave(&c->fifo_lock, flags);
167         list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
168                 list_del(&mbo->list);
169                 spin_unlock_irqrestore(&c->fifo_lock, flags);
170                 most_free_mbo_coherent(mbo);
171                 spin_lock_irqsave(&c->fifo_lock, flags);
172         }
173         spin_unlock_irqrestore(&c->fifo_lock, flags);
174         return 0;
175 }
176
177 static ssize_t available_directions_show(struct device *dev,
178                                          struct device_attribute *attr,
179                                          char *buf)
180 {
181         struct most_channel *c = to_channel(dev);
182         unsigned int i = c->channel_id;
183
184         strcpy(buf, "");
185         if (c->iface->channel_vector[i].direction & MOST_CH_RX)
186                 strcat(buf, "rx ");
187         if (c->iface->channel_vector[i].direction & MOST_CH_TX)
188                 strcat(buf, "tx ");
189         strcat(buf, "\n");
190         return strlen(buf);
191 }
192
193 static ssize_t available_datatypes_show(struct device *dev,
194                                         struct device_attribute *attr,
195                                         char *buf)
196 {
197         struct most_channel *c = to_channel(dev);
198         unsigned int i = c->channel_id;
199
200         strcpy(buf, "");
201         if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
202                 strcat(buf, "control ");
203         if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
204                 strcat(buf, "async ");
205         if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
206                 strcat(buf, "sync ");
207         if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
208                 strcat(buf, "isoc ");
209         strcat(buf, "\n");
210         return strlen(buf);
211 }
212
213 static ssize_t number_of_packet_buffers_show(struct device *dev,
214                                              struct device_attribute *attr,
215                                              char *buf)
216 {
217         struct most_channel *c = to_channel(dev);
218         unsigned int i = c->channel_id;
219
220         return snprintf(buf, PAGE_SIZE, "%d\n",
221                         c->iface->channel_vector[i].num_buffers_packet);
222 }
223
224 static ssize_t number_of_stream_buffers_show(struct device *dev,
225                                              struct device_attribute *attr,
226                                              char *buf)
227 {
228         struct most_channel *c = to_channel(dev);
229         unsigned int i = c->channel_id;
230
231         return snprintf(buf, PAGE_SIZE, "%d\n",
232                         c->iface->channel_vector[i].num_buffers_streaming);
233 }
234
235 static ssize_t size_of_packet_buffer_show(struct device *dev,
236                                           struct device_attribute *attr,
237                                           char *buf)
238 {
239         struct most_channel *c = to_channel(dev);
240         unsigned int i = c->channel_id;
241
242         return snprintf(buf, PAGE_SIZE, "%d\n",
243                         c->iface->channel_vector[i].buffer_size_packet);
244 }
245
246 static ssize_t size_of_stream_buffer_show(struct device *dev,
247                                           struct device_attribute *attr,
248                                           char *buf)
249 {
250         struct most_channel *c = to_channel(dev);
251         unsigned int i = c->channel_id;
252
253         return snprintf(buf, PAGE_SIZE, "%d\n",
254                         c->iface->channel_vector[i].buffer_size_streaming);
255 }
256
257 static ssize_t channel_starving_show(struct device *dev,
258                                      struct device_attribute *attr,
259                                      char *buf)
260 {
261         struct most_channel *c = to_channel(dev);
262
263         return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
264 }
265
266 static ssize_t set_number_of_buffers_show(struct device *dev,
267                                           struct device_attribute *attr,
268                                           char *buf)
269 {
270         struct most_channel *c = to_channel(dev);
271
272         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
273 }
274
275 static ssize_t set_buffer_size_show(struct device *dev,
276                                     struct device_attribute *attr,
277                                     char *buf)
278 {
279         struct most_channel *c = to_channel(dev);
280
281         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
282 }
283
284 static ssize_t set_direction_show(struct device *dev,
285                                   struct device_attribute *attr,
286                                   char *buf)
287 {
288         struct most_channel *c = to_channel(dev);
289
290         if (c->cfg.direction & MOST_CH_TX)
291                 return snprintf(buf, PAGE_SIZE, "tx\n");
292         else if (c->cfg.direction & MOST_CH_RX)
293                 return snprintf(buf, PAGE_SIZE, "rx\n");
294         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
295 }
296
297 static ssize_t set_datatype_show(struct device *dev,
298                                  struct device_attribute *attr,
299                                  char *buf)
300 {
301         int i;
302         struct most_channel *c = to_channel(dev);
303
304         for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
305                 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
306                         return snprintf(buf, PAGE_SIZE, "%s",
307                                         ch_data_type[i].name);
308         }
309         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
310 }
311
312 static ssize_t set_subbuffer_size_show(struct device *dev,
313                                        struct device_attribute *attr,
314                                        char *buf)
315 {
316         struct most_channel *c = to_channel(dev);
317
318         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
319 }
320
321 static ssize_t set_packets_per_xact_show(struct device *dev,
322                                          struct device_attribute *attr,
323                                          char *buf)
324 {
325         struct most_channel *c = to_channel(dev);
326
327         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
328 }
329
330 static ssize_t set_dbr_size_show(struct device *dev,
331                                  struct device_attribute *attr, char *buf)
332 {
333         struct most_channel *c = to_channel(dev);
334
335         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
336 }
337
338 #define to_dev_attr(a) container_of(a, struct device_attribute, attr)
339 static umode_t channel_attr_is_visible(struct kobject *kobj,
340                                        struct attribute *attr, int index)
341 {
342         struct device_attribute *dev_attr = to_dev_attr(attr);
343         struct device *dev = kobj_to_dev(kobj);
344         struct most_channel *c = to_channel(dev);
345
346         if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
347             (c->iface->interface != ITYPE_MEDIALB_DIM2))
348                 return 0;
349         if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
350             (c->iface->interface != ITYPE_USB))
351                 return 0;
352
353         return attr->mode;
354 }
355
356 #define DEV_ATTR(_name)  (&dev_attr_##_name.attr)
357
358 static DEVICE_ATTR_RO(available_directions);
359 static DEVICE_ATTR_RO(available_datatypes);
360 static DEVICE_ATTR_RO(number_of_packet_buffers);
361 static DEVICE_ATTR_RO(number_of_stream_buffers);
362 static DEVICE_ATTR_RO(size_of_stream_buffer);
363 static DEVICE_ATTR_RO(size_of_packet_buffer);
364 static DEVICE_ATTR_RO(channel_starving);
365 static DEVICE_ATTR_RO(set_buffer_size);
366 static DEVICE_ATTR_RO(set_number_of_buffers);
367 static DEVICE_ATTR_RO(set_direction);
368 static DEVICE_ATTR_RO(set_datatype);
369 static DEVICE_ATTR_RO(set_subbuffer_size);
370 static DEVICE_ATTR_RO(set_packets_per_xact);
371 static DEVICE_ATTR_RO(set_dbr_size);
372
373 static struct attribute *channel_attrs[] = {
374         DEV_ATTR(available_directions),
375         DEV_ATTR(available_datatypes),
376         DEV_ATTR(number_of_packet_buffers),
377         DEV_ATTR(number_of_stream_buffers),
378         DEV_ATTR(size_of_stream_buffer),
379         DEV_ATTR(size_of_packet_buffer),
380         DEV_ATTR(channel_starving),
381         DEV_ATTR(set_buffer_size),
382         DEV_ATTR(set_number_of_buffers),
383         DEV_ATTR(set_direction),
384         DEV_ATTR(set_datatype),
385         DEV_ATTR(set_subbuffer_size),
386         DEV_ATTR(set_packets_per_xact),
387         DEV_ATTR(set_dbr_size),
388         NULL,
389 };
390
391 static struct attribute_group channel_attr_group = {
392         .attrs = channel_attrs,
393         .is_visible = channel_attr_is_visible,
394 };
395
396 static const struct attribute_group *channel_attr_groups[] = {
397         &channel_attr_group,
398         NULL,
399 };
400
401 static ssize_t description_show(struct device *dev,
402                                 struct device_attribute *attr,
403                                 char *buf)
404 {
405         struct most_interface *iface = to_most_interface(dev);
406
407         return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
408 }
409
410 static ssize_t interface_show(struct device *dev,
411                               struct device_attribute *attr,
412                               char *buf)
413 {
414         struct most_interface *iface = to_most_interface(dev);
415
416         switch (iface->interface) {
417         case ITYPE_LOOPBACK:
418                 return snprintf(buf, PAGE_SIZE, "loopback\n");
419         case ITYPE_I2C:
420                 return snprintf(buf, PAGE_SIZE, "i2c\n");
421         case ITYPE_I2S:
422                 return snprintf(buf, PAGE_SIZE, "i2s\n");
423         case ITYPE_TSI:
424                 return snprintf(buf, PAGE_SIZE, "tsi\n");
425         case ITYPE_HBI:
426                 return snprintf(buf, PAGE_SIZE, "hbi\n");
427         case ITYPE_MEDIALB_DIM:
428                 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
429         case ITYPE_MEDIALB_DIM2:
430                 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
431         case ITYPE_USB:
432                 return snprintf(buf, PAGE_SIZE, "usb\n");
433         case ITYPE_PCIE:
434                 return snprintf(buf, PAGE_SIZE, "pcie\n");
435         }
436         return snprintf(buf, PAGE_SIZE, "unknown\n");
437 }
438
439 static DEVICE_ATTR_RO(description);
440 static DEVICE_ATTR_RO(interface);
441
442 static struct attribute *interface_attrs[] = {
443         DEV_ATTR(description),
444         DEV_ATTR(interface),
445         NULL,
446 };
447
448 static struct attribute_group interface_attr_group = {
449         .attrs = interface_attrs,
450 };
451
452 static const struct attribute_group *interface_attr_groups[] = {
453         &interface_attr_group,
454         NULL,
455 };
456
457 static struct most_component *match_component(char *name)
458 {
459         struct most_component *comp;
460
461         list_for_each_entry(comp, &mc.comp_list, list) {
462                 if (!strcmp(comp->name, name))
463                         return comp;
464         }
465         return NULL;
466 }
467
468 struct show_links_data {
469         int offs;
470         char *buf;
471 };
472
473 static int print_links(struct device *dev, void *data)
474 {
475         struct show_links_data *d = data;
476         int offs = d->offs;
477         char *buf = d->buf;
478         struct most_channel *c;
479         struct most_interface *iface = to_most_interface(dev);
480
481         list_for_each_entry(c, &iface->p->channel_list, list) {
482                 if (c->pipe0.comp) {
483                         offs += snprintf(buf + offs,
484                                          PAGE_SIZE - offs,
485                                          "%s:%s:%s\n",
486                                          c->pipe0.comp->name,
487                                          dev_name(&iface->dev),
488                                          dev_name(&c->dev));
489                 }
490                 if (c->pipe1.comp) {
491                         offs += snprintf(buf + offs,
492                                          PAGE_SIZE - offs,
493                                          "%s:%s:%s\n",
494                                          c->pipe1.comp->name,
495                                          dev_name(&iface->dev),
496                                          dev_name(&c->dev));
497                 }
498         }
499         d->offs = offs;
500         return 0;
501 }
502
503 static ssize_t links_show(struct device_driver *drv, char *buf)
504 {
505         struct show_links_data d = { .buf = buf };
506
507         bus_for_each_dev(&mc.bus, NULL, &d, print_links);
508         return d.offs;
509 }
510
511 static ssize_t components_show(struct device_driver *drv, char *buf)
512 {
513         struct most_component *comp;
514         int offs = 0;
515
516         list_for_each_entry(comp, &mc.comp_list, list) {
517                 offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
518                                  comp->name);
519         }
520         return offs;
521 }
522
523 /**
524  * get_channel - get pointer to channel
525  * @mdev: name of the device interface
526  * @mdev_ch: name of channel
527  */
528 static struct most_channel *get_channel(char *mdev, char *mdev_ch)
529 {
530         struct device *dev = NULL;
531         struct most_interface *iface;
532         struct most_channel *c, *tmp;
533
534         dev = bus_find_device_by_name(&mc.bus, NULL, mdev);
535         if (!dev)
536                 return NULL;
537         iface = to_most_interface(dev);
538         list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
539                 if (!strcmp(dev_name(&c->dev), mdev_ch))
540                         return c;
541         }
542         return NULL;
543 }
544
545 static
546 inline int link_channel_to_component(struct most_channel *c,
547                                      struct most_component *comp,
548                                      char *name,
549                                      char *comp_param)
550 {
551         int ret;
552         struct most_component **comp_ptr;
553
554         if (!c->pipe0.comp)
555                 comp_ptr = &c->pipe0.comp;
556         else if (!c->pipe1.comp)
557                 comp_ptr = &c->pipe1.comp;
558         else
559                 return -ENOSPC;
560
561         *comp_ptr = comp;
562         ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
563                                   comp_param);
564         if (ret) {
565                 *comp_ptr = NULL;
566                 return ret;
567         }
568         return 0;
569 }
570
571 int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
572 {
573         struct most_channel *c = get_channel(mdev, mdev_ch);
574
575         if (!c)
576                 return -ENODEV;
577         c->cfg.buffer_size = val;
578         return 0;
579 }
580
581 int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
582 {
583         struct most_channel *c = get_channel(mdev, mdev_ch);
584
585         if (!c)
586                 return -ENODEV;
587         c->cfg.subbuffer_size = val;
588         return 0;
589 }
590
591 int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
592 {
593         struct most_channel *c = get_channel(mdev, mdev_ch);
594
595         if (!c)
596                 return -ENODEV;
597         c->cfg.dbr_size = val;
598         return 0;
599 }
600
601 int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
602 {
603         struct most_channel *c = get_channel(mdev, mdev_ch);
604
605         if (!c)
606                 return -ENODEV;
607         c->cfg.num_buffers = val;
608         return 0;
609 }
610
611 int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
612 {
613         int i;
614         struct most_channel *c = get_channel(mdev, mdev_ch);
615
616         if (!c)
617                 return -ENODEV;
618         for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
619                 if (!strcmp(buf, ch_data_type[i].name)) {
620                         c->cfg.data_type = ch_data_type[i].most_ch_data_type;
621                         break;
622                 }
623         }
624
625         if (i == ARRAY_SIZE(ch_data_type))
626                 dev_warn(&mc.dev, "invalid attribute settings\n");
627         return 0;
628 }
629
630 int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
631 {
632         struct most_channel *c = get_channel(mdev, mdev_ch);
633
634         if (!c)
635                 return -ENODEV;
636         if (!strcmp(buf, "dir_rx")) {
637                 c->cfg.direction = MOST_CH_RX;
638         } else if (!strcmp(buf, "rx")) {
639                 c->cfg.direction = MOST_CH_RX;
640         } else if (!strcmp(buf, "dir_tx")) {
641                 c->cfg.direction = MOST_CH_TX;
642         } else if (!strcmp(buf, "tx")) {
643                 c->cfg.direction = MOST_CH_TX;
644         } else {
645                 dev_err(&mc.dev, "Invalid direction\n");
646                 return -ENODATA;
647         }
648         return 0;
649 }
650
651 int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
652 {
653         struct most_channel *c = get_channel(mdev, mdev_ch);
654
655         if (!c)
656                 return -ENODEV;
657         c->cfg.packets_per_xact = val;
658         return 0;
659 }
660
661 int most_cfg_complete(char *comp_name)
662 {
663         struct most_component *comp;
664
665         comp = match_component(comp_name);
666         if (!comp)
667                 return -ENODEV;
668
669         return comp->cfg_complete();
670 }
671
672 int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
673                   char *comp_param)
674 {
675         struct most_channel *c = get_channel(mdev, mdev_ch);
676         struct most_component *comp = match_component(comp_name);
677
678         if (!c || !comp)
679                 return -ENODEV;
680
681         return link_channel_to_component(c, comp, link_name, comp_param);
682 }
683
684 int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
685 {
686         struct most_channel *c;
687         struct most_component *comp;
688
689         comp = match_component(comp_name);
690         if (!comp)
691                 return -ENODEV;
692         c = get_channel(mdev, mdev_ch);
693         if (!c)
694                 return -ENODEV;
695
696         if (comp->disconnect_channel(c->iface, c->channel_id))
697                 return -EIO;
698         if (c->pipe0.comp == comp)
699                 c->pipe0.comp = NULL;
700         if (c->pipe1.comp == comp)
701                 c->pipe1.comp = NULL;
702         return 0;
703 }
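
/*
 * Illustrative sketch (not part of this driver): the order in which the
 * configfs layer could use the helpers above to configure a channel and
 * create a link.  The device name "mdev0", the channel name "ep8f", the
 * component name "cdev" and the NULL comp_param are hypothetical
 * placeholders.
 */
static int __maybe_unused example_setup_link(void)
{
        int ret;

        ret = most_set_cfg_direction("mdev0", "ep8f", "rx");
        if (ret)
                return ret;
        ret = most_set_cfg_datatype("mdev0", "ep8f", "sync");
        if (ret)
                return ret;
        most_set_cfg_num_buffers("mdev0", "ep8f", 8);
        most_set_cfg_buffer_size("mdev0", "ep8f", 512);
        most_set_cfg_subbuffer_size("mdev0", "ep8f", 4);
        /* hand the configured channel over to the chosen component */
        return most_add_link("mdev0", "ep8f", "cdev", "my_link", NULL);
}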
704
705 #define DRV_ATTR(_name)  (&driver_attr_##_name.attr)
706
707 static DRIVER_ATTR_RO(links);
708 static DRIVER_ATTR_RO(components);
709
710 static struct attribute *mc_attrs[] = {
711         DRV_ATTR(links),
712         DRV_ATTR(components),
713         NULL,
714 };
715
716 static struct attribute_group mc_attr_group = {
717         .attrs = mc_attrs,
718 };
719
720 static const struct attribute_group *mc_attr_groups[] = {
721         &mc_attr_group,
722         NULL,
723 };
724
725 static int most_match(struct device *dev, struct device_driver *drv)
726 {
727         if (!strcmp(dev_name(dev), "most"))
728                 return 0;
729         else
730                 return 1;
731 }
732
733 static inline void trash_mbo(struct mbo *mbo)
734 {
735         unsigned long flags;
736         struct most_channel *c = mbo->context;
737
738         spin_lock_irqsave(&c->fifo_lock, flags);
739         list_add(&mbo->list, &c->trash_fifo);
740         spin_unlock_irqrestore(&c->fifo_lock, flags);
741 }
742
743 static bool hdm_mbo_ready(struct most_channel *c)
744 {
745         bool empty;
746
747         if (c->enqueue_halt)
748                 return false;
749
750         spin_lock_irq(&c->fifo_lock);
751         empty = list_empty(&c->halt_fifo);
752         spin_unlock_irq(&c->fifo_lock);
753
754         return !empty;
755 }
756
757 static void nq_hdm_mbo(struct mbo *mbo)
758 {
759         unsigned long flags;
760         struct most_channel *c = mbo->context;
761
762         spin_lock_irqsave(&c->fifo_lock, flags);
763         list_add_tail(&mbo->list, &c->halt_fifo);
764         spin_unlock_irqrestore(&c->fifo_lock, flags);
765         wake_up_interruptible(&c->hdm_fifo_wq);
766 }
767
768 static int hdm_enqueue_thread(void *data)
769 {
770         struct most_channel *c = data;
771         struct mbo *mbo;
772         int ret;
773         typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
774
775         while (likely(!kthread_should_stop())) {
776                 wait_event_interruptible(c->hdm_fifo_wq,
777                                          hdm_mbo_ready(c) ||
778                                          kthread_should_stop());
779
780                 mutex_lock(&c->nq_mutex);
781                 spin_lock_irq(&c->fifo_lock);
782                 if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
783                         spin_unlock_irq(&c->fifo_lock);
784                         mutex_unlock(&c->nq_mutex);
785                         continue;
786                 }
787
788                 mbo = list_pop_mbo(&c->halt_fifo);
789                 spin_unlock_irq(&c->fifo_lock);
790
791                 if (c->cfg.direction == MOST_CH_RX)
792                         mbo->buffer_length = c->cfg.buffer_size;
793
794                 ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
795                 mutex_unlock(&c->nq_mutex);
796
797                 if (unlikely(ret)) {
798                         dev_err(&mc.dev, "hdm enqueue failed\n");
799                         nq_hdm_mbo(mbo);
800                         c->hdm_enqueue_task = NULL;
801                         return 0;
802                 }
803         }
804
805         return 0;
806 }
807
808 static int run_enqueue_thread(struct most_channel *c, int channel_id)
809 {
810         struct task_struct *task =
811                 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
812                             channel_id);
813
814         if (IS_ERR(task))
815                 return PTR_ERR(task);
816
817         c->hdm_enqueue_task = task;
818         return 0;
819 }
820
821 /**
822  * arm_mbo - recycle MBO for further usage
823  * @mbo: most buffer
824  *
825  * This puts an MBO back to the list to have it ready for upcoming
826  * tx transactions.
827  *
828  * In case the MBO belongs to a channel that recently has been
829  * poisoned, the MBO is scheduled to be trashed.
830  * Calls the completion handler of an attached component.
831  */
832 static void arm_mbo(struct mbo *mbo)
833 {
834         unsigned long flags;
835         struct most_channel *c;
836
837         c = mbo->context;
838
839         if (c->is_poisoned) {
840                 trash_mbo(mbo);
841                 return;
842         }
843
844         spin_lock_irqsave(&c->fifo_lock, flags);
845         ++*mbo->num_buffers_ptr;
846         list_add_tail(&mbo->list, &c->fifo);
847         spin_unlock_irqrestore(&c->fifo_lock, flags);
848
849         if (c->pipe0.refs && c->pipe0.comp->tx_completion)
850                 c->pipe0.comp->tx_completion(c->iface, c->channel_id);
851
852         if (c->pipe1.refs && c->pipe1.comp->tx_completion)
853                 c->pipe1.comp->tx_completion(c->iface, c->channel_id);
854 }
855
856 /**
857  * arm_mbo_chain - helper function that arms an MBO chain for the HDM
858  * @c: pointer to interface channel
859  * @dir: direction of the channel
860  * @compl: pointer to completion function
861  *
862  * This allocates buffer objects, including their DMA coherent payload
863  * buffers, and puts them in the fifo.
864  * Buffers of Rx channels are put in the kthread fifo, hence immediately
865  * submitted to the HDM.
866  *
867  * Returns the number of allocated and enqueued MBOs.
868  */
869 static int arm_mbo_chain(struct most_channel *c, int dir,
870                          void (*compl)(struct mbo *))
871 {
872         unsigned int i;
873         struct mbo *mbo;
874         unsigned long flags;
875         u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
876
877         atomic_set(&c->mbo_nq_level, 0);
878
879         for (i = 0; i < c->cfg.num_buffers; i++) {
880                 mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
881                 if (!mbo)
882                         goto flush_fifos;
883
884                 mbo->context = c;
885                 mbo->ifp = c->iface;
886                 mbo->hdm_channel_id = c->channel_id;
887                 if (c->iface->dma_alloc) {
888                         mbo->virt_address =
889                                 c->iface->dma_alloc(mbo, coherent_buf_size);
890                 } else {
891                         mbo->virt_address =
892                                 kzalloc(coherent_buf_size, GFP_KERNEL);
893                 }
894                 if (!mbo->virt_address)
895                         goto release_mbo;
896
897                 mbo->complete = compl;
898                 mbo->num_buffers_ptr = &dummy_num_buffers;
899                 if (dir == MOST_CH_RX) {
900                         nq_hdm_mbo(mbo);
901                         atomic_inc(&c->mbo_nq_level);
902                 } else {
903                         spin_lock_irqsave(&c->fifo_lock, flags);
904                         list_add_tail(&mbo->list, &c->fifo);
905                         spin_unlock_irqrestore(&c->fifo_lock, flags);
906                 }
907         }
908         return c->cfg.num_buffers;
909
910 release_mbo:
911         kfree(mbo);
912
913 flush_fifos:
914         flush_channel_fifos(c);
915         return 0;
916 }
917
918 /**
919  * most_submit_mbo - submits an MBO to fifo
920  * @mbo: most buffer
921  */
922 void most_submit_mbo(struct mbo *mbo)
923 {
924         if (WARN_ONCE(!mbo || !mbo->context,
925                       "bad mbo or missing channel reference\n"))
926                 return;
927
928         nq_hdm_mbo(mbo);
929 }
930 EXPORT_SYMBOL_GPL(most_submit_mbo);
931
932 /**
933  * most_write_completion - write completion handler
934  * @mbo: most buffer
935  *
936  * This recycles the MBO for further usage. In case the channel has been
937  * poisoned, the MBO is scheduled to be trashed.
938  */
939 static void most_write_completion(struct mbo *mbo)
940 {
941         struct most_channel *c;
942
943         c = mbo->context;
944         if (mbo->status == MBO_E_INVAL)
945                 dev_warn(&mc.dev, "Tx MBO status: invalid\n");
946         if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
947                 trash_mbo(mbo);
948         else
949                 arm_mbo(mbo);
950 }
951
952 int channel_has_mbo(struct most_interface *iface, int id,
953                     struct most_component *comp)
954 {
955         struct most_channel *c = iface->p->channel[id];
956         unsigned long flags;
957         int empty;
958
959         if (unlikely(!c))
960                 return -EINVAL;
961
962         if (c->pipe0.refs && c->pipe1.refs &&
963             ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
964              (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
965                 return 0;
966
967         spin_lock_irqsave(&c->fifo_lock, flags);
968         empty = list_empty(&c->fifo);
969         spin_unlock_irqrestore(&c->fifo_lock, flags);
970         return !empty;
971 }
972 EXPORT_SYMBOL_GPL(channel_has_mbo);
973
974 /**
975  * most_get_mbo - get pointer to an MBO of pool
976  * @iface: pointer to interface instance
977  * @id: channel ID
978  * @comp: driver component
979  *
980  * This attempts to get a free buffer out of the channel fifo.
981  * Returns a pointer to MBO on success or NULL otherwise.
982  */
983 struct mbo *most_get_mbo(struct most_interface *iface, int id,
984                          struct most_component *comp)
985 {
986         struct mbo *mbo;
987         struct most_channel *c;
988         unsigned long flags;
989         int *num_buffers_ptr;
990
991         c = iface->p->channel[id];
992         if (unlikely(!c))
993                 return NULL;
994
995         if (c->pipe0.refs && c->pipe1.refs &&
996             ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
997              (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
998                 return NULL;
999
1000         if (comp == c->pipe0.comp)
1001                 num_buffers_ptr = &c->pipe0.num_buffers;
1002         else if (comp == c->pipe1.comp)
1003                 num_buffers_ptr = &c->pipe1.num_buffers;
1004         else
1005                 num_buffers_ptr = &dummy_num_buffers;
1006
1007         spin_lock_irqsave(&c->fifo_lock, flags);
1008         if (list_empty(&c->fifo)) {
1009                 spin_unlock_irqrestore(&c->fifo_lock, flags);
1010                 return NULL;
1011         }
1012         mbo = list_pop_mbo(&c->fifo);
1013         --*num_buffers_ptr;
1014         spin_unlock_irqrestore(&c->fifo_lock, flags);
1015
1016         mbo->num_buffers_ptr = num_buffers_ptr;
1017         mbo->buffer_length = c->cfg.buffer_size;
1018         return mbo;
1019 }
1020 EXPORT_SYMBOL_GPL(most_get_mbo);
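
/*
 * Illustrative sketch (not part of this driver): how a component might
 * drive the Tx path with the helpers above.  The component instance
 * "my_comp", the channel id and the payload are hypothetical; only the
 * channel_has_mbo()/most_get_mbo()/most_put_mbo()/most_submit_mbo()
 * calls are taken from this file.
 */
static int __maybe_unused example_send(struct most_interface *iface, int id,
                                       struct most_component *my_comp,
                                       const void *data, size_t len)
{
        struct mbo *mbo;

        if (channel_has_mbo(iface, id, my_comp) <= 0)
                return -EBUSY;
        mbo = most_get_mbo(iface, id, my_comp); /* NULL if the pool is empty */
        if (!mbo)
                return -EBUSY;
        if (len > mbo->buffer_length) {
                most_put_mbo(mbo);              /* hand the buffer back unused */
                return -EMSGSIZE;
        }
        memcpy(mbo->virt_address, data, len);
        mbo->buffer_length = len;
        most_submit_mbo(mbo);                   /* queue for the enqueue thread */
        return 0;
}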
1021
1022 /**
1023  * most_put_mbo - return buffer to pool
1024  * @mbo: most buffer
1025  */
1026 void most_put_mbo(struct mbo *mbo)
1027 {
1028         struct most_channel *c = mbo->context;
1029
1030         if (c->cfg.direction == MOST_CH_TX) {
1031                 arm_mbo(mbo);
1032                 return;
1033         }
1034         nq_hdm_mbo(mbo);
1035         atomic_inc(&c->mbo_nq_level);
1036 }
1037 EXPORT_SYMBOL_GPL(most_put_mbo);
1038
1039 /**
1040  * most_read_completion - read completion handler
1041  * @mbo: most buffer
1042  *
1043  * This function is called by the HDM when data has been received from the
1044  * hardware and copied to the buffer of the MBO.
1045  *
1046  * In case the channel has been poisoned, it puts the buffer in the trash queue.
1047  * Otherwise, it passes the buffer to a component for further processing.
1048  */
1049 static void most_read_completion(struct mbo *mbo)
1050 {
1051         struct most_channel *c = mbo->context;
1052
1053         if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1054                 trash_mbo(mbo);
1055                 return;
1056         }
1057
1058         if (mbo->status == MBO_E_INVAL) {
1059                 nq_hdm_mbo(mbo);
1060                 atomic_inc(&c->mbo_nq_level);
1061                 return;
1062         }
1063
1064         if (atomic_sub_and_test(1, &c->mbo_nq_level))
1065                 c->is_starving = 1;
1066
1067         if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
1068             c->pipe0.comp->rx_completion(mbo) == 0)
1069                 return;
1070
1071         if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
1072             c->pipe1.comp->rx_completion(mbo) == 0)
1073                 return;
1074
1075         most_put_mbo(mbo);
1076 }
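
/*
 * Illustrative sketch (not part of this driver): a component's
 * rx_completion handler.  Returning 0 tells most_read_completion() above
 * that the MBO has been taken care of, so the component must eventually
 * hand it back via most_put_mbo().  The payload handling is elided.
 */
static int __maybe_unused example_rx_completion(struct mbo *mbo)
{
        /* ... consume mbo->virt_address / mbo->buffer_length here ... */
        most_put_mbo(mbo);      /* re-arm the buffer for the next transfer */
        return 0;
}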
1077
1078 /**
1079  * most_start_channel - prepares a channel for communication
1080  * @iface: pointer to interface instance
1081  * @id: channel ID
1082  * @comp: driver component
1083  *
1084  * This prepares the channel for usage. Cross-checks whether the
1085  * channel's been properly configured.
1086  *
1087  * Returns 0 on success or error code otherwise.
1088  */
1089 int most_start_channel(struct most_interface *iface, int id,
1090                        struct most_component *comp)
1091 {
1092         int num_buffer;
1093         int ret;
1094         struct most_channel *c = iface->p->channel[id];
1095
1096         if (unlikely(!c))
1097                 return -EINVAL;
1098
1099         mutex_lock(&c->start_mutex);
1100         if (c->pipe0.refs + c->pipe1.refs > 0)
1101                 goto out; /* already started by another component */
1102
1103         if (!try_module_get(iface->mod)) {
1104                 dev_err(&mc.dev, "failed to acquire HDM lock\n");
1105                 mutex_unlock(&c->start_mutex);
1106                 return -ENOLCK;
1107         }
1108
1109         c->cfg.extra_len = 0;
1110         if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1111                 dev_err(&mc.dev, "channel configuration failed. Go check settings...\n");
1112                 ret = -EINVAL;
1113                 goto err_put_module;
1114         }
1115
1116         init_waitqueue_head(&c->hdm_fifo_wq);
1117
1118         if (c->cfg.direction == MOST_CH_RX)
1119                 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1120                                            most_read_completion);
1121         else
1122                 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1123                                            most_write_completion);
1124         if (unlikely(!num_buffer)) {
1125                 ret = -ENOMEM;
1126                 goto err_put_module;
1127         }
1128
1129         ret = run_enqueue_thread(c, id);
1130         if (ret)
1131                 goto err_put_module;
1132
1133         c->is_starving = 0;
1134         c->pipe0.num_buffers = c->cfg.num_buffers / 2;
1135         c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
1136         atomic_set(&c->mbo_ref, num_buffer);
1137
1138 out:
1139         if (comp == c->pipe0.comp)
1140                 c->pipe0.refs++;
1141         if (comp == c->pipe1.comp)
1142                 c->pipe1.refs++;
1143         mutex_unlock(&c->start_mutex);
1144         return 0;
1145
1146 err_put_module:
1147         module_put(iface->mod);
1148         mutex_unlock(&c->start_mutex);
1149         return ret;
1150 }
1151 EXPORT_SYMBOL_GPL(most_start_channel);
1152
1153 /**
1154  * most_stop_channel - stops a running channel
1155  * @iface: pointer to interface instance
1156  * @id: channel ID
1157  * @comp: driver component
1158  */
1159 int most_stop_channel(struct most_interface *iface, int id,
1160                       struct most_component *comp)
1161 {
1162         struct most_channel *c;
1163
1164         if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1165                 dev_err(&mc.dev, "Bad interface or index out of range\n");
1166                 return -EINVAL;
1167         }
1168         c = iface->p->channel[id];
1169         if (unlikely(!c))
1170                 return -EINVAL;
1171
1172         mutex_lock(&c->start_mutex);
1173         if (c->pipe0.refs + c->pipe1.refs >= 2)
1174                 goto out;
1175
1176         if (c->hdm_enqueue_task)
1177                 kthread_stop(c->hdm_enqueue_task);
1178         c->hdm_enqueue_task = NULL;
1179
1180         if (iface->mod)
1181                 module_put(iface->mod);
1182
1183         c->is_poisoned = true;
1184         if (c->iface->poison_channel(c->iface, c->channel_id)) {
1185                 dev_err(&mc.dev, "Cannot stop channel %d of mdev %s\n", c->channel_id,
1186                         c->iface->description);
1187                 mutex_unlock(&c->start_mutex);
1188                 return -EAGAIN;
1189         }
1190         flush_trash_fifo(c);
1191         flush_channel_fifos(c);
1192
1193 #ifdef CMPL_INTERRUPTIBLE
1194         if (wait_for_completion_interruptible(&c->cleanup)) {
1195                 dev_err(&mc.dev, "Interrupted while cleaning up ch %d\n", c->channel_id);
1196                 mutex_unlock(&c->start_mutex);
1197                 return -EINTR;
1198         }
1199 #else
1200         wait_for_completion(&c->cleanup);
1201 #endif
1202         c->is_poisoned = false;
1203
1204 out:
1205         if (comp == c->pipe0.comp)
1206                 c->pipe0.refs--;
1207         if (comp == c->pipe1.comp)
1208                 c->pipe1.refs--;
1209         mutex_unlock(&c->start_mutex);
1210         return 0;
1211 }
1212 EXPORT_SYMBOL_GPL(most_stop_channel);
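
/*
 * Illustrative sketch (not part of this driver): a component bringing a
 * channel up for I/O and shutting it down again.  The component instance
 * "my_comp" is hypothetical; the calls and their pairing mirror the two
 * functions above.
 */
static int __maybe_unused example_open_close(struct most_interface *iface,
                                             int id,
                                             struct most_component *my_comp)
{
        int ret = most_start_channel(iface, id, my_comp);

        if (ret)
                return ret;     /* e.g. -EINVAL for a misconfigured channel */
        /* ... exchange buffers via most_get_mbo()/most_put_mbo() ... */
        return most_stop_channel(iface, id, my_comp);
}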
1213
1214 /**
1215  * most_register_component - registers a driver component with the core
1216  * @comp: driver component
1217  */
1218 int most_register_component(struct most_component *comp)
1219 {
1220         if (!comp) {
1221                 dev_err(&mc.dev, "Bad component\n");
1222                 return -EINVAL;
1223         }
1224         list_add_tail(&comp->list, &mc.comp_list);
1225         return 0;
1226 }
1227 EXPORT_SYMBOL_GPL(most_register_component);
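
/*
 * Illustrative sketch (not part of this driver): registering a minimal
 * component with the core and unregistering it again.  The callback
 * prototypes are inferred from how the core invokes them in this file;
 * see most.h for the authoritative definitions.
 */
static int example_probe_channel(struct most_interface *iface, int channel_id,
                                 struct most_channel_config *cfg,
                                 char *name, char *param)
{
        return 0;       /* accept the link */
}

static int example_disconnect_channel(struct most_interface *iface,
                                      int channel_id)
{
        return 0;
}

static struct most_component example_comp = {
        .name = "example",
        .probe_channel = example_probe_channel,
        .disconnect_channel = example_disconnect_channel,
};

static int __maybe_unused example_comp_init(void)
{
        return most_register_component(&example_comp);
}

static void __maybe_unused example_comp_exit(void)
{
        most_deregister_component(&example_comp);
}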
1228
1229 static int disconnect_channels(struct device *dev, void *data)
1230 {
1231         struct most_interface *iface;
1232         struct most_channel *c, *tmp;
1233         struct most_component *comp = data;
1234
1235         iface = to_most_interface(dev);
1236         list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
1237                 if (c->pipe0.comp == comp || c->pipe1.comp == comp)
1238                         comp->disconnect_channel(c->iface, c->channel_id);
1239                 if (c->pipe0.comp == comp)
1240                         c->pipe0.comp = NULL;
1241                 if (c->pipe1.comp == comp)
1242                         c->pipe1.comp = NULL;
1243         }
1244         return 0;
1245 }
1246
1247 /**
1248  * most_deregister_component - deregisters a driver component with the core
1249  * @comp: driver component
1250  */
1251 int most_deregister_component(struct most_component *comp)
1252 {
1253         if (!comp) {
1254                 dev_err(&mc.dev, "Bad component\n");
1255                 return -EINVAL;
1256         }
1257
1258         bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1259         list_del(&comp->list);
1260         return 0;
1261 }
1262 EXPORT_SYMBOL_GPL(most_deregister_component);
1263
1264 static void release_interface(struct device *dev)
1265 {
1266         dev_info(&mc.dev, "releasing interface dev %s...\n", dev_name(dev));
1267 }
1268
1269 static void release_channel(struct device *dev)
1270 {
1271         dev_info(&mc.dev, "releasing channel dev %s...\n", dev_name(dev));
1272 }
1273
1274 /**
1275  * most_register_interface - registers an interface with core
1276  * @iface: device interface
1277  *
1278  * Allocates and initializes a new interface instance and all of its channels.
1279  * Returns 0 on success or an error code otherwise.
1280  */
1281 int most_register_interface(struct most_interface *iface)
1282 {
1283         unsigned int i;
1284         int id;
1285         struct most_channel *c;
1286
1287         if (!iface || !iface->enqueue || !iface->configure ||
1288             !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1289                 dev_err(&mc.dev, "Bad interface or channel overflow\n");
1290                 return -EINVAL;
1291         }
1292
1293         id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1294         if (id < 0) {
1295                 dev_err(&mc.dev, "Failed to alloc mdev ID\n");
1296                 return id;
1297         }
1298
1299         iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
1300         if (!iface->p) {
1301                 ida_simple_remove(&mdev_id, id);
1302                 return -ENOMEM;
1303         }
1304
1305         INIT_LIST_HEAD(&iface->p->channel_list);
1306         iface->p->dev_id = id;
1307         strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
1308         iface->dev.init_name = iface->p->name;
1309         iface->dev.bus = &mc.bus;
1310         iface->dev.parent = &mc.dev;
1311         iface->dev.groups = interface_attr_groups;
1312         iface->dev.release = release_interface;
1313         if (device_register(&iface->dev)) {
1314                 dev_err(&mc.dev, "registering iface->dev failed\n");
1315                 kfree(iface->p);
1316                 ida_simple_remove(&mdev_id, id);
1317                 return -ENOMEM;
1318         }
1319
1320         for (i = 0; i < iface->num_channels; i++) {
1321                 const char *name_suffix = iface->channel_vector[i].name_suffix;
1322
1323                 c = kzalloc(sizeof(*c), GFP_KERNEL);
1324                 if (!c)
1325                         goto err_free_resources;
1326                 if (!name_suffix)
1327                         snprintf(c->name, STRING_SIZE, "ch%d", i);
1328                 else
1329                         snprintf(c->name, STRING_SIZE, "%s", name_suffix);
1330                 c->dev.init_name = c->name;
1331                 c->dev.parent = &iface->dev;
1332                 c->dev.groups = channel_attr_groups;
1333                 c->dev.release = release_channel;
1334                 iface->p->channel[i] = c;
1335                 c->is_starving = 0;
1336                 c->iface = iface;
1337                 c->channel_id = i;
1338                 c->keep_mbo = false;
1339                 c->enqueue_halt = false;
1340                 c->is_poisoned = false;
1341                 c->cfg.direction = 0;
1342                 c->cfg.data_type = 0;
1343                 c->cfg.num_buffers = 0;
1344                 c->cfg.buffer_size = 0;
1345                 c->cfg.subbuffer_size = 0;
1346                 c->cfg.packets_per_xact = 0;
1347                 spin_lock_init(&c->fifo_lock);
1348                 INIT_LIST_HEAD(&c->fifo);
1349                 INIT_LIST_HEAD(&c->trash_fifo);
1350                 INIT_LIST_HEAD(&c->halt_fifo);
1351                 init_completion(&c->cleanup);
1352                 atomic_set(&c->mbo_ref, 0);
1353                 mutex_init(&c->start_mutex);
1354                 mutex_init(&c->nq_mutex);
1355                 list_add_tail(&c->list, &iface->p->channel_list);
1356                 if (device_register(&c->dev)) {
1357                         dev_err(&mc.dev, "registering c->dev failed\n");
1358                         goto err_free_most_channel;
1359                 }
1360         }
1361         most_interface_register_notify(iface->description);
1362         return 0;
1363
1364 err_free_most_channel:
1365         kfree(c);
1366
1367 err_free_resources:
1368         while (i > 0) {
1369                 c = iface->p->channel[--i];
1370                 device_unregister(&c->dev);
1371                 kfree(c);
1372         }
1373         kfree(iface->p);
1374         device_unregister(&iface->dev);
1375         ida_simple_remove(&mdev_id, id);
1376         return -ENOMEM;
1377 }
1378 EXPORT_SYMBOL_GPL(most_register_interface);
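
/*
 * Illustrative sketch (not part of this driver): the registration pairing
 * an HDM is expected to follow.  The "example_iface" instance is
 * hypothetical; its enqueue/configure/poison_channel callbacks,
 * description, num_channels and channel_vector must be populated per
 * most.h before registration, otherwise the sanity check in
 * most_register_interface() rejects it with -EINVAL.
 */
static struct most_interface example_iface;     /* populated by the HDM */

static int __maybe_unused example_hdm_probe(void)
{
        /* ... fill in example_iface here ... */
        return most_register_interface(&example_iface);
}

static void __maybe_unused example_hdm_remove(void)
{
        most_deregister_interface(&example_iface);
}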
1379
1380 /**
1381  * most_deregister_interface - deregisters an interface with core
1382  * @iface: device interface
1383  *
1384  * Before removing an interface instance from the list, all running
1385  * channels are stopped and poisoned.
1386  */
1387 void most_deregister_interface(struct most_interface *iface)
1388 {
1389         int i;
1390         struct most_channel *c;
1391
1392         for (i = 0; i < iface->num_channels; i++) {
1393                 c = iface->p->channel[i];
1394                 if (c->pipe0.comp)
1395                         c->pipe0.comp->disconnect_channel(c->iface,
1396                                                         c->channel_id);
1397                 if (c->pipe1.comp)
1398                         c->pipe1.comp->disconnect_channel(c->iface,
1399                                                         c->channel_id);
1400                 c->pipe0.comp = NULL;
1401                 c->pipe1.comp = NULL;
1402                 list_del(&c->list);
1403                 device_unregister(&c->dev);
1404                 kfree(c);
1405         }
1406
1407         ida_simple_remove(&mdev_id, iface->p->dev_id);
1408         kfree(iface->p);
1409         device_unregister(&iface->dev);
1410 }
1411 EXPORT_SYMBOL_GPL(most_deregister_interface);
1412
1413 /**
1414  * most_stop_enqueue - prevents core from enqueueing MBOs
1415  * @iface: pointer to interface
1416  * @id: channel id
1417  *
1418  * This is called by an HDM that _cannot_ attend to its duties and is
1419  * about to be overrun by the core. The core will not enqueue any
1420  * further packets unless the flagging HDM calls
1421  * most_resume_enqueue().
1422  */
1423 void most_stop_enqueue(struct most_interface *iface, int id)
1424 {
1425         struct most_channel *c = iface->p->channel[id];
1426
1427         if (!c)
1428                 return;
1429
1430         mutex_lock(&c->nq_mutex);
1431         c->enqueue_halt = true;
1432         mutex_unlock(&c->nq_mutex);
1433 }
1434 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1435
1436 /**
1437  * most_resume_enqueue - allow core to enqueue MBOs again
1438  * @iface: pointer to interface
1439  * @id: channel id
1440  *
1441  * This clears the enqueue halt flag and enqueues all MBOs currently
1442  * sitting in the wait fifo.
1443  */
1444 void most_resume_enqueue(struct most_interface *iface, int id)
1445 {
1446         struct most_channel *c = iface->p->channel[id];
1447
1448         if (!c)
1449                 return;
1450
1451         mutex_lock(&c->nq_mutex);
1452         c->enqueue_halt = false;
1453         mutex_unlock(&c->nq_mutex);
1454
1455         wake_up_interruptible(&c->hdm_fifo_wq);
1456 }
1457 EXPORT_SYMBOL_GPL(most_resume_enqueue);
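
/*
 * Illustrative sketch (not part of this driver): how an HDM could use the
 * two calls above to throttle the core while its hardware FIFO is full.
 * The fifo_full condition is hypothetical and would come from the HDM's
 * interrupt or status handling.
 */
static void __maybe_unused example_hdm_flow_control(struct most_interface *iface,
                                                    int id, bool fifo_full)
{
        if (fifo_full)
                most_stop_enqueue(iface, id);   /* MBOs stay in the halt fifo */
        else
                most_resume_enqueue(iface, id); /* wakes hdm_enqueue_thread() */
}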
1458
1459 static void release_most_sub(struct device *dev)
1460 {
1461         dev_info(&mc.dev, "releasing most_subsystem\n");
1462 }
1463
1464 static int __init most_init(void)
1465 {
1466         int err;
1467
1468         INIT_LIST_HEAD(&mc.comp_list);
1469         ida_init(&mdev_id);
1470
1471         mc.bus.name = "most";
1472         mc.bus.match = most_match;
1473         mc.drv.name = "most_core";
1474         mc.drv.bus = &mc.bus;
1475         mc.drv.groups = mc_attr_groups;
1476
1477         err = bus_register(&mc.bus);
1478         if (err) {
1479                 dev_err(&mc.dev, "Cannot register most bus\n");
1480                 return err;
1481         }
1482         err = driver_register(&mc.drv);
1483         if (err) {
1484                 dev_err(&mc.dev, "Cannot register core driver\n");
1485                 goto err_unregister_bus;
1486         }
1487         mc.dev.init_name = "most_bus";
1488         mc.dev.release = release_most_sub;
1489         if (device_register(&mc.dev)) {
1490                 err = -ENOMEM;
1491                 goto err_unregister_driver;
1492         }
1493         configfs_init();
1494         return 0;
1495
1496 err_unregister_driver:
1497         driver_unregister(&mc.drv);
1498 err_unregister_bus:
1499         bus_unregister(&mc.bus);
1500         return err;
1501 }
1502
1503 static void __exit most_exit(void)
1504 {
1505         device_unregister(&mc.dev);
1506         driver_unregister(&mc.drv);
1507         bus_unregister(&mc.bus);
1508         ida_destroy(&mdev_id);
1509 }
1510
1511 module_init(most_init);
1512 module_exit(most_exit);
1513 MODULE_LICENSE("GPL");
1514 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1515 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");