{
}
+static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
+{
+	return -ENODEV;
+}
+
static const struct fw_card_driver dummy_driver_template = {
.read_phy_reg = dummy_read_phy_reg,
.update_phy_reg = dummy_update_phy_reg,
.set_iso_channels = dummy_set_iso_channels,
.queue_iso = dummy_queue_iso,
.flush_queue_iso = dummy_flush_queue_iso,
+ .flush_iso_completions = dummy_flush_iso_completions,
};
void fw_card_release(struct kref *kref)
struct fw_cdev_send_phy_packet send_phy_packet;
struct fw_cdev_receive_phy_packets receive_phy_packets;
struct fw_cdev_set_iso_channels set_iso_channels;
+ struct fw_cdev_flush_iso flush_iso;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
return fw_iso_context_stop(client->iso_context);
}
+static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
+{
+	struct fw_cdev_flush_iso *a = &arg->flush_iso;
+
+	/* a client has at most one iso context, and its handle is always 0 */
+	if (client->iso_context == NULL || a->handle != 0)
+		return -EINVAL;
+
+	return fw_iso_context_flush_completions(client->iso_context);
+}
+
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
[0x15] = ioctl_send_phy_packet,
[0x16] = ioctl_receive_phy_packets,
[0x17] = ioctl_set_iso_channels,
+ [0x18] = ioctl_flush_iso,
};
static int dispatch_ioctl(struct client *client,
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
+int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
+{
+	return ctx->card->driver->flush_iso_completions(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_flush_completions);
+
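For in-kernel users, the new export makes it possible to pull in already-completed packets on demand instead of waiting for the next hardware interrupt, e.g. when a precise stream position is wanted. A minimal sketch under assumed names (struct my_stream, bytes_completed and the callback that maintains it are hypothetical, not part of this patch):

#include <linux/firewire.h>

/* hypothetical driver-private stream state, not part of this patch */
struct my_stream {
	struct fw_iso_context *iso_context;
	unsigned long bytes_completed;	/* advanced by the iso callback */
};

/* force any completed packets through the registered callback right now,
 * then read the counter that the callback maintains; the return value of
 * the flush (e.g. -ENODEV once the card is gone) is ignored here */
static unsigned long my_stream_position(struct my_stream *s)
{
	fw_iso_context_flush_completions(s->iso_context);
	return s->bytes_completed;
}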
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
void (*flush_queue_iso)(struct fw_iso_context *ctx);
+ int (*flush_iso_completions)(struct fw_iso_context *ctx);
+
int (*stop_iso)(struct fw_iso_context *ctx);
};
	struct context context;
	void *header;
	size_t header_length;
+	unsigned long flushing_completions;	/* guard for flush_iso_completions */
+	u32 mc_buffer_bus;			/* multichannel: chunk being filled */
+	u16 mc_completed;			/* multichannel: bytes filled so far */
	u16 last_timestamp;
	u8 sync;
	u8 tags;
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
+	unsigned int req_count, res_count, completed;
	u32 buffer_dma;

-	if (last->res_count != 0)
+	req_count = le16_to_cpu(last->req_count);
+	res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+	completed = req_count - res_count;
+	buffer_dma = le32_to_cpu(last->data_address);
+
+	/* record progress within the current chunk so that an explicit
+	 * flush (flush_ir_buffer_fill) can report partially filled chunks */
+	if (completed > 0) {
+		ctx->mc_buffer_bus = buffer_dma;
+		ctx->mc_completed = completed;
+	}
+
+	if (res_count != 0)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

-	buffer_dma = le32_to_cpu(last->data_address);
	dma_sync_single_range_for_cpu(context->ohci->card.device,
				      buffer_dma & PAGE_MASK,
				      buffer_dma & ~PAGE_MASK,
-				      le16_to_cpu(last->req_count),
-				      DMA_FROM_DEVICE);
+				      completed, DMA_FROM_DEVICE);

-	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
+	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
		ctx->base.callback.mc(&ctx->base,
-				      le32_to_cpu(last->data_address) +
-				      le16_to_cpu(last->req_count),
+				      buffer_dma + completed,
				      ctx->base.callback_data);
+		ctx->mc_completed = 0;
+	}

	return 1;
}
+static void flush_ir_buffer_fill(struct iso_context *ctx)
+{
+	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
+				      ctx->mc_buffer_bus & PAGE_MASK,
+				      ctx->mc_buffer_bus & ~PAGE_MASK,
+				      ctx->mc_completed, DMA_FROM_DEVICE);
+
+	ctx->base.callback.mc(&ctx->base,
+			      ctx->mc_buffer_bus + ctx->mc_completed,
+			      ctx->base.callback_data);
+	ctx->mc_completed = 0;
+}
+
static inline void sync_it_packet_for_cpu(struct context *context,
struct descriptor *pd)
{
	if (ret < 0)
		goto out_with_header;

-	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		set_multichannel_mask(ohci, 0);
+		ctx->mc_completed = 0;
+	}

	return &ctx->base;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}
+static int ohci_flush_iso_completions(struct fw_iso_context *base)
+{
+	struct iso_context *ctx = container_of(base, struct iso_context, base);
+	int ret = 0;
+
+	/* keep the controller's completion tasklet from running concurrently */
+	tasklet_disable(&ctx->context.tasklet);
+
+	/* the flushing_completions bit guards against concurrent or recursive
+	 * flushes, e.g. a client callback that itself calls
+	 * fw_iso_context_flush_completions() */
+	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
+		/* run the completion processing that the interrupt tasklet
+		 * would otherwise do, synchronously in this context */
+		context_tasklet((unsigned long)&ctx->context);
+
+		switch (base->type) {
+		case FW_ISO_CONTEXT_TRANSMIT:
+		case FW_ISO_CONTEXT_RECEIVE:
+			if (ctx->header_length != 0)
+				flush_iso_completions(ctx);
+			break;
+		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+			if (ctx->mc_completed != 0)
+				flush_ir_buffer_fill(ctx);
+			break;
+		default:
+			ret = -ENOSYS;
+		}
+
+		clear_bit_unlock(0, &ctx->flushing_completions);
+		smp_mb__after_clear_bit();
+	}
+
+	tasklet_enable(&ctx->context.tasklet);
+
+	return ret;
+}
+
static const struct fw_card_driver ohci_driver = {
.enable = ohci_enable,
.read_phy_reg = ohci_read_phy_reg,
.set_iso_channels = ohci_set_iso_channels,
.queue_iso = ohci_queue_iso,
.flush_queue_iso = ohci_flush_queue_iso,
+ .flush_iso_completions = ohci_flush_iso_completions,
.start_iso = ohci_start_iso,
.stop_iso = ohci_stop_iso,
};
* @header: Stripped headers, if any
*
* This event is sent when the controller has completed an &fw_cdev_iso_packet
- * with the %FW_CDEV_ISO_INTERRUPT bit set, or when there have been so many
- * completed packets without the interrupt bit set that the kernel's internal
- * buffer for @header is about to overflow. (In the latter case, kernels with
- * ABI version < 5 drop header data up to the next interrupt packet.)
+ * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
+ * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
+ * without the interrupt bit set that the kernel's internal buffer for @header
+ * is about to overflow. (In the last case, kernels with ABI version < 5 drop
+ * header data up to the next interrupt packet.)
*
* Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
* This event is sent in multichannel contexts (context type
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
* chunks that have been completely filled and that have the
- * %FW_CDEV_ISO_INTERRUPT bit set.
+ * %FW_CDEV_ISO_INTERRUPT bit set, or when explicitly requested with
+ * %FW_CDEV_IOC_FLUSH_ISO.
*
* The buffer is continuously filled with the following data, per packet:
* - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt,
#define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets)
#define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels)
+/* available since kernel version 3.4 */
+#define FW_CDEV_IOC_FLUSH_ISO _IOW('#', 0x18, struct fw_cdev_flush_iso)
+
/*
* ABI version history
* 1 (2.6.22) - initial version
* %FW_CDEV_IOC_SET_ISO_CHANNELS
* 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
* avoid dropping data
+ * - added %FW_CDEV_IOC_FLUSH_ISO
*/
/**
};
/**
+ * struct fw_cdev_flush_iso - flush completed iso packets
+ * @handle: handle of isochronous context to flush
+ *
+ * For %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE contexts,
+ * report any completed packets.
+ *
+ * For %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL contexts, report the current
+ * offset in the receive buffer, if it has changed; this is typically in the
+ * middle of some buffer chunk.
+ *
+ * Any %FW_CDEV_EVENT_ISO_INTERRUPT or %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
+ * events generated by this ioctl are sent synchronously, i.e., are available
+ * for reading from the file descriptor when this ioctl returns.
+ */
+struct fw_cdev_flush_iso {
+ __u32 handle;
+};
+
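A minimal user-space sketch of the behaviour documented above for the %FW_CDEV_EVENT_ISO_INTERRUPT* events and for this ioctl, assuming fd is a non-blocking /dev/fw* file descriptor whose single isochronous context (handle 0) is already running; this is an editor's illustration, not part of the patch, and the names and error handling are placeholders:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* flush the iso context at handle 0 and drain whatever events resulted */
static void flush_and_drain(int fd)
{
	struct fw_cdev_flush_iso flush = { .handle = 0 };
	union {
		union fw_cdev_event event;
		char buf[16 * 1024];	/* room for trailing iso headers */
	} u;

	if (ioctl(fd, FW_CDEV_IOC_FLUSH_ISO, &flush) < 0)
		return;

	/* events produced by the flush were queued before the ioctl returned */
	while (read(fd, &u, sizeof(u)) > 0) {
		if (u.event.common.type == FW_CDEV_EVENT_ISO_INTERRUPT)
			printf("flushed %u bytes of packet headers\n",
			       u.event.iso_interrupt.header_length);
		else if (u.event.common.type ==
			 FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL)
			printf("buffer filled up to offset %u\n",
			       u.event.iso_interrupt_mc.completed);
	}
}

Because any events generated by the flush are queued before the ioctl returns, the subsequent read() calls see them immediately; with a blocking descriptor one would poll() before reading.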
+/**
* struct fw_cdev_get_cycle_timer - read cycle timer register
* @local_time: system time, in microseconds since the Epoch
* @cycle_timer: Cycle Time register contents
struct fw_iso_buffer *buffer,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
+int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);