Lines Matching refs:vsock in net/vmw_vsock/virtio_transport.c

81 struct virtio_vsock *vsock; in virtio_transport_get_local_cid() local
85 vsock = rcu_dereference(the_virtio_vsock); in virtio_transport_get_local_cid()
86 if (!vsock) { in virtio_transport_get_local_cid()
91 ret = vsock->guest_cid; in virtio_transport_get_local_cid()
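
The hits above cover the heart of virtio_transport_get_local_cid(). A minimal sketch of how they fit together, assuming the surrounding lines follow the mainline driver (the rcu_read_lock()/rcu_read_unlock() pair and the VMADDR_CID_ANY fallback are inferred, not shown in the hits):

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		/* No transport registered yet. */
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}
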
99 struct virtio_vsock *vsock, gfp_t gfp) in virtio_transport_send_skb() argument
104 sgs = vsock->out_sgs; in virtio_transport_send_skb()
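
Only the signature and the sgs assignment of virtio_transport_send_skb() appear above. As a hedged sketch of the submission path, simplified to the linear (non-zerocopy) case and assuming the mainline helper virtio_vsock_hdr() plus the per-device out_sgs/out_bufs arrays set up in probe:

static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
				     struct virtio_vsock *vsock, gfp_t gfp)
{
	int ret, in_sg = 0, out_sg = 0;
	struct scatterlist **sgs;

	sgs = vsock->out_sgs;
	/* First sg: the virtio_vsock header, device-readable. */
	sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
		    sizeof(*virtio_vsock_hdr(skb)));
	out_sg++;

	/* Second sg: the payload, if any. */
	if (skb->len > 0) {
		sg_init_one(sgs[out_sg], skb->data, skb->len);
		out_sg++;
	}

	/* The skb itself is the token returned later by virtqueue_get_buf(). */
	ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, gfp);
	return ret;
}
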
157 struct virtio_vsock *vsock = in virtio_transport_send_pkt_work() local
163 mutex_lock(&vsock->tx_lock); in virtio_transport_send_pkt_work()
165 if (!vsock->tx_run) in virtio_transport_send_pkt_work()
168 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_send_pkt_work()
175 skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue); in virtio_transport_send_pkt_work()
181 ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL); in virtio_transport_send_pkt_work()
183 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); in virtio_transport_send_pkt_work()
188 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_send_pkt_work()
191 val = atomic_dec_return(&vsock->queued_replies); in virtio_transport_send_pkt_work()
205 mutex_unlock(&vsock->tx_lock); in virtio_transport_send_pkt_work()
208 queue_work(virtio_vsock_workqueue, &vsock->rx_work); in virtio_transport_send_pkt_work()
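
Put together, virtio_transport_send_pkt_work() is the worker that drains send_pkt_queue into the TX virtqueue. A simplified sketch (it omits the queued_replies accounting and the RX-worker restart visible in the hits at lines 188-208):

static void virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;

	mutex_lock(&vsock->tx_lock);
	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct sk_buff *skb;
		int ret;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL);
		if (ret < 0) {
			/* TX virtqueue full: requeue and retry when it drains. */
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}
	}

	virtqueue_kick(vq);
out:
	mutex_unlock(&vsock->tx_lock);
}
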
214 static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb) in virtio_transport_send_skb_fast_path() argument
216 struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_send_skb_fast_path()
220 ret = mutex_trylock(&vsock->tx_lock); in virtio_transport_send_skb_fast_path()
224 ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC); in virtio_transport_send_skb_fast_path()
228 mutex_unlock(&vsock->tx_lock); in virtio_transport_send_skb_fast_path()
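
The fast path reconstructs almost completely from its hits; a sketch assuming the mainline error handling (the -EBUSY return and the kick on success are inferred):

static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock,
					       struct sk_buff *skb)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	int ret;

	/* Called from the sender's context under RCU, so it must not sleep:
	 * trylock the TX lock and use GFP_ATOMIC for the descriptor add. */
	ret = mutex_trylock(&vsock->tx_lock);
	if (unlikely(ret == 0))
		return -EBUSY;

	ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC);
	if (ret == 0)
		virtqueue_kick(vq);

	mutex_unlock(&vsock->tx_lock);

	return ret;
}
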
237 struct virtio_vsock *vsock; in virtio_transport_send_pkt() local
243 vsock = rcu_dereference(the_virtio_vsock); in virtio_transport_send_pkt()
244 if (!vsock) { in virtio_transport_send_pkt()
250 if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) { in virtio_transport_send_pkt()
262 if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) || in virtio_transport_send_pkt()
263 virtio_transport_send_skb_fast_path(vsock, skb)) { in virtio_transport_send_pkt()
265 atomic_inc(&vsock->queued_replies); in virtio_transport_send_pkt()
267 virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb); in virtio_transport_send_pkt()
268 queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); in virtio_transport_send_pkt()
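
These hits show the decision logic in virtio_transport_send_pkt(): bypass the intermediate queue only when it is empty, otherwise (or when the fast path fails) hand the packet to the worker. A sketch of that branch, assuming the mainline helper virtio_vsock_skb_reply():

	if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
	    virtio_transport_send_skb_fast_path(vsock, skb)) {
		/* Count queued replies so the RX worker can pause when too
		 * many pile up (see virtio_transport_more_replies()). */
		if (virtio_vsock_skb_reply(skb))
			atomic_inc(&vsock->queued_replies);

		virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
	}
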
279 struct virtio_vsock *vsock; in virtio_transport_cancel_pkt() local
283 vsock = rcu_dereference(the_virtio_vsock); in virtio_transport_cancel_pkt()
284 if (!vsock) { in virtio_transport_cancel_pkt()
289 cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue); in virtio_transport_cancel_pkt()
292 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_cancel_pkt()
295 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); in virtio_transport_cancel_pkt()
298 queue_work(virtio_vsock_workqueue, &vsock->rx_work); in virtio_transport_cancel_pkt()
308 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) in virtio_vsock_rx_fill() argument
316 vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_vsock_rx_fill()
332 vsock->rx_buf_nr++; in virtio_vsock_rx_fill()
334 if (vsock->rx_buf_nr > vsock->rx_buf_max_nr) in virtio_vsock_rx_fill()
335 vsock->rx_buf_max_nr = vsock->rx_buf_nr; in virtio_vsock_rx_fill()
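
virtio_vsock_rx_fill() keeps the RX virtqueue stocked with receive buffers. A hedged sketch of the refill loop; the allocation helper and size constants (virtio_vsock_alloc_skb(), VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE, VIRTIO_VSOCK_SKB_HEADROOM) are taken from recent mainline and vary between kernel versions:

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		/* One device-writable sg covering header plus payload space. */
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);

	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;

	virtqueue_kick(vq);
}
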
341 struct virtio_vsock *vsock = in virtio_transport_tx_work() local
346 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_tx_work()
347 mutex_lock(&vsock->tx_lock); in virtio_transport_tx_work()
349 if (!vsock->tx_run) in virtio_transport_tx_work()
364 mutex_unlock(&vsock->tx_lock); in virtio_transport_tx_work()
367 queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); in virtio_transport_tx_work()
371 static bool virtio_transport_more_replies(struct virtio_vsock *vsock) in virtio_transport_more_replies() argument
373 struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_more_replies()
377 val = atomic_read(&vsock->queued_replies); in virtio_transport_more_replies()
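
The throttle check itself is short; reconstructed under the assumption that it compares against the RX vring size, as in mainline:

static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	/* Stop receiving once a full vring worth of replies is pending. */
	return val < virtqueue_get_vring_size(vq);
}
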
383 static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock, in virtio_vsock_event_fill_one() argument
389 vq = vsock->vqs[VSOCK_VQ_EVENT]; in virtio_vsock_event_fill_one()
397 static void virtio_vsock_event_fill(struct virtio_vsock *vsock) in virtio_vsock_event_fill() argument
401 for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) { in virtio_vsock_event_fill()
402 struct virtio_vsock_event *event = &vsock->event_list[i]; in virtio_vsock_event_fill()
404 virtio_vsock_event_fill_one(vsock, event); in virtio_vsock_event_fill()
407 virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); in virtio_vsock_event_fill()
422 static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock) in virtio_vsock_update_guest_cid() argument
424 struct virtio_device *vdev = vsock->vdev; in virtio_vsock_update_guest_cid()
429 vsock->guest_cid = le64_to_cpu(guest_cid); in virtio_vsock_update_guest_cid()
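
virtio_vsock_update_guest_cid() reads the 64-bit CID out of the device config space; a sketch assuming the mainline layout of struct virtio_vsock_config:

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}
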
433 static void virtio_vsock_event_handle(struct virtio_vsock *vsock, in virtio_vsock_event_handle() argument
438 virtio_vsock_update_guest_cid(vsock); in virtio_vsock_event_handle()
447 struct virtio_vsock *vsock = in virtio_transport_event_work() local
451 vq = vsock->vqs[VSOCK_VQ_EVENT]; in virtio_transport_event_work()
453 mutex_lock(&vsock->event_lock); in virtio_transport_event_work()
455 if (!vsock->event_run) in virtio_transport_event_work()
465 virtio_vsock_event_handle(vsock, event); in virtio_transport_event_work()
467 virtio_vsock_event_fill_one(vsock, event); in virtio_transport_event_work()
471 virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); in virtio_transport_event_work()
473 mutex_unlock(&vsock->event_lock); in virtio_transport_event_work()
478 struct virtio_vsock *vsock = vq->vdev->priv; in virtio_vsock_event_done() local
480 if (!vsock) in virtio_vsock_event_done()
482 queue_work(virtio_vsock_workqueue, &vsock->event_work); in virtio_vsock_event_done()
487 struct virtio_vsock *vsock = vq->vdev->priv; in virtio_vsock_tx_done() local
489 if (!vsock) in virtio_vsock_tx_done()
491 queue_work(virtio_vsock_workqueue, &vsock->tx_work); in virtio_vsock_tx_done()
496 struct virtio_vsock *vsock = vq->vdev->priv; in virtio_vsock_rx_done() local
498 if (!vsock) in virtio_vsock_rx_done()
500 queue_work(virtio_vsock_workqueue, &vsock->rx_work); in virtio_vsock_rx_done()
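
The three virtqueue callbacks (event, TX, RX) share the same shape: they run in interrupt context, so they only schedule the matching work item on virtio_vsock_workqueue. Taking the RX one as the representative example, reconstructed from the hits above:

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
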
505 struct virtio_vsock *vsock; in virtio_transport_can_msgzerocopy() local
510 vsock = rcu_dereference(the_virtio_vsock); in virtio_transport_can_msgzerocopy()
511 if (vsock) { in virtio_transport_can_msgzerocopy()
512 struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_can_msgzerocopy()
598 struct virtio_vsock *vsock; in virtio_transport_seqpacket_allow() local
603 vsock = rcu_dereference(the_virtio_vsock); in virtio_transport_seqpacket_allow()
604 if (vsock) in virtio_transport_seqpacket_allow()
605 seqpacket_allow = vsock->seqpacket_allow; in virtio_transport_seqpacket_allow()
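
virtio_transport_seqpacket_allow() follows the same RCU read pattern as virtio_transport_get_local_cid(); a sketch assuming the surrounding lines match mainline:

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}
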
613 struct virtio_vsock *vsock = in virtio_transport_rx_work() local
617 vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_rx_work()
619 mutex_lock(&vsock->rx_lock); in virtio_transport_rx_work()
621 if (!vsock->rx_run) in virtio_transport_rx_work()
630 if (!virtio_transport_more_replies(vsock)) { in virtio_transport_rx_work()
642 vsock->rx_buf_nr--; in virtio_transport_rx_work()
658 if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) in virtio_transport_rx_work()
659 virtio_vsock_rx_fill(vsock); in virtio_transport_rx_work()
660 mutex_unlock(&vsock->rx_lock); in virtio_transport_rx_work()
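
virtio_transport_rx_work() is the receive-side counterpart of the TX worker. A simplified sketch of its loop (mainline additionally validates the returned length, adjusts the skb with virtio_vsock_skb_rx_put() and feeds the packet to the tap interface before virtio_transport_recv_pkt()):

static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);
	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			/* Pause RX while too many replies are still queued;
			 * the TX worker restarts us once they drain. */
			if (!virtio_transport_more_replies(vsock))
				goto out;

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	/* Refill once half of the posted buffers have been consumed. */
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}
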
663 static int virtio_vsock_vqs_init(struct virtio_vsock *vsock) in virtio_vsock_vqs_init() argument
665 struct virtio_device *vdev = vsock->vdev; in virtio_vsock_vqs_init()
673 mutex_lock(&vsock->rx_lock); in virtio_vsock_vqs_init()
674 vsock->rx_buf_nr = 0; in virtio_vsock_vqs_init()
675 vsock->rx_buf_max_nr = 0; in virtio_vsock_vqs_init()
676 mutex_unlock(&vsock->rx_lock); in virtio_vsock_vqs_init()
678 atomic_set(&vsock->queued_replies, 0); in virtio_vsock_vqs_init()
680 ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL); in virtio_vsock_vqs_init()
684 virtio_vsock_update_guest_cid(vsock); in virtio_vsock_vqs_init()
691 static void virtio_vsock_vqs_start(struct virtio_vsock *vsock) in virtio_vsock_vqs_start() argument
693 mutex_lock(&vsock->tx_lock); in virtio_vsock_vqs_start()
694 vsock->tx_run = true; in virtio_vsock_vqs_start()
695 mutex_unlock(&vsock->tx_lock); in virtio_vsock_vqs_start()
697 mutex_lock(&vsock->rx_lock); in virtio_vsock_vqs_start()
698 virtio_vsock_rx_fill(vsock); in virtio_vsock_vqs_start()
699 vsock->rx_run = true; in virtio_vsock_vqs_start()
700 mutex_unlock(&vsock->rx_lock); in virtio_vsock_vqs_start()
702 mutex_lock(&vsock->event_lock); in virtio_vsock_vqs_start()
703 virtio_vsock_event_fill(vsock); in virtio_vsock_vqs_start()
704 vsock->event_run = true; in virtio_vsock_vqs_start()
705 mutex_unlock(&vsock->event_lock); in virtio_vsock_vqs_start()
716 queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); in virtio_vsock_vqs_start()
719 static void virtio_vsock_vqs_del(struct virtio_vsock *vsock) in virtio_vsock_vqs_del() argument
721 struct virtio_device *vdev = vsock->vdev; in virtio_vsock_vqs_del()
731 mutex_lock(&vsock->rx_lock); in virtio_vsock_vqs_del()
732 vsock->rx_run = false; in virtio_vsock_vqs_del()
733 mutex_unlock(&vsock->rx_lock); in virtio_vsock_vqs_del()
735 mutex_lock(&vsock->tx_lock); in virtio_vsock_vqs_del()
736 vsock->tx_run = false; in virtio_vsock_vqs_del()
737 mutex_unlock(&vsock->tx_lock); in virtio_vsock_vqs_del()
739 mutex_lock(&vsock->event_lock); in virtio_vsock_vqs_del()
740 vsock->event_run = false; in virtio_vsock_vqs_del()
741 mutex_unlock(&vsock->event_lock); in virtio_vsock_vqs_del()
748 mutex_lock(&vsock->rx_lock); in virtio_vsock_vqs_del()
749 while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX]))) in virtio_vsock_vqs_del()
751 mutex_unlock(&vsock->rx_lock); in virtio_vsock_vqs_del()
753 mutex_lock(&vsock->tx_lock); in virtio_vsock_vqs_del()
754 while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX]))) in virtio_vsock_vqs_del()
756 mutex_unlock(&vsock->tx_lock); in virtio_vsock_vqs_del()
758 virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue); in virtio_vsock_vqs_del()
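
The hits in virtio_vsock_vqs_del() show the teardown order, but the steps between them (device reset, virtqueue deletion) do not mention vsock and so do not appear. A hedged sketch of the ordering, assuming the mainline virtio_reset_device()/del_vqs() calls:

	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* 1. Stop the workers so nothing touches the vqs; the same pattern
	 *    is repeated for tx_run and event_run under their own locks. */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	/* 2. Quiesce the device: after the reset it stops using buffers. */
	virtio_reset_device(vdev);

	/* 3. Now it is safe to reclaim buffers the device still owned
	 *    (done for both the RX and TX virtqueues). */
	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	/* 4. Drop anything still waiting in the intermediate send queue. */
	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* 5. Delete the virtqueues and flush outstanding callbacks. */
	vdev->config->del_vqs(vdev);
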
766 struct virtio_vsock *vsock = NULL; in virtio_vsock_probe() local
781 vsock = kzalloc(sizeof(*vsock), GFP_KERNEL); in virtio_vsock_probe()
782 if (!vsock) { in virtio_vsock_probe()
787 vsock->vdev = vdev; in virtio_vsock_probe()
790 mutex_init(&vsock->tx_lock); in virtio_vsock_probe()
791 mutex_init(&vsock->rx_lock); in virtio_vsock_probe()
792 mutex_init(&vsock->event_lock); in virtio_vsock_probe()
793 skb_queue_head_init(&vsock->send_pkt_queue); in virtio_vsock_probe()
794 INIT_WORK(&vsock->rx_work, virtio_transport_rx_work); in virtio_vsock_probe()
795 INIT_WORK(&vsock->tx_work, virtio_transport_tx_work); in virtio_vsock_probe()
796 INIT_WORK(&vsock->event_work, virtio_transport_event_work); in virtio_vsock_probe()
797 INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work); in virtio_vsock_probe()
800 vsock->seqpacket_allow = true; in virtio_vsock_probe()
802 vdev->priv = vsock; in virtio_vsock_probe()
804 ret = virtio_vsock_vqs_init(vsock); in virtio_vsock_probe()
808 for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++) in virtio_vsock_probe()
809 vsock->out_sgs[i] = &vsock->out_bufs[i]; in virtio_vsock_probe()
811 rcu_assign_pointer(the_virtio_vsock, vsock); in virtio_vsock_probe()
812 virtio_vsock_vqs_start(vsock); in virtio_vsock_probe()
819 kfree(vsock); in virtio_vsock_probe()
826 struct virtio_vsock *vsock = vdev->priv; in virtio_vsock_remove() local
834 virtio_vsock_vqs_del(vsock); in virtio_vsock_remove()
839 flush_work(&vsock->rx_work); in virtio_vsock_remove()
840 flush_work(&vsock->tx_work); in virtio_vsock_remove()
841 flush_work(&vsock->event_work); in virtio_vsock_remove()
842 flush_work(&vsock->send_pkt_work); in virtio_vsock_remove()
846 kfree(vsock); in virtio_vsock_remove()
852 struct virtio_vsock *vsock = vdev->priv; in virtio_vsock_freeze() local
859 virtio_vsock_vqs_del(vsock); in virtio_vsock_freeze()
868 struct virtio_vsock *vsock = vdev->priv; in virtio_vsock_restore() local
880 ret = virtio_vsock_vqs_init(vsock); in virtio_vsock_restore()
884 rcu_assign_pointer(the_virtio_vsock, vsock); in virtio_vsock_restore()
885 virtio_vsock_vqs_start(vsock); in virtio_vsock_restore()