// Copyright (c) 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "quiche/quic/core/batch_writer/quic_batch_writer_buffer.h"

#include <sstream>

namespace quic {

QuicBatchWriterBuffer::QuicBatchWriterBuffer() {
  memset(buffer_, 0, sizeof(buffer_));
}

void QuicBatchWriterBuffer::Clear() { buffered_writes_.clear(); }

std::string QuicBatchWriterBuffer::DebugString() const {
  std::ostringstream os;
  os << "{ buffer: " << static_cast<const void*>(buffer_)
     << " buffer_end: " << static_cast<const void*>(buffer_end())
     << " buffered_writes_.size(): " << buffered_writes_.size()
     << " next_write_loc: " << static_cast<const void*>(GetNextWriteLocation())
     << " SizeInUse: " << SizeInUse() << " }";
  return os.str();
}

bool QuicBatchWriterBuffer::Invariants() const {
  // Buffers in buffered_writes_ should not overlap, and collectively they
  // should cover a continuous prefix of buffer_.
  const char* next_buffer = buffer_;
  for (auto iter = buffered_writes_.begin(); iter != buffered_writes_.end();
       ++iter) {
    if ((iter->buffer != next_buffer) ||
        (iter->buffer + iter->buf_len > buffer_end())) {
      return false;
    }
    next_buffer += iter->buf_len;
  }

  return static_cast<size_t>(next_buffer - buffer_) == SizeInUse();
}

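// Returns a pointer to the first unused byte of buffer_ (right past the last
// buffered write), or nullptr if the remaining space cannot hold another
// packet of kMaxOutgoingPacketSize bytes.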
char* QuicBatchWriterBuffer::GetNextWriteLocation() const {
  const char* next_loc =
      buffered_writes_.empty()
          ? buffer_
          : buffered_writes_.back().buffer + buffered_writes_.back().buf_len;
  if (static_cast<size_t>(buffer_end() - next_loc) < kMaxOutgoingPacketSize) {
    return nullptr;
  }
  return const_cast<char*>(next_loc);
}

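// Appends one packet to the batch. If |buffer| does not already point at the
// next write location, its contents are copied there (with memmove if |buffer|
// overlaps the internal buffer_).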
QuicBatchWriterBuffer::PushResult QuicBatchWriterBuffer::PushBufferedWrite(
    const char* buffer, size_t buf_len, const QuicIpAddress& self_address,
    const QuicSocketAddress& peer_address, const PerPacketOptions* options,
    const QuicPacketWriterParams& params, uint64_t release_time) {
  QUICHE_DCHECK(Invariants());
  QUICHE_DCHECK_LE(buf_len, kMaxOutgoingPacketSize);

  PushResult result = {/*succeeded=*/false, /*buffer_copied=*/false};
  char* next_write_location = GetNextWriteLocation();
  if (next_write_location == nullptr) {
    return result;
  }

  if (buffer != next_write_location) {
    if (IsExternalBuffer(buffer, buf_len)) {
      memcpy(next_write_location, buffer, buf_len);
    } else if (IsInternalBuffer(buffer, buf_len)) {
      memmove(next_write_location, buffer, buf_len);
    } else {
      QUIC_BUG(quic_bug_10831_1)
          << "Buffer[" << static_cast<const void*>(buffer) << ", "
          << static_cast<const void*>(buffer + buf_len)
          << ") overlaps with internal buffer["
          << static_cast<const void*>(buffer_) << ", "
          << static_cast<const void*>(buffer_end()) << ")";
      return result;
    }
    result.buffer_copied = true;
  } else {
    // In place push, do nothing.
  }
  if (buffered_writes_.empty()) {
    // Starting a new batch.
    ++batch_id_;

    // |batch_id_| is a 32-bit unsigned int that may be shared by many QUIC
    // connections (because the writer can be shared), so it can wrap around.
    // When it wraps, skip id=0, which indicates "not batched".
    if (batch_id_ == 0) {
      ++batch_id_;
    }
  }
  buffered_writes_.emplace_back(
      next_write_location, buf_len, self_address, peer_address,
      options ? options->Clone() : std::unique_ptr<PerPacketOptions>(), params,
      release_time);

  QUICHE_DCHECK(Invariants());

  result.succeeded = true;
  result.batch_id = batch_id_;
  return result;
}

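// Drops the most recently pushed write, if any.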
void QuicBatchWriterBuffer::UndoLastPush() {
  if (!buffered_writes_.empty()) {
    buffered_writes_.pop_back();
  }
}

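// Removes the first |num_buffered_writes| writes from the batch, then compacts
// any remaining writes back to the front of buffer_ so they again form a
// continuous prefix.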
QuicBatchWriterBuffer::PopResult QuicBatchWriterBuffer::PopBufferedWrite(
    int32_t num_buffered_writes) {
  QUICHE_DCHECK(Invariants());
  QUICHE_DCHECK_GE(num_buffered_writes, 0);
  QUICHE_DCHECK_LE(static_cast<size_t>(num_buffered_writes),
                   buffered_writes_.size());

  PopResult result = {/*num_buffers_popped=*/0,
                      /*moved_remaining_buffers=*/false};

  result.num_buffers_popped = std::max<int32_t>(num_buffered_writes, 0);
  result.num_buffers_popped =
      std::min<int32_t>(result.num_buffers_popped, buffered_writes_.size());
  buffered_writes_.pop_front_n(result.num_buffers_popped);

  if (!buffered_writes_.empty()) {
    // If not all buffered writes are erased, the remaining ones will no longer
    // cover a continuous prefix of buffer_. Fix this by moving the remaining
    // buffers to the beginning of buffer_ and adjusting the buffer pointers in
    // all remaining buffered writes.
    // This should happen very rarely, about once per write block.
    result.moved_remaining_buffers = true;
    const char* buffer_before_move = buffered_writes_.front().buffer;
    size_t buffer_len_to_move = buffered_writes_.back().buffer +
                                buffered_writes_.back().buf_len -
                                buffer_before_move;
    memmove(buffer_, buffer_before_move, buffer_len_to_move);

    size_t distance_to_move = buffer_before_move - buffer_;
    for (BufferedWrite& buffered_write : buffered_writes_) {
      buffered_write.buffer -= distance_to_move;
    }

    QUICHE_DCHECK_EQ(buffer_, buffered_writes_.front().buffer);
  }
  QUICHE_DCHECK(Invariants());

  return result;
}

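// Returns the number of bytes in buffer_ currently occupied by buffered
// writes.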
size_t QuicBatchWriterBuffer::SizeInUse() const {
  if (buffered_writes_.empty()) {
    return 0;
  }

  return buffered_writes_.back().buffer + buffered_writes_.back().buf_len -
         buffer_;
}

}  // namespace quic