
Commit 67455b3

Luis Sanchez committed
[FAB-1242] Limit batch size to AbsoluteMaxBytes
- Receiver.Ordered() should not produce a batch of messages larger than
  BatchSize.AbsoluteMaxBytes.

Change-Id: I3fcdc49c5756ee2215c8a7837ac3e0e49073aa13
Signed-off-by: Luis Sanchez <[email protected]>
1 parent 012f0b5 commit 67455b3
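
For readers skimming the diff below, here is a minimal, self-contained sketch of the rule this commit introduces: before a message is appended to the pending batch, its byte size is added to a running total, and if the sum would exceed AbsoluteMaxBytes the pending batch is cut first. Everything in the sketch (the local envelope struct, the 30-byte payloads, the hard-coded limit of 100) is an illustrative stand-in; the real code operates on cb.Envelope and reads the limit from the shared config's BatchSize().AbsoluteMaxBytes.

package main

import "fmt"

// envelope is an illustrative stand-in for cb.Envelope; only the fields that
// determine a message's size matter here.
type envelope struct {
    Payload   []byte
    Signature []byte
}

func messageSizeBytes(msg envelope) uint32 {
    return uint32(len(msg.Payload) + len(msg.Signature))
}

func main() {
    const absoluteMaxBytes uint32 = 100 // stand-in for BatchSize().AbsoluteMaxBytes

    var pendingBatch []envelope
    var pendingBatchSizeBytes uint32

    for i := 0; i < 12; i++ {
        msg := envelope{Payload: make([]byte, 30)} // every message is 30 bytes here

        // Cut the pending batch first if adding msg would push it past the limit.
        if pendingBatchSizeBytes+messageSizeBytes(msg) > absoluteMaxBytes {
            fmt.Printf("cut batch: %d messages, %d bytes\n", len(pendingBatch), pendingBatchSizeBytes)
            pendingBatch, pendingBatchSizeBytes = nil, 0
        }

        pendingBatch = append(pendingBatch, msg)
        pendingBatchSizeBytes += messageSizeBytes(msg)
    }

    fmt.Printf("still pending: %d messages, %d bytes\n", len(pendingBatch), pendingBatchSizeBytes)
}

With these illustrative numbers, each cut happens at three messages (90 bytes), because a fourth 30-byte message would push the batch to 120 bytes.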

File tree

2 files changed: +155 -40 lines


orderer/common/blockcutter/blockcutter.go (+90 -35)
@@ -29,22 +29,34 @@ var logger = logging.MustGetLogger("orderer/common/blockcutter")
 // Receiver defines a sink for the ordered broadcast messages
 type Receiver interface {
     // Ordered should be invoked sequentially as messages are ordered
-    // If the message is a valid normal message and does not fill the batch, nil, nil, true is returned
-    // If the message is a valid normal message and fills a batch, the batch, committers, true is returned
-    // If the message is a valid special message (like a config message) it terminates the current batch
-    // and returns the current batch and committers (if it is not empty), plus a second batch containing the special transaction and commiter, and true
-    // If the ordered message is determined to be invalid, then nil, nil, false is returned
+    // If the current message is valid, and no batches need to be cut:
+    //   - Ordered will return nil, nil, and true (indicating ok).
+    // If the current message is valid, and batches need to be cut:
+    //   - Ordered will return 1 or 2 batches of messages, 1 or 2 batches of committers, and true (indicating ok).
+    // If the current message is invalid:
+    //   - Ordered will return nil, nil, and false (to indicate not ok).
+    //
+    // Given a valid message, if the current message needs to be isolated (as determined during filtering):
+    //   - Ordered will return:
+    //     * The pending batch of messages (if not empty), and a second batch containing only the isolated message.
+    //     * The corresponding batches of committers.
+    //     * true (indicating ok).
+    // Otherwise, given a valid message, the pending batch, if not empty, will be cut and returned if:
+    //   - The current message needs to be isolated (as determined during filtering).
+    //   - The current message will cause the pending batch size in bytes to exceed BatchSize.AbsoluteMaxBytes.
+    //   - After adding the current message to the pending batch, the message count has reached BatchSize.MaxMessageCount.
     Ordered(msg *cb.Envelope) ([][]*cb.Envelope, [][]filter.Committer, bool)

     // Cut returns the current batch and starts a new one
     Cut() ([]*cb.Envelope, []filter.Committer)
 }

 type receiver struct {
-    sharedConfigManager sharedconfig.Manager
-    filters             *filter.RuleSet
-    curBatch            []*cb.Envelope
-    batchComs           []filter.Committer
+    sharedConfigManager   sharedconfig.Manager
+    filters               *filter.RuleSet
+    pendingBatch          []*cb.Envelope
+    pendingBatchSizeBytes uint32
+    pendingCommitters     []filter.Committer
 }

 // NewReceiverImpl creates a Receiver implementation based on the given sharedconfig manager and filters
@@ -56,11 +68,22 @@ func NewReceiverImpl(sharedConfigManager sharedconfig.Manager, filters *filter.R
 }

 // Ordered should be invoked sequentially as messages are ordered
-// If the message is a valid normal message and does not fill the batch, nil, nil, true is returned
-// If the message is a valid normal message and fills a batch, the batch, committers, true is returned
-// If the message is a valid special message (like a config message) it terminates the current batch
-// and returns the current batch and committers (if it is not empty), plus a second batch containing the special transaction and commiter, and true
-// If the ordered message is determined to be invalid, then nil, nil, false is returned
+// If the current message is valid, and no batches need to be cut:
+//   - Ordered will return nil, nil, and true (indicating ok).
+// If the current message is valid, and batches need to be cut:
+//   - Ordered will return 1 or 2 batches of messages, 1 or 2 batches of committers, and true (indicating ok).
+// If the current message is invalid:
+//   - Ordered will return nil, nil, and false (to indicate not ok).
+//
+// Given a valid message, if the current message needs to be isolated (as determined during filtering):
+//   - Ordered will return:
+//     * The pending batch of messages (if not empty), and a second batch containing only the isolated message.
+//     * The corresponding batches of committers.
+//     * true (indicating ok).
+// Otherwise, given a valid message, the pending batch, if not empty, will be cut and returned if:
+//   - The current message needs to be isolated (as determined during filtering).
+//   - The current message will cause the pending batch size in bytes to exceed BatchSize.AbsoluteMaxBytes.
+//   - After adding the current message to the pending batch, the message count has reached BatchSize.MaxMessageCount.
 func (r *receiver) Ordered(msg *cb.Envelope) ([][]*cb.Envelope, [][]filter.Committer, bool) {
     // The messages must be filtered a second time in case configuration has changed since the message was received
     committer, err := r.filters.Apply(msg)
@@ -70,38 +93,70 @@ func (r *receiver) Ordered(msg *cb.Envelope) ([][]*cb.Envelope, [][]filter.Commi
     }

     if committer.Isolated() {
-        logger.Debugf("Found message which requested to be isolated, cutting into its own block")
-        firstBatch := r.curBatch
-        r.curBatch = nil
-        firstComs := r.batchComs
-        r.batchComs = nil
-        secondBatch := []*cb.Envelope{msg}
-        if firstBatch == nil {
-            return [][]*cb.Envelope{secondBatch}, [][]filter.Committer{[]filter.Committer{committer}}, true
+        logger.Debugf("Found message which requested to be isolated, cutting into its own batch")
+
+        messageBatches := [][]*cb.Envelope{}
+        committerBatches := [][]filter.Committer{}
+
+        // cut pending batch, if it has any messages
+        if len(r.pendingBatch) > 0 {
+            messageBatch, committerBatch := r.Cut()
+            messageBatches = append(messageBatches, messageBatch)
+            committerBatches = append(committerBatches, committerBatch)
         }
-        return [][]*cb.Envelope{firstBatch, secondBatch}, [][]filter.Committer{firstComs, []filter.Committer{committer}}, true
+
+        // create new batch with single message
+        messageBatches = append(messageBatches, []*cb.Envelope{msg})
+        committerBatches = append(committerBatches, []filter.Committer{committer})
+
+        return messageBatches, committerBatches, true
+    }
+
+    messageBatches := [][]*cb.Envelope{}
+    committerBatches := [][]filter.Committer{}
+
+    messageSizeBytes := messageSizeBytes(msg)
+    messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > r.sharedConfigManager.BatchSize().AbsoluteMaxBytes
+
+    if messageWillOverflowBatchSizeBytes {
+        logger.Debugf("The current message, with %v bytes, will overflow the pending batch of %v bytes.", messageSizeBytes, r.pendingBatchSizeBytes)
+        logger.Debugf("Pending batch would overflow if current message is added, cutting batch now.")
+        messageBatch, committerBatch := r.Cut()
+        messageBatches = append(messageBatches, messageBatch)
+        committerBatches = append(committerBatches, committerBatch)
     }

     logger.Debugf("Enqueuing message into batch")
-    r.curBatch = append(r.curBatch, msg)
-    r.batchComs = append(r.batchComs, committer)
+    r.pendingBatch = append(r.pendingBatch, msg)
+    r.pendingBatchSizeBytes += messageSizeBytes
+    r.pendingCommitters = append(r.pendingCommitters, committer)
+
+    if uint32(len(r.pendingBatch)) >= r.sharedConfigManager.BatchSize().MaxMessageCount {
+        logger.Debugf("Batch size met, cutting batch")
+        messageBatch, committerBatch := r.Cut()
+        messageBatches = append(messageBatches, messageBatch)
+        committerBatches = append(committerBatches, committerBatch)
+    }

-    if uint32(len(r.curBatch)) < r.sharedConfigManager.BatchSize().MaxMessageCount {
+    // return nils instead of empty slices
+    if len(messageBatches) == 0 {
         return nil, nil, true
     }

-    logger.Debugf("Batch size met, creating block")
-    newBatch := r.curBatch
-    newComs := r.batchComs
-    r.curBatch = nil
-    return [][]*cb.Envelope{newBatch}, [][]filter.Committer{newComs}, true
+    return messageBatches, committerBatches, true
+
 }

 // Cut returns the current batch and starts a new one
 func (r *receiver) Cut() ([]*cb.Envelope, []filter.Committer) {
-    batch := r.curBatch
-    r.curBatch = nil
-    committers := r.batchComs
-    r.batchComs = nil
+    batch := r.pendingBatch
+    r.pendingBatch = nil
+    committers := r.pendingCommitters
+    r.pendingCommitters = nil
+    r.pendingBatchSizeBytes = 0
     return batch, committers
 }
+
+func messageSizeBytes(message *cb.Envelope) uint32 {
+    return uint32(len(message.Payload) + len(message.Signature))
+}
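
The doc comment above describes Ordered handing back either one or two batches per call. As a rough, hypothetical illustration of what a consumer does with that shape (the real orderer turns each returned batch into a block), here is a self-contained Go sketch; envelope, committer, and writeBlock are stand-ins, not the Fabric types.

package main

import "fmt"

// envelope and committer are hypothetical stand-ins for cb.Envelope and
// filter.Committer; only the shape of Ordered's return values matters here.
type envelope struct{ id string }
type committer struct{ id string }

// writeBlock stands in for whatever the consumer does with a cut batch,
// e.g. assembling and committing a block.
func writeBlock(msgs []envelope, comms []committer) {
    fmt.Printf("block: %d messages, %d committers\n", len(msgs), len(comms))
}

func main() {
    // A single Ordered call may hand back zero, one, or two batches; two when
    // an isolated message also forces a non-empty pending batch to be cut.
    messageBatches := [][]envelope{
        {{id: "tx1"}, {id: "tx2"}}, // the pending batch, cut first
        {{id: "config-tx"}},        // the isolated message in its own batch
    }
    committerBatches := [][]committer{
        {{id: "c1"}, {id: "c2"}},
        {{id: "config-committer"}},
    }

    // The batches are parallel slices: one block is produced per entry.
    for i, msgs := range messageBatches {
        writeBlock(msgs, committerBatches[i])
    }
}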

orderer/common/blockcutter/blockcutter_test.go (+65 -5)
@@ -80,7 +80,8 @@ var unmatchedTx = &cb.Envelope{Payload: []byte("UNMATCHED")}
 func TestNormalBatch(t *testing.T) {
     filters := getFilters()
     maxMessageCount := uint32(2)
-    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount}}, filters)
+    absoluteMaxBytes := uint32(100)
+    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: absoluteMaxBytes}}, filters)

     batches, committers, ok := r.Ordered(goodTx)

@@ -107,7 +108,8 @@ func TestNormalBatch(t *testing.T) {
 func TestBadMessageInBatch(t *testing.T) {
     filters := getFilters()
     maxMessageCount := uint32(2)
-    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount}}, filters)
+    absoluteMaxBytes := uint32(100)
+    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: absoluteMaxBytes}}, filters)

     batches, committers, ok := r.Ordered(badTx)

@@ -143,7 +145,8 @@ func TestBadMessageInBatch(t *testing.T) {
 func TestUnmatchedMessageInBatch(t *testing.T) {
     filters := getFilters()
     maxMessageCount := uint32(2)
-    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount}}, filters)
+    absoluteMaxBytes := uint32(100)
+    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: absoluteMaxBytes}}, filters)

     batches, committers, ok := r.Ordered(unmatchedTx)

@@ -179,7 +182,8 @@ func TestUnmatchedMessageInBatch(t *testing.T) {
 func TestIsolatedEmptyBatch(t *testing.T) {
     filters := getFilters()
     maxMessageCount := uint32(2)
-    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount}}, filters)
+    absoluteMaxBytes := uint32(100)
+    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: absoluteMaxBytes}}, filters)

     batches, committers, ok := r.Ordered(isolatedTx)

@@ -203,7 +207,8 @@ func TestIsolatedEmptyBatch(t *testing.T) {
 func TestIsolatedPartialBatch(t *testing.T) {
     filters := getFilters()
     maxMessageCount := uint32(2)
-    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount}}, filters)
+    absoluteMaxBytes := uint32(100)
+    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: absoluteMaxBytes}}, filters)

     batches, committers, ok := r.Ordered(goodTx)

@@ -241,3 +246,58 @@ func TestIsolatedPartialBatch(t *testing.T) {
         t.Fatalf("Should have had the isolated tx in the second batch")
     }
 }
+
+func TestBatchSizeAbsoluteMaxBytesOverflow(t *testing.T) {
+    filters := getFilters()
+
+    goodTxBytes := messageSizeBytes(goodTx)
+
+    // set absolute max bytes such that 10 goodTx will not fit
+    absoluteMaxBytes := goodTxBytes*10 - 1
+
+    // set message count > 9
+    maxMessageCount := uint32(20)
+
+    r := NewReceiverImpl(&mocksharedconfig.Manager{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: absoluteMaxBytes}}, filters)
+
+    // enqueue 9 messages
+    for i := 0; i < 9; i++ {
+        batches, committers, ok := r.Ordered(goodTx)
+        if batches != nil || committers != nil {
+            t.Fatalf("Should not have created batch")
+        }
+        if !ok {
+            t.Fatalf("Should have enqueued message into batch")
+        }
+    }
+
+    // next message should create batch
+    batches, committers, ok := r.Ordered(goodTx)
+
+    if batches == nil || committers == nil {
+        t.Fatalf("Should have created batch")
+    }
+
+    if len(batches) != 1 || len(committers) != 1 {
+        t.Fatalf("Should have created one batch, got %d and %d", len(batches), len(committers))
+    }
+
+    if len(batches[0]) != 9 || len(committers[0]) != 9 {
+        t.Fatalf("Should have had nine normal tx in the batch, got %d and %d committers", len(batches[0]), len(committers[0]))
+    }
+    if !ok {
+        t.Fatalf("Should have enqueued the tenth message into batch")
+    }
+
+    // force a batch cut
+    messageBatch, committerBatch := r.Cut()
+
+    if messageBatch == nil || committerBatch == nil {
+        t.Fatalf("Should have created batch")
+    }
+
+    if len(messageBatch) != 1 || len(committerBatch) != 1 {
+        t.Fatalf("Should have had one tx in the batch, got %d and %d", len(messageBatch), len(committerBatch))
+    }
+
+}
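
To see why the new test expects a nine-message batch: if s = messageSizeBytes(goodTx), the test sets AbsoluteMaxBytes to 10s - 1. After nine messages the pending batch holds 9s bytes, still under the limit, but the tenth message would bring it to 10s > 10s - 1, so Ordered cuts the nine-message batch and starts a fresh pending batch containing only the tenth message, which the explicit Cut() at the end then returns. Concretely, if goodTx were, say, 9 bytes, the limit would be 89; nine messages total 81 bytes and ten would total 90.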
