@@ -29,22 +29,34 @@ var logger = logging.MustGetLogger("orderer/common/blockcutter")
// Receiver defines a sink for the ordered broadcast messages
type Receiver interface {
// Ordered should be invoked sequentially as messages are ordered
- // If the message is a valid normal message and does not fill the batch, nil, nil, true is returned
- // If the message is a valid normal message and fills a batch, the batch, committers, true is returned
- // If the message is a valid special message (like a config message) it terminates the current batch
- // and returns the current batch and committers (if it is not empty), plus a second batch containing the special transaction and commiter, and true
- // If the ordered message is determined to be invalid, then nil, nil, false is returned
+ // If the current message is valid, and no batches need to be cut:
+ // - Ordered will return nil, nil, and true (indicating ok).
+ // If the current message is valid, and batches need to be cut:
+ // - Ordered will return 1 or 2 batches of messages, 1 or 2 batches of committers, and true (indicating ok).
+ // If the current message is invalid:
+ // - Ordered will return nil, nil, and false (to indicate not ok).
+ //
+ // Given a valid message, if the current message needs to be isolated (as determined during filtering):
+ // - Ordered will return:
+ // * The pending batch (if not empty), and a second batch containing only the isolated message.
+ // * The corresponding batches of committers.
+ // * true (indicating ok).
+ // In general, given a valid message, the pending batch, if not empty, will be cut and returned if any of the following holds:
+ // - The current message needs to be isolated (as determined during filtering).
+ // - The current message will cause the pending batch size in bytes to exceed BatchSize.AbsoluteMaxBytes.
+ // - After adding the current message to the pending batch, the message count has reached BatchSize.MaxMessageCount.
Ordered(msg *cb.Envelope) ([][]*cb.Envelope, [][]filter.Committer, bool)

// Cut returns the current batch and starts a new one
Cut() ([]*cb.Envelope, []filter.Committer)
}

type receiver struct {
- sharedConfigManager sharedconfig.Manager
- filters *filter.RuleSet
- curBatch []*cb.Envelope
- batchComs []filter.Committer
+ sharedConfigManager sharedconfig.Manager
+ filters *filter.RuleSet
+ pendingBatch []*cb.Envelope
+ pendingBatchSizeBytes uint32
+ pendingCommitters []filter.Committer
}

// NewReceiverImpl creates a Receiver implementation based on the given sharedconfig manager and filters
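As background for the hunks below, here is an illustrative, caller-side sketch of how the revised Ordered contract might be consumed by a consenter loop. The blockcutter package path, the other import paths, and the writeBlock helper are assumptions made for the example, and Committer is assumed to expose a no-argument Commit method; only the Receiver interface itself comes from this change.

```go
package consenter

import (
	"fmt"

	"github.com/hyperledger/fabric/orderer/common/blockcutter" // assumed import path
	"github.com/hyperledger/fabric/orderer/common/filter"      // assumed import path
	cb "github.com/hyperledger/fabric/protos/common"           // assumed import path
)

// handleMessage feeds one ordered message to the block cutter and writes a
// block for every batch it cuts (zero, one, or two batches per call).
func handleMessage(cutter blockcutter.Receiver, msg *cb.Envelope) {
	messageBatches, committerBatches, ok := cutter.Ordered(msg)
	if !ok {
		fmt.Println("message rejected by the filters, dropping it")
		return
	}
	// Zero batches: the message was only enqueued into the pending batch.
	// One batch: the pending batch was cut (byte-size or message-count limit reached).
	// Two batches: the pending batch was cut and the message was isolated into its own batch.
	for i, batch := range messageBatches {
		writeBlock(batch, committerBatches[i])
	}
}

// writeBlock is a hypothetical stand-in for the consenter's real block-creation path.
func writeBlock(batch []*cb.Envelope, committers []filter.Committer) {
	fmt.Printf("writing a block with %d message(s)\n", len(batch))
	for _, committer := range committers {
		committer.Commit() // assumed: Committer exposes a no-argument Commit method
	}
}
```

With this shape the caller never tracks batch sizes itself; all cutting policy stays inside the receiver.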
@@ -56,11 +68,22 @@ func NewReceiverImpl(sharedConfigManager sharedconfig.Manager, filters *filter.R
}

// Ordered should be invoked sequentially as messages are ordered
- // If the message is a valid normal message and does not fill the batch, nil, nil, true is returned
- // If the message is a valid normal message and fills a batch, the batch, committers, true is returned
- // If the message is a valid special message (like a config message) it terminates the current batch
- // and returns the current batch and committers (if it is not empty), plus a second batch containing the special transaction and commiter, and true
- // If the ordered message is determined to be invalid, then nil, nil, false is returned
+ // If the current message is valid, and no batches need to be cut:
+ // - Ordered will return nil, nil, and true (indicating ok).
+ // If the current message is valid, and batches need to be cut:
+ // - Ordered will return 1 or 2 batches of messages, 1 or 2 batches of committers, and true (indicating ok).
+ // If the current message is invalid:
+ // - Ordered will return nil, nil, and false (to indicate not ok).
+ //
+ // Given a valid message, if the current message needs to be isolated (as determined during filtering):
+ // - Ordered will return:
+ // * The pending batch (if not empty), and a second batch containing only the isolated message.
+ // * The corresponding batches of committers.
+ // * true (indicating ok).
+ // In general, given a valid message, the pending batch, if not empty, will be cut and returned if any of the following holds:
+ // - The current message needs to be isolated (as determined during filtering).
+ // - The current message will cause the pending batch size in bytes to exceed BatchSize.AbsoluteMaxBytes.
+ // - After adding the current message to the pending batch, the message count has reached BatchSize.MaxMessageCount.
func (r *receiver) Ordered(msg *cb.Envelope) ([][]*cb.Envelope, [][]filter.Committer, bool) {
// The messages must be filtered a second time in case configuration has changed since the message was received
committer, err := r.filters.Apply(msg)
@@ -70,38 +93,70 @@ func (r *receiver) Ordered(msg *cb.Envelope) ([][]*cb.Envelope, [][]filter.Commi
}

if committer.Isolated() {
- logger.Debugf("Found message which requested to be isolated, cutting into its own block")
- firstBatch := r.curBatch
- r.curBatch = nil
- firstComs := r.batchComs
- r.batchComs = nil
- secondBatch := []*cb.Envelope{msg}
- if firstBatch == nil {
- return [][]*cb.Envelope{secondBatch}, [][]filter.Committer{[]filter.Committer{committer}}, true
+ logger.Debugf("Found message which requested to be isolated, cutting into its own batch")
+
+ messageBatches := [][]*cb.Envelope{}
+ committerBatches := [][]filter.Committer{}
+
+ // cut pending batch, if it has any messages
+ if len(r.pendingBatch) > 0 {
+ messageBatch, committerBatch := r.Cut()
+ messageBatches = append(messageBatches, messageBatch)
+ committerBatches = append(committerBatches, committerBatch)
}
- return [][]*cb.Envelope{firstBatch, secondBatch}, [][]filter.Committer{firstComs, []filter.Committer{committer}}, true
+
+ // create new batch with single message
+ messageBatches = append(messageBatches, []*cb.Envelope{msg})
+ committerBatches = append(committerBatches, []filter.Committer{committer})
+
+ return messageBatches, committerBatches, true
+ }
+
+ messageBatches := [][]*cb.Envelope{}
+ committerBatches := [][]filter.Committer{}
+
+ messageSizeBytes := messageSizeBytes(msg)
+ messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > r.sharedConfigManager.BatchSize().AbsoluteMaxBytes
+
+ if messageWillOverflowBatchSizeBytes {
+ logger.Debugf("The current message, with %v bytes, will overflow the pending batch of %v bytes.", messageSizeBytes, r.pendingBatchSizeBytes)
+ logger.Debugf("Pending batch would overflow if current message is added, cutting batch now.")
+ messageBatch, committerBatch := r.Cut()
+ messageBatches = append(messageBatches, messageBatch)
+ committerBatches = append(committerBatches, committerBatch)
}

logger.Debugf("Enqueuing message into batch")
- r.curBatch = append(r.curBatch, msg)
- r.batchComs = append(r.batchComs, committer)
+ r.pendingBatch = append(r.pendingBatch, msg)
+ r.pendingBatchSizeBytes += messageSizeBytes
+ r.pendingCommitters = append(r.pendingCommitters, committer)
+
+ if uint32(len(r.pendingBatch)) >= r.sharedConfigManager.BatchSize().MaxMessageCount {
+ logger.Debugf("Batch size met, cutting batch")
+ messageBatch, committerBatch := r.Cut()
+ messageBatches = append(messageBatches, messageBatch)
+ committerBatches = append(committerBatches, committerBatch)
+ }

- if uint32(len(r.curBatch)) < r.sharedConfigManager.BatchSize().MaxMessageCount {
+ // return nils instead of empty slices
+ if len(messageBatches) == 0 {
return nil, nil, true
}

- logger.Debugf("Batch size met, creating block")
- newBatch := r.curBatch
- newComs := r.batchComs
- r.curBatch = nil
- return [][]*cb.Envelope{newBatch}, [][]filter.Committer{newComs}, true
+ return messageBatches, committerBatches, true
+
}

// Cut returns the current batch and starts a new one
func (r *receiver) Cut() ([]*cb.Envelope, []filter.Committer) {
- batch := r.curBatch
- r.curBatch = nil
- committers := r.batchComs
- r.batchComs = nil
+ batch := r.pendingBatch
+ r.pendingBatch = nil
+ committers := r.pendingCommitters
+ r.pendingCommitters = nil
+ r.pendingBatchSizeBytes = 0
return batch, committers
}
+
+ func messageSizeBytes(message *cb.Envelope) uint32 {
+ return uint32(len(message.Payload) + len(message.Signature))
+ }
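To make the new byte-size accounting concrete, the following self-contained sketch replays the overflow rule on toy data. The envelope struct, the 100-byte limit, and the message sizes are invented for the illustration; the real code performs the same comparison against BatchSize().AbsoluteMaxBytes and cuts the pending batch before enqueuing the new message.

```go
package main

import "fmt"

// envelope is a toy stand-in for cb.Envelope: only the payload and the
// signature count toward the batch size, mirroring messageSizeBytes above.
type envelope struct {
	Payload   []byte
	Signature []byte
}

func messageSizeBytes(message envelope) uint32 {
	return uint32(len(message.Payload) + len(message.Signature))
}

func main() {
	const absoluteMaxBytes = 100 // stand-in for BatchSize().AbsoluteMaxBytes

	var pendingBatchSizeBytes uint32
	messages := []envelope{
		{Payload: make([]byte, 40), Signature: make([]byte, 8)}, // 48 bytes
		{Payload: make([]byte, 40), Signature: make([]byte, 8)}, // 96 bytes pending after this one
		{Payload: make([]byte, 40), Signature: make([]byte, 8)}, // would reach 144 > 100, so cut first
	}

	for i, msg := range messages {
		size := messageSizeBytes(msg)
		if pendingBatchSizeBytes+size > absoluteMaxBytes {
			fmt.Printf("message %d would overflow (%d + %d > %d): cut the pending batch first\n",
				i, pendingBatchSizeBytes, size, absoluteMaxBytes)
			pendingBatchSizeBytes = 0 // Cut() resets the running size
		}
		pendingBatchSizeBytes += size
		fmt.Printf("message %d enqueued, pending batch is now %d bytes\n", i, pendingBatchSizeBytes)
	}
}
```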