mirror of https://github.com/minetest/minetest.git
Some small optimization in the makeSplitPacket func. If we know data.getSize() and maximum_data_size, we can calculate the number of chunks in advance. For example: size = 1105, max = 495. The number = (int)(size/max)+1 = 3 (2*495 + 115), where 115 is the tail of the data block. This lets us write chunks_count during the first pass.
After this optimization we have a single loop, and the loop-invariant values (data size, chunk count) are computed once, outside the loop.
This commit is contained in:
parent
02fb912a95
commit
aea7e05e52
|
@ -81,44 +81,36 @@ core::list<SharedBuffer<u8> > makeSplitPacket(
|
||||||
u16 seqnum)
|
u16 seqnum)
|
||||||
{
|
{
|
||||||
// Chunk packets, containing the TYPE_SPLIT header
|
// Chunk packets, containing the TYPE_SPLIT header
|
||||||
core::list<SharedBuffer<u8> > chunks;
|
core::list<SharedBuffer<u8> > chunks;
|
||||||
|
|
||||||
u32 chunk_header_size = 7;
|
u32 chunk_header_size = 7;
|
||||||
u32 maximum_data_size = chunksize_max - chunk_header_size;
|
u32 maximum_data_size = chunksize_max - chunk_header_size;
|
||||||
u32 start = 0;
|
u32 start = 0;
|
||||||
u32 end = 0;
|
// u32 end = 0;
|
||||||
u32 chunk_num = 0;
|
// u32 chunk_num = 0;
|
||||||
do{
|
u32 data_size = data.getSize(); //calc once, not in the cycle
|
||||||
end = start + maximum_data_size - 1;
|
u32 chunks_count = (u32)(data_size / maximum_data_size) +1;
|
||||||
if(end > data.getSize() - 1)
|
u32 payload_size = maximum_data_size;
|
||||||
end = data.getSize() - 1;
|
|
||||||
|
for(int i=0; i<chunks_count;i++){
|
||||||
u32 payload_size = end - start + 1;
|
|
||||||
u32 packet_size = chunk_header_size + payload_size;
|
SharedBuffer<u8> chunk(maximum_data_size + chunk_header_size);
|
||||||
|
|
||||||
SharedBuffer<u8> chunk(packet_size);
|
writeU8(&chunk[0], TYPE_SPLIT);
|
||||||
|
writeU16(&chunk[1], seqnum);
|
||||||
writeU8(&chunk[0], TYPE_SPLIT);
|
writeU16(&chunk[3], chunks_count);
|
||||||
writeU16(&chunk[1], seqnum);
|
writeU16(&chunk[5], i); // chunk number
|
||||||
// [3] u16 chunk_count is written at next stage
|
|
||||||
writeU16(&chunk[5], chunk_num);
|
start = i * maximum_data_size; // 0, 1*dsize, 2*dsize...
|
||||||
memcpy(&chunk[chunk_header_size], &data[start], payload_size);
|
|
||||||
|
// on the last iteration
|
||||||
chunks.push_back(chunk);
|
if((i+1) == chunks_count)
|
||||||
|
// calc the tail which size smaller than maximum_data_size
|
||||||
start = end + 1;
|
payload_size = data_size - start;
|
||||||
chunk_num++;
|
|
||||||
}
|
memcpy(&chunk[chunk_header_size], &data[start], payload_size);
|
||||||
while(end != data.getSize() - 1);
|
chunks.push_back(chunk);
|
||||||
|
}
|
||||||
u16 chunk_count = chunks.getSize();
|
|
||||||
|
|
||||||
core::list<SharedBuffer<u8> >::Iterator i = chunks.begin();
|
|
||||||
for(; i != chunks.end(); i++)
|
|
||||||
{
|
|
||||||
// Write chunk_count
|
|
||||||
writeU16(&((*i)[3]), chunk_count);
|
|
||||||
}
|
|
||||||
|
|
||||||
return chunks;
|
return chunks;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue