CHANGED TO MIRROR

DerTyp187
2021-10-25 09:20:01 +02:00
parent bd712107b7
commit e509a919b6
611 changed files with 38291 additions and 1216 deletions


@@ -0,0 +1,97 @@
// batching functionality encapsulated into one class.
// -> less complexity
// -> easy to test
//
// IMPORTANT: we use THRESHOLD batching, not MAXED SIZE batching.
// see threshold comments below.
//
// includes timestamp for tick batching.
// -> allows NetworkTransform etc. to use timestamp without including it in
// every single message
using System;
using System.Collections.Generic;
namespace Mirror
{
public class Batcher
{
// batching threshold instead of max size.
// -> small messages are fit into threshold sized batches
// -> messages larger than threshold are single batches
//
// in other words, we fit up to 'threshold' but still allow larger ones
// for two reasons:
// 1.) data races: skipping batching for larger messages would send a
// large spawn message immediately, while others are batched and
// only flushed at the end of the frame
// 2.) timestamp batching: if each batch is expected to contain a
// timestamp, then large messages have to be a batch too. otherwise
// they would not contain a timestamp
readonly int threshold;
// TimeStamp header size for those who need it
public const int HeaderSize = sizeof(double);
// batched messages
// IMPORTANT: we queue the serialized messages!
// queueing NetworkMessage would box and allocate!
Queue<PooledNetworkWriter> messages = new Queue<PooledNetworkWriter>();
public Batcher(int threshold)
{
this.threshold = threshold;
}
// add a message for batching
// we allow messages of any size.
// caller needs to make sure they are within max packet size.
public void AddMessage(ArraySegment<byte> message)
{
// put into a (pooled) writer
// -> WriteBytes instead of WriteSegment because the latter
// would add a size header. we want to write directly.
// -> will be returned to pool when making the batch!
// IMPORTANT: NOT adding a size header / msg saves LOTS of bandwidth
PooledNetworkWriter writer = NetworkWriterPool.GetWriter();
writer.WriteBytes(message.Array, message.Offset, message.Count);
messages.Enqueue(writer);
}
// batch as many messages as possible into writer
// returns true if any batch was made.
public bool MakeNextBatch(NetworkWriter writer, double timeStamp)
{
// if we have no messages then there's nothing to do
if (messages.Count == 0)
return false;
// make sure the writer is fresh, so the timestamp ends up at the very start of the batch
if (writer.Position != 0)
throw new ArgumentException("MakeNextBatch needs a fresh writer!");
// write timestamp first
// -> double precision for accuracy over long periods of time
writer.WriteDouble(timeStamp);
// do-while: every batch contains at least one message
do
{
// add next message no matter what. even if > threshold.
// (we do allow > threshold sized messages as single batch)
PooledNetworkWriter message = messages.Dequeue();
ArraySegment<byte> segment = message.ToArraySegment();
writer.WriteBytes(segment.Array, segment.Offset, segment.Count);
// return the writer to pool
NetworkWriterPool.Recycle(message);
}
// keep going as long as we have more messages,
// AND the next one would fit into threshold.
while (messages.Count > 0 &&
writer.Position + messages.Peek().Position <= threshold);
// we had messages, so a batch was made
return true;
}
}
}
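
Usage sketch (not part of this commit): a minimal example of how a sender might drain the Batcher at the end of a frame. The sendToTransport callback is a hypothetical stand-in for the actual transport send; Batcher, NetworkWriterPool and PooledNetworkWriter are used as defined above. For instance, with threshold = 100, three 30 byte messages are packed into a single 8 + 90 = 98 byte batch, while a 200 byte message still becomes its own oversized batch.

// usage sketch, NOT part of the commit files above
using System;
namespace Mirror
{
    public static class BatcherUsageExample
    {
        // drain all queued messages into batches and hand each finished
        // batch to a (hypothetical) transport send callback
        public static void Flush(Batcher batcher, double localTimeStamp,
                                 Action<ArraySegment<byte>> sendToTransport)
        {
            while (true)
            {
                // MakeNextBatch requires a fresh writer, so take a pooled one per batch
                PooledNetworkWriter writer = NetworkWriterPool.GetWriter();
                // writes the 8 byte timestamp, then packs queued messages until
                // the next one would push the batch over 'threshold'.
                // returns false once no messages are queued anymore.
                if (!batcher.MakeNextBatch(writer, localTimeStamp))
                {
                    NetworkWriterPool.Recycle(writer);
                    break;
                }
                // hand the finished batch to the transport
                sendToTransport(writer.ToArraySegment());
                NetworkWriterPool.Recycle(writer);
            }
        }
    }
}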


@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 0afaaa611a2142d48a07bdd03b68b2b3
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
userData:
assetBundleName:
assetBundleVariant:


@@ -0,0 +1,142 @@
// un-batching functionality encapsulated into one class.
// -> less complexity
// -> easy to test
//
// includes timestamp for tick batching.
// -> allows NetworkTransform etc. to use timestamp without including it in
// every single message
using System;
using System.Collections.Generic;
namespace Mirror
{
public class Unbatcher
{
// support adding multiple batches before GetNextMessage is called,
// just in case.
Queue<PooledNetworkWriter> batches = new Queue<PooledNetworkWriter>();
public int BatchesCount => batches.Count;
// NetworkReader is only created once,
// then pointed to the first batch.
NetworkReader reader = new NetworkReader(new byte[0]);
// timestamp that was written into the batch remotely.
// for the batch that our reader is currently pointed at.
double readerRemoteTimeStamp;
// helper function to start reading a batch.
void StartReadingBatch(PooledNetworkWriter batch)
{
// point reader to it
reader.SetBuffer(batch.ToArraySegment());
// read remote timestamp (double)
// -> AddBatch guarantees that we have at least 8 bytes to read
readerRemoteTimeStamp = reader.ReadDouble();
}
// add a new batch.
// returns true if valid.
// returns false if not, in which case the connection should be disconnected.
public bool AddBatch(ArraySegment<byte> batch)
{
// IMPORTANT: ArraySegment is only valid until returning. we copy it!
//
// NOTE: it's not possible to create empty ArraySegments, so we
// don't need to check against that.
// make sure we have at least 8 bytes to read for tick timestamp
if (batch.Count < Batcher.HeaderSize)
return false;
// put into a (pooled) writer
// -> WriteBytes instead of WriteSegment because the latter
// would add a size header. we want to write directly.
// -> will be returned to pool once the batch has been fully read!
PooledNetworkWriter writer = NetworkWriterPool.GetWriter();
writer.WriteBytes(batch.Array, batch.Offset, batch.Count);
// first batch? then point reader there
if (batches.Count == 0)
StartReadingBatch(writer);
// add batch
batches.Enqueue(writer);
//Debug.Log($"Adding Batch {BitConverter.ToString(batch.Array, batch.Offset, batch.Count)} => batches={batches.Count} reader={reader}");
return true;
}
// get next message, unpacked from batch (if any)
// timestamp is the REMOTE time when the batch was created remotely.
public bool GetNextMessage(out NetworkReader message, out double remoteTimeStamp)
{
// getting messages would be easy via
// <<size, message, size, message, ...>>
// but to save A LOT of bandwidth, we use
// <<message, message, ...>>
// in other words, we don't know where the current message ends
//
// BUT: it doesn't matter!
// -> we simply return the reader
// * if we already have one
// * and if there's more to read
// -> the caller can then read one message from it
// -> when the end is reached, we retire the batch!
//
// for example:
// while (GetNextMessage(out message))
// ProcessMessage(message);
//
message = null;
// do nothing if we don't have any batches.
// otherwise the queue.Dequeue() below would throw an
// InvalidOperationException when operating on an empty queue.
if (batches.Count == 0)
{
remoteTimeStamp = 0;
return false;
}
// was our reader pointed to anything yet?
if (reader.Length == 0)
{
remoteTimeStamp = 0;
return false;
}
// no more data to read?
if (reader.Remaining == 0)
{
// retire the batch
PooledNetworkWriter writer = batches.Dequeue();
NetworkWriterPool.Recycle(writer);
// do we have another batch?
if (batches.Count > 0)
{
// point reader to the next batch.
// we'll return the reader below.
PooledNetworkWriter next = batches.Peek();
StartReadingBatch(next);
}
// otherwise there's nothing more to read
else
{
remoteTimeStamp = 0;
return false;
}
}
// use the current batch's remote timestamp
// AFTER potentially moving to the next batch ABOVE!
remoteTimeStamp = readerRemoteTimeStamp;
// if we got here, then we have more data to read.
message = reader;
return true;
}
}
}
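
Usage sketch (not part of this commit): feeding received transport data into an Unbatcher and processing every message it contains, as the comment inside GetNextMessage suggests. The readOneMessage callback is a hypothetical stand-in for the caller's message handling; Unbatcher and NetworkReader are used as defined above.

// usage sketch, NOT part of the commit files above
using System;
namespace Mirror
{
    public static class UnbatcherUsageExample
    {
        // returns false if the batch was invalid and the connection
        // should be disconnected
        public static bool OnTransportData(Unbatcher unbatcher, ArraySegment<byte> data,
                                           Action<NetworkReader, double> readOneMessage)
        {
            // AddBatch copies the segment and rejects batches that are too
            // small to even contain the 8 byte timestamp header
            if (!unbatcher.AddBatch(data))
                return false;
            // GetNextMessage returns the shared reader positioned at the next
            // message, plus the remote timestamp of the batch it belongs to.
            // the callback must read exactly one message before the next call.
            while (unbatcher.GetNextMessage(out NetworkReader message, out double remoteTimeStamp))
                readOneMessage(message, remoteTimeStamp);
            return true;
        }
    }
}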


@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 328562d71e1c45c58581b958845aa7a4
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
userData:
assetBundleName:
assetBundleVariant: