I'm currently working on a client-server application. The server renders images, compresses them using Nvidia's H.264 encoder, and sends them to a client. On the client side the image is decompressed and displayed. I'm using wrappers for both TCP and UDP Berkeley/Windows sockets. UDP works just fine; all pictures are displayed properly. When switching to TCP transmission, some random distortion occurs — not the whole time, just on some occasions. The distortion usually looks like this:
http://de.tinypic.com/r/14sedf6/8
It stays for ~5 to 10 frames (at 60 fps). The site http://www.onsip.com/about-voip/sip/udp-versus-tcp-for-voip states that using TCP for audio transmission leads to a kind of jitter that is "unacceptable [...] for the end user." Do you think jitter is the reason for the observed image distortion, or do you have any other clues on this?
This is the code of the TCP wrapper. Functions like Bind() are implemented in the superclass and should work, because the UDP wrapper uses them too. :)
// Initialises the Winsock library (version 2.2) and marks this socket as
// not yet created (-1). Note: WSAStartup() is reference-counted, so calling
// it once per TcpSocket instance is legal as long as each call is matched
// by exactly one WSACleanup().
TcpSocket::TcpSocket()
{
    WSADATA wsaData;
    // BUG FIX: the original stored the WSAStartup() result in an unused
    // variable and ignored failures; report them so later socket() errors
    // are diagnosable.
    int result = WSAStartup(MAKEWORD(2,2), &wsaData);
    if (result != 0)
        cout << "WSAStartup failed: " << result << endl;
    m_Sock = -1;
}
// Creates an IPv4 TCP (stream) socket and stores the descriptor in m_Sock.
// Returns true on success, false on failure.
bool TcpSocket::Create()
{
    m_Sock = socket(AF_INET, SOCK_STREAM, 0);
    // BUG FIX: the original tested `> 0`, which wrongly treats a valid
    // descriptor value of 0 as a failure and does not match the `== -1`
    // error convention used by the rest of this class (accept() in
    // Accept(), the sentinel set in the constructor).
    return m_Sock != -1;
}
// Puts the socket into the listening state with a backlog of `que`
// pending connections. Returns true on success, false otherwise.
bool TcpSocket::Listen(int que)
{
    const int result = listen(m_Sock, que);
    return result == 0;
}
// Accepts one pending connection on this (listening) socket and stores the
// resulting connected socket and peer address in clientSock.
// Returns true on success, false on failure.
bool TcpSocket::Accept(TcpSocket &clientSock)
{
    int size = sizeof(struct sockaddr);
    clientSock.m_Sock = accept(m_Sock,
        (struct sockaddr *) &clientSock.m_SockAddr, (socklen_t *) &size);
    if (clientSock.m_Sock == -1)
    {
        // BUG FIX: the original `if` had no braces, so `WSACleanup()` and
        // `return false;` executed unconditionally — Accept() ALWAYS
        // returned false, and Winsock was deinitialised process-wide after
        // every accept. WSACleanup() must not be called here at all; it
        // belongs with the matching WSAStartup() (e.g. in the destructor).
        cout << "accept failed: " << WSAGetLastError() << endl;
        return false;
    }
    return true;
}
// Resolves `address` (host name or dotted IPv4 string) and connects this
// socket to it on `port`. Returns true on success, false on failure.
bool TcpSocket::Connect(string address, int port)
{
    // BUG FIX: the original wrapped this in try/catch(int), but
    // gethostbyname() reports failure by returning NULL — it never throws
    // an int, so the catch block was dead code.
    struct hostent *hostPtr = gethostbyname(address.c_str());
    if (hostPtr == NULL || hostPtr->h_addr_list[0] == NULL)
        return false;

    struct sockaddr_in sockAddr;
    // Zero the whole structure so padding / sin_zero are not left
    // uninitialised (the original never cleared it).
    memset(&sockAddr, 0, sizeof(sockAddr));
    sockAddr.sin_family = AF_INET;
    sockAddr.sin_port = htons(port);
    // Copy the first resolved address directly into sin_addr. The original
    // converted it to a dotted string with inet_ntoa() and re-parsed it with
    // inet_addr(), which is redundant — and its `add.c_str() == ""` check
    // compared a pointer against a string literal, so it could never fire.
    sockAddr.sin_addr = *(struct in_addr *) hostPtr->h_addr_list[0];

    return connect(m_Sock, (struct sockaddr *) &sockAddr,
                   sizeof(sockAddr)) == 0;
}
// Reads AT MOST buffLen bytes from the connected socket into buff.
// Returns the number of bytes actually received, 0 on orderly peer
// shutdown, or -1 (SOCKET_ERROR) on failure.
//
// IMPORTANT: TCP is a byte stream with no message boundaries. A single
// recv() may return only part of one "frame" as sent by the peer, or the
// tail of one frame glued to the head of the next. Callers that assume
// one Receive() call == one sent message (as the client code below does)
// will intermittently decode truncated/merged H.264 data — which matches
// the sporadic image distortion described above. The payload must be
// framed (e.g. a length prefix) and this call looped until a complete
// frame has been read.
int TcpSocket::Receive(char *buff, int buffLen)
{
return recv(m_Sock, buff, buffLen, 0);
}
// Writes AT MOST len bytes from buff to the connected socket.
// Returns the number of bytes actually queued for transmission, or -1
// (SOCKET_ERROR) on failure.
//
// NOTE: send() on a stream socket may accept fewer than len bytes (e.g.
// when the send buffer is nearly full). Callers sending large compressed
// frames must check the return value and loop until all len bytes have
// been handed to the kernel, otherwise frames arrive truncated.
int TcpSocket::Send(const char *buff, int len)
{
return send(m_Sock, buff, len, 0);
}
Thank you very much for any help, clues or suggestions!
Edit1: I get the packets on client side like following:
//This is the TCP read() call which should block until something is received
int i = server->Receive(serverMessage, 100000);
//Passing the received buffer to the decoder. sizeof(UINT8) accounts for an identifier indicating which kind of packet was received,
//sizeof(int) accounts for the length field of the actual payload
m_decoder->parseData((const unsigned char*)(serverMessage + sizeof(UINT8) + sizeof(int)), size);