标签:
有RTSPServer,当然就要有RTSPClient。
如果按照Server端的架构,想一下Client端各部分的组成可能是这样:
因为要连接RTSP server,所以RTSPClient要有TCP socket。当获取到server端的DESCRIBE后,应建立一个对应于ServerMediaSession的ClientMediaSession。对应每个Track,ClientMediaSession中应建立ClientMediaSubsession。当建立RTP Session时,应分别为所拥有的Track发送SETUP请求连接,在获取回应后,分别为所有的track建立RTP socket,然后请求PLAY,然后开始传输数据。事实是这样吗?只能分析代码了。
testProgs中的OpenRTSP是典型的RTSPClient示例,所以分析它吧。
main()函数在playCommon.cpp文件中。main()的流程比较简单,跟服务端差别不大:建立任务计划对象--建立环境对象--处理用户输入的参数(RTSP地址)--创建RTSPClient实例--发出第一个RTSP请求(可能是OPTIONS也可能是DESCRIBE)--进入Loop。
RTSP的tcp连接是在发送第一个RTSP请求时才建立的,在RTSPClient的那几个发请求的函数sendXXXXXXCommand()中最终都调用sendRequest(),sendRequest()中会跟据情况建立起TCP连接。在建立连接时马上向任务计划中加入处理从这个TCP接收数据的socket handler:RTSPClient::incomingDataHandler()。
下面就是发送RTSP请求,OPTIONS就不必看了,从请求DESCRIBE开始:
- void getSDPDescription(RTSPClient::responseHandler* afterFunc)
- {
- ourRTSPClient->sendDescribeCommand(afterFunc, ourAuthenticator);
- }
- unsigned RTSPClient::sendDescribeCommand(responseHandler* responseHandler,
- Authenticator* authenticator)
- {
- if (authenticator != NULL)
- fCurrentAuthenticator = *authenticator;
- return sendRequest(new RequestRecord(++fCSeq, "DESCRIBE", responseHandler));
- }
参数responseHandler是调用者提供的回调函数,用于在处理完请求的回应后再调用之。并且在这个回调函数中会发出下一个请求--所有的请求都是这样依次发出的。使用回调函数的原因主要是因为socket的发送与接收不是同步进行的。类RequestRecord就代表一个请求,它不但保存了RTSP请求相关的信息,而且保存了请求完成后的回调函数--就是responseHandler。有些请求发出时还没建立tcp连接,不能立即发送,则加入fRequestsAwaitingConnection队列;有些发出后要等待Server端的回应,就加入fRequestsAwaitingResponse队列,当收到回应后再从队列中把它取出。
由于RTSPClient::sendRequest()太复杂,就不列其代码了,其无非是建立起RTSP请求字符串然后用TCP socket发送之。
现在看一下收到DESCRIBE的回应后如何处理它。理论上是跟据媒体信息建立起MediaSession了,看看是不是这样:
- void continueAfterDESCRIBE(RTSPClient*, int resultCode, char* resultString)
- {
- char* sdpDescription = resultString;
-
-
- session = MediaSession::createNew(*env, sdpDescription);
- delete[] sdpDescription;
-
-
- MediaSubsessionIterator iter(*session);
- MediaSubsession *subsession;
- Boolean madeProgress = False;
- char const* singleMediumToTest = singleMedium;
-
- while ((subsession = iter.next()) != NULL) {
-
- if (subsession->initiate(simpleRTPoffsetArg)) {
- madeProgress = True;
- if (subsession->rtpSource() != NULL) {
-
-
-
- unsigned const thresh = 1000000;
- subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);
-
-
-
-
-
- int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
- unsigned curBufferSize = getReceiveBufferSize(*env,socketNum);
- if (socketInputBufferSize > 0 || fileSinkBufferSize > curBufferSize) {
- unsigned newBufferSize = socketInputBufferSize > 0 ?
- socketInputBufferSize : fileSinkBufferSize;
- newBufferSize = setReceiveBufferTo(*env, socketNum, newBufferSize);
- if (socketInputBufferSize > 0) {
- *env
- << "Changed socket receive buffer size for the \""
- << subsession->mediumName() << "/"
- << subsession->codecName()
- << "\" subsession from " << curBufferSize
- << " to " << newBufferSize << " bytes\n";
- }
- }
- }
- }
- }
- if (!madeProgress)
- shutdown();
-
-
-
- setupStreams();
- }
此函数被删掉很多枝叶,所以如果发现与原版代码不同,请不要惊讶。
的确在DESCRIBE回应后建立起了MediaSession,而且我们发现Client端的MediaSession不叫ClientMediaSesson,SubSession亦不是。我现在很想看看MediaSession与MediaSubsession的建立过程:
- MediaSession* MediaSession::createNew(UsageEnvironment& env,char const* sdpDescription)
- {
- MediaSession* newSession = new MediaSession(env);
- if (newSession != NULL) {
- if (!newSession->initializeWithSDP(sdpDescription)) {
- delete newSession;
- return NULL;
- }
- }
-
- return newSession;
- }
我可以告诉你,MediaSession的构造函数没什么可看的,那么就来看initializeWithSDP():
内容太多,不必看了,我大体说说吧:就是处理SDP,跟据每一行来初始化一些变量。当遇到"m="行时,就建立一个MediaSubsession,然后再处理这一行之下,下一个"m="行之上的行们,用这些参数初始化MediaSubsession的变量。循环往复,直到尽头。然而这其中并没有建立RTP socket。我们发现在continueAfterDESCRIBE()中,创建MediaSession之后又调用了subsession->initiate(simpleRTPoffsetArg),那么socket是不是在它里面创建的呢?look:
- Boolean MediaSubsession::initiate(int useSpecialRTPoffset)
- {
- if (fReadSource != NULL)
- return True;
-
- do {
- if (fCodecName == NULL) {
- env().setResultMsg("Codec is unspecified");
- break;
- }
-
-
-
-
- struct in_addr tempAddr;
- tempAddr.s_addr = connectionEndpointAddress();
-
-
- if (fClientPortNum != 0) {
-
-
- fClientPortNum = fClientPortNum & ~1;
- if (isSSM()) {
- fRTPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr,
- fClientPortNum);
- } else {
- fRTPSocket = new Groupsock(env(), tempAddr, fClientPortNum,
- 255);
- }
- if (fRTPSocket == NULL) {
- env().setResultMsg("Failed to create RTP socket");
- break;
- }
-
-
- portNumBits const rtcpPortNum = fClientPortNum | 1;
- if (isSSM()) {
- fRTCPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr,
- rtcpPortNum);
- } else {
- fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum, 255);
- }
- if (fRTCPSocket == NULL) {
- char tmpBuf[100];
- sprintf(tmpBuf, "Failed to create RTCP socket (port %d)",
- rtcpPortNum);
- env().setResultMsg(tmpBuf);
- break;
- }
- } else {
-
-
-
-
-
-
- HashTable* socketHashTable = HashTable::create(ONE_WORD_HASH_KEYS);
- if (socketHashTable == NULL)
- break;
- Boolean success = False;
- NoReuse dummy;
-
- while (1) {
-
- if (isSSM()) {
- fRTPSocket = new Groupsock(env(), tempAddr,
- fSourceFilterAddr, 0);
- } else {
- fRTPSocket = new Groupsock(env(), tempAddr, 0, 255);
- }
- if (fRTPSocket == NULL) {
- env().setResultMsg(
- "MediaSession::initiate(): unable to create RTP and RTCP sockets");
- break;
- }
-
-
- Port clientPort(0);
- if (!getSourcePort(env(), fRTPSocket->socketNum(),
- clientPort)) {
- break;
- }
- fClientPortNum = ntohs(clientPort.num());
- if ((fClientPortNum & 1) != 0) {
-
- unsigned key = (unsigned) fClientPortNum;
- Groupsock* existing = (Groupsock*) socketHashTable->Add(
- (char const*) key, fRTPSocket);
- delete existing;
- continue;
- }
-
-
- portNumBits rtcpPortNum = fClientPortNum | 1;
- if (isSSM()) {
- fRTCPSocket = new Groupsock(env(), tempAddr,
- fSourceFilterAddr, rtcpPortNum);
- } else {
- fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum,
- 255);
- }
- if (fRTCPSocket != NULL && fRTCPSocket->socketNum() >= 0) {
-
- success = True;
- break;
- } else {
-
- delete fRTCPSocket;
-
-
- unsigned key = (unsigned) fClientPortNum;
- Groupsock* existing = (Groupsock*) socketHashTable->Add(
- (char const*) key, fRTPSocket);
- delete existing;
- continue;
- }
- }
-
-
- Groupsock* oldGS;
- while ((oldGS = (Groupsock*) socketHashTable->RemoveNext()) != NULL) {
- delete oldGS;
- }
- delete socketHashTable;
-
- if (!success)
- break;
- }
-
-
-
- unsigned rtpBufSize = fBandwidth * 25 / 2;
- if (rtpBufSize < 50 * 1024)
- rtpBufSize = 50 * 1024;
- increaseReceiveBufferTo(env(), fRTPSocket->socketNum(), rtpBufSize);
-
-
- if (isSSM()) {
-
- fRTCPSocket->changeDestinationParameters(fSourceFilterAddr, 0, ~0);
- }
-
-
-
- if (!createSourceObjects(useSpecialRTPoffset))
- break;
-
- if (fReadSource == NULL) {
- env().setResultMsg("Failed to create read source");
- break;
- }
-
-
- if (fRTPSource != NULL) {
-
-
- unsigned totSessionBandwidth =
- fBandwidth ? fBandwidth + fBandwidth / 20 : 500;
- fRTCPInstance = RTCPInstance::createNew(env(), fRTCPSocket,
- totSessionBandwidth, (unsigned char const*) fParent.CNAME(),
- NULL /* 客户端只接收,不发送,故sink传NULL */, fRTPSource);
- if (fRTCPInstance == NULL) {
- env().setResultMsg("Failed to create RTCP instance");
- break;
- }
- }
-
- return True;
- } while (0);
-
-
- delete fRTPSocket;
- fRTPSocket = NULL;
- delete fRTCPSocket;
- fRTCPSocket = NULL;
- Medium::close(fRTCPInstance);
- fRTCPInstance = NULL;
- Medium::close(fReadSource);
- fReadSource = fRTPSource = NULL;
- fClientPortNum = 0;
- return False;
- }
是的,在其中创建了RTP/RTCP socket并创建了RTPSource,创建RTPSource在函数createSourceObjects()中,看一下:
- Boolean MediaSubsession::createSourceObjects(int useSpecialRTPoffset)
- {
- do {
-
- if (strcmp(fProtocolName, "UDP") == 0) {
-
- fReadSource = BasicUDPSource::createNew(env(), fRTPSocket);
- fRTPSource = NULL;
-
- if (strcmp(fCodecName, "MP2T") == 0) {
- fReadSource = MPEG2TransportStreamFramer::createNew(env(),
- fReadSource);
-
- }
- } else {
-
-
-
-
- Boolean createSimpleRTPSource = False;
- Boolean doNormalMBitRule = False;
- if (strcmp(fCodecName, "QCELP") == 0) {
- fReadSource = QCELPAudioRTPSource::createNew(env(), fRTPSocket,
- fRTPSource, fRTPPayloadFormat, fRTPTimestampFrequency);
-
- } else if (strcmp(fCodecName, "AMR") == 0) {
- fReadSource = AMRAudioRTPSource::createNew(env(), fRTPSocket,
- fRTPSource, fRTPPayloadFormat, 0 /* isWideband: 窄带AMR */,
- fNumChannels, fOctetalign, fInterleaving,
- fRobustsorting, fCRC);
-
- } else if (strcmp(fCodecName, "AMR-WB") == 0) {
- fReadSource = AMRAudioRTPSource::createNew(env(), fRTPSocket,
- fRTPSource, fRTPPayloadFormat, 1 /* isWideband: 宽带AMR-WB */,
- fNumChannels, fOctetalign, fInterleaving,
- fRobustsorting, fCRC);
-
- } else if (strcmp(fCodecName, "MPA") == 0) {
- fReadSource = fRTPSource = MPEG1or2AudioRTPSource::createNew(
- env(), fRTPSocket, fRTPPayloadFormat,
- fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "MPA-ROBUST") == 0) {
- fRTPSource = MP3ADURTPSource::createNew(env(), fRTPSocket,
- fRTPPayloadFormat, fRTPTimestampFrequency);
- if (fRTPSource == NULL)
- break;
-
-
- MP3ADUdeinterleaver* deinterleaver = MP3ADUdeinterleaver::createNew(
- env(), fRTPSource);
- if (deinterleaver == NULL)
- break;
-
-
- fReadSource = MP3FromADUSource::createNew(env(), deinterleaver);
- } else if (strcmp(fCodecName, "X-MP3-DRAFT-00") == 0) {
-
-
- fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket,
- fRTPPayloadFormat, fRTPTimestampFrequency,
- "audio/MPA-ROBUST");
- if (fRTPSource == NULL)
- break;
-
-
- fReadSource = MP3FromADUSource::createNew(env(), fRTPSource,
- False /* 无交织 */);
- } else if (strcmp(fCodecName, "MP4A-LATM") == 0) {
- fReadSource = fRTPSource = MPEG4LATMAudioRTPSource::createNew(
- env(), fRTPSocket, fRTPPayloadFormat,
- fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "AC3") == 0
- || strcmp(fCodecName, "EAC3") == 0) {
- fReadSource = fRTPSource = AC3AudioRTPSource::createNew(env(),
- fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "MP4V-ES") == 0) {
- fReadSource = fRTPSource = MPEG4ESVideoRTPSource::createNew(
- env(), fRTPSocket, fRTPPayloadFormat,
- fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "MPEG4-GENERIC") == 0) {
- fReadSource = fRTPSource = MPEG4GenericRTPSource::createNew(
- env(), fRTPSocket, fRTPPayloadFormat,
- fRTPTimestampFrequency, fMediumName, fMode, fSizelength,
- fIndexlength, fIndexdeltalength);
- } else if (strcmp(fCodecName, "MPV") == 0) {
- fReadSource = fRTPSource = MPEG1or2VideoRTPSource::createNew(
- env(), fRTPSocket, fRTPPayloadFormat,
- fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "MP2T") == 0) {
- fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket,
- fRTPPayloadFormat, fRTPTimestampFrequency, "video/MP2T",
- 0, False);
- fReadSource = MPEG2TransportStreamFramer::createNew(env(),
- fRTPSource);
-
- } else if (strcmp(fCodecName, "H261") == 0) {
- fReadSource = fRTPSource = H261VideoRTPSource::createNew(env(),
- fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "H263-1998") == 0
- || strcmp(fCodecName, "H263-2000") == 0) {
- fReadSource = fRTPSource = H263plusVideoRTPSource::createNew(
- env(), fRTPSocket, fRTPPayloadFormat,
- fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "H264") == 0) {
- fReadSource = fRTPSource = H264VideoRTPSource::createNew(env(),
- fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "DV") == 0) {
- fReadSource = fRTPSource = DVVideoRTPSource::createNew(env(),
- fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency);
- } else if (strcmp(fCodecName, "JPEG") == 0) {
- fReadSource = fRTPSource = JPEGVideoRTPSource::createNew(env(),
- fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency,
- videoWidth(), videoHeight());
- } else if (strcmp(fCodecName, "X-QT") == 0
- || strcmp(fCodecName, "X-QUICKTIME") == 0) {
-
-
- char* mimeType = new char[strlen(mediumName())
- + strlen(codecName()) + 2];
- sprintf(mimeType, "%s/%s", mediumName(), codecName());
- fReadSource = fRTPSource = QuickTimeGenericRTPSource::createNew(
- env(), fRTPSocket, fRTPPayloadFormat,
- fRTPTimestampFrequency, mimeType);
- delete[] mimeType;
- } else if (strcmp(fCodecName, "PCMU") == 0
- || strcmp(fCodecName, "GSM") == 0
- || strcmp(fCodecName, "DVI4") == 0
- || strcmp(fCodecName, "PCMA") == 0
- || strcmp(fCodecName, "MP1S") == 0
- || strcmp(fCodecName, "MP2P") == 0
- || strcmp(fCodecName, "L8") == 0
- || strcmp(fCodecName, "L16") == 0
- || strcmp(fCodecName, "L20") == 0
- || strcmp(fCodecName, "L24") == 0
- || strcmp(fCodecName, "G726-16") == 0
- || strcmp(fCodecName, "G726-24") == 0
- || strcmp(fCodecName, "G726-32") == 0
- || strcmp(fCodecName, "G726-40") == 0
- || strcmp(fCodecName, "SPEEX") == 0
- || strcmp(fCodecName, "T140") == 0
- || strcmp(fCodecName, "DAT12") == 0
- ) {
- createSimpleRTPSource = True;
- useSpecialRTPoffset = 0;
- } else if (useSpecialRTPoffset >= 0) {
-
-
- createSimpleRTPSource = True;
- } else {
- env().setResultMsg(
- "RTP payload format unknown or not supported");
- break;
- }
-
- if (createSimpleRTPSource) {
- char* mimeType = new char[strlen(mediumName())
- + strlen(codecName()) + 2];
- sprintf(mimeType, "%s/%s", mediumName(), codecName());
- fReadSource = fRTPSource = SimpleRTPSource::createNew(env(),
- fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency,
- mimeType, (unsigned) useSpecialRTPoffset,
- doNormalMBitRule);
- delete[] mimeType;
- }
- }
-
- return True;
- } while (0);
-
- return False;
- }
可以看到,这个函数里主要是跟据前面分析出的媒体和传输信息建立合适的Source。
socket建立了,Source也创建了,下一步应该是连接Sink,形成一个流。到此为止还未看到Sink的影子,应该是在下一步SETUP中建立,我们看到在continueAfterDESCRIBE()的最后调用了setupStreams(),那么就来探索一下setupStreams():
- void setupStreams()
- {
- static MediaSubsessionIterator* setupIter = NULL;
- if (setupIter == NULL)
- setupIter = new MediaSubsessionIterator(*session);
-
-
- while ((subsession = setupIter->next()) != NULL) {
-
- if (subsession->clientPortNum() == 0)
- continue;
-
-
-
-
- setupSubsession(subsession, streamUsingTCP, continueAfterSETUP);
- return;
- }
-
-
-
- delete setupIter;
- if (!madeProgress)
- shutdown();
-
-
-
-
- if (createReceivers) {
- if (outputQuickTimeFile) {
-
- qtOut = QuickTimeFileSink::createNew(*env, *session, "stdout",
- fileSinkBufferSize, movieWidth, movieHeight, movieFPS,
- packetLossCompensate, syncStreams, generateHintTracks,
- generateMP4Format);
- if (qtOut == NULL) {
- *env << "Failed to create QuickTime file sink for stdout: "
- << env->getResultMsg();
- shutdown();
- }
-
- qtOut->startPlaying(sessionAfterPlaying, NULL);
- } else if (outputAVIFile) {
-
- aviOut = AVIFileSink::createNew(*env, *session, "stdout",
- fileSinkBufferSize, movieWidth, movieHeight, movieFPS,
- packetLossCompensate);
- if (aviOut == NULL) {
- *env << "Failed to create AVI file sink for stdout: "
- << env->getResultMsg();
- shutdown();
- }
-
- aviOut->startPlaying(sessionAfterPlaying, NULL);
- } else {
-
- madeProgress = False;
- MediaSubsessionIterator iter(*session);
- while ((subsession = iter.next()) != NULL) {
- if (subsession->readSource() == NULL)
- continue;
-
-
- char outFileName[1000];
- if (singleMedium == NULL) {
-
-
- static unsigned streamCounter = 0;
- snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d",
- fileNamePrefix, subsession->mediumName(),
- subsession->codecName(), ++streamCounter);
- } else {
- sprintf(outFileName, "stdout");
- }
- FileSink* fileSink;
- if (strcmp(subsession->mediumName(), "audio") == 0
- && (strcmp(subsession->codecName(), "AMR") == 0
- || strcmp(subsession->codecName(), "AMR-WB")
- == 0)) {
-
- fileSink = AMRAudioFileSink::createNew(*env, outFileName,
- fileSinkBufferSize, oneFilePerFrame);
- } else if (strcmp(subsession->mediumName(), "video") == 0
- && (strcmp(subsession->codecName(), "H264") == 0)) {
-
- fileSink = H264VideoFileSink::createNew(*env, outFileName,
- subsession->fmtp_spropparametersets(),
- fileSinkBufferSize, oneFilePerFrame);
- } else {
-
- fileSink = FileSink::createNew(*env, outFileName,
- fileSinkBufferSize, oneFilePerFrame);
- }
- subsession->sink = fileSink;
- if (subsession->sink == NULL) {
- *env << "Failed to create FileSink for \"" << outFileName
- << "\": " << env->getResultMsg() << "\n";
- } else {
- if (singleMedium == NULL) {
- *env << "Created output file: \"" << outFileName
- << "\"\n";
- } else {
- *env << "Outputting data from the \""
- << subsession->mediumName() << "/"
- << subsession->codecName()
- << "\" subsession to 'stdout'\n";
- }
-
- if (strcmp(subsession->mediumName(), "video") == 0
- && strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
- subsession->fmtp_config() != NULL) {
-
-
-
- unsigned configLen;
- unsigned char* configData
- = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
- fileSink->addData(configData, configLen, timeNow);
- delete[] configData;
- }
-
-
- subsession->sink->startPlaying(*(subsession->readSource()),
- subsessionAfterPlaying, subsession);
-
-
-
- if (subsession->rtcpInstance() != NULL) {
- subsession->rtcpInstance()->setByeHandler(
- subsessionByeHandler, subsession);
- }
-
- madeProgress = True;
- }
- }
- if (!madeProgress)
- shutdown();
- }
- }
-
-
- if (duration == 0) {
- if (scale > 0)
- duration = session->playEndTime() - initialSeekTime;
- else if (scale < 0)
- duration = initialSeekTime;
- }
- if (duration < 0)
- duration = 0.0;
-
- endTime = initialSeekTime;
- if (scale > 0) {
- if (duration <= 0)
- endTime = -1.0f;
- else
- endTime = initialSeekTime + duration;
- } else {
- endTime = initialSeekTime - duration;
- if (endTime < 0)
- endTime = 0.0f;
- }
-
-
- startPlayingSession(session, initialSeekTime, endTime, scale,
- continueAfterPLAY);
- }
仔细看看注释,应很容易了解此函数。
转自:http://blog.csdn.net/niu_gao/article/details/6927461
(转)Live555中RTSPClient分析
标签:
原文地址:http://www.cnblogs.com/lihaiping/p/4793741.html