namespace AppCom
{
class CAppComSession : public boost::enable_shared_from_this<CAppComSession>
{
public:
CAppComSession(boost::asio::io_service &io_service) : m_socket(io_service)
{
m_bRunning = true;
}
~CAppComSession()
{
// m_thread->join();
// m_socket.close();
}
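// Launch a detached worker thread to serve this connection; after detach()
// nothing joins the thread and Start() returns immediately.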
void Start()
{
// Disable Nagle's algorithm so small request/response packets are sent immediately.
// (The option object should not be static: a function-local static shared across
// sessions is unnecessary, and its initialization is not thread-safe pre-C++11.)
boost::asio::ip::tcp::no_delay option(true);
m_socket.set_option(option);
m_thread.reset(new boost::thread(boost::bind(&CAppComSession::StartThread, this)));
m_thread->detach();
}
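// Worker thread body: the do/while(0) runs exactly once, so each connection
// handles a single request/response exchange and then the thread exits.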
void StartThread()
{
do
{
boost::system::error_code ec;
char szRecvBuf[10240] = { 0 };
// Read at most sizeof-1 bytes so the buffer is always null-terminated for logging.
size_t nReadLen = m_socket.read_some(boost::asio::buffer(szRecvBuf, sizeof(szRecvBuf) - 1), ec);
if (ec)
{
    LOG4CPLUS_ERROR(LOGGERTAG, "boost asio read error: " << ec.message());
    break;
}
LOG4CPLUS_INFO(LOGGERTAG, "recv app message (" << nReadLen << " bytes): " << szRecvBuf);
std::string strResponse;
int nRet = ProcessAPPMsg(szRecvBuf, strResponse);
if (0 != nRet) break;
size_t nLen = boost::asio::write(m_socket, boost::asio::buffer(strResponse), ec);
if (ec) break;
LOG4CPLUS_INFO(LOGGERTAG, "send app response message (" << nLen << " bytes): " << strResponse);
} while (0);
m_bRunning = false;
//m_socket.close();
}
boost::asio::ip::tcp::socket &GetSocket()
{
return m_socket;
}
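// NOTE: m_bRunning is written by the worker thread and read by the acceptor
// thread without synchronization; in production it should be an atomic flag
// (e.g. boost::atomic<bool>).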
bool GetCurThreadRunningStatus()
{
return m_bRunning;
}
private:
boost::asio::ip::tcp::socket m_socket;
bool m_bRunning;
boost::shared_ptr<boost::thread> m_thread;
};
typedef boost::shared_ptr<CAppComSession> CPtrSession;
class CAppComServer
{
public:
CAppComServer(boost::asio::io_service &io_service, boost::asio::ip::tcp::endpoint &endpoint)
:m_ioService(io_service), m_acceptor(io_service, endpoint)
{
CPtrSession newSession(new CAppComSession(io_service));
m_vecThreadInstance.push_back(newSession);
m_acceptor.async_accept(newSession->GetSocket(),
boost::bind(&CAppComServer::HandleAccept,
this,
newSession,
boost::asio::placeholders::error));
}
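// Called for each completed accept: start the new session's worker thread,
// then post another async_accept for the next client. With the push_back
// below commented out, the shared_ptr bound into the accept handler is the
// only owner of createNewSession -- see the error analysis after the code.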
void HandleAccept(CPtrSession newSession, const boost::system::error_code &error)
{
if (error) return;
newSession->Start();
//ClearHasEndConnection();
CPtrSession createNewSession(new CAppComSession(m_ioService));
//m_vecThreadInstance.push_back(createNewSession);
m_acceptor.async_accept(createNewSession->GetSocket(),
boost::bind(&CAppComServer::HandleAccept,
this,
createNewSession,
boost::asio::placeholders::error));
}
void ClearHasEndConnection()
{
    // Sweep the session list and drop every session whose worker thread has
    // finished, using erase()'s return value to keep the iterator valid.
    // (The original erased at most one session per call and then broke out.)
    std::vector<CPtrSession>::iterator iter = m_vecThreadInstance.begin();
    while (iter != m_vecThreadInstance.end())
    {
        if (!(*iter)->GetCurThreadRunningStatus())
            iter = m_vecThreadInstance.erase(iter);
        else
            ++iter;
    }
}
void run()
{
m_ioService.run();
}
private:
boost::asio::io_service &m_ioService;
std::vector<CPtrSession> m_vecThreadInstance;
boost::asio::ip::tcp::acceptor m_acceptor;
};
void ListenAppComFunc();
int StartAppComService();
}
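The two free functions declared at the end of the namespace, ListenAppComFunc and StartAppComService, are not defined in the post. A minimal sketch of how they might be wired up under this design, assuming a hypothetical hard-coded listen port of 9000:
namespace AppCom
{
void ListenAppComFunc()
{
    try
    {
        boost::asio::io_service io_service;
        // Hypothetical port; the original post does not specify one.
        boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::tcp::v4(), 9000);
        CAppComServer server(io_service, endpoint);
        server.run(); // blocks, dispatching accept handlers
    }
    catch (const std::exception &e)
    {
        LOG4CPLUS_ERROR(LOGGERTAG, "app com service exception: " << e.what());
    }
}
int StartAppComService()
{
    // Run the accept loop in a background thread so the caller is not blocked.
    boost::thread listener(&ListenAppComFunc);
    listener.detach();
    return 0;
}
}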
Notes
This design differs significantly from the earlier asio asynchronous server:
1) The socket does not have to be closed explicitly, and there is no need to worry about how the worker thread returns.
2) There is no longer any need to keep the request-handling instances, so managing them all becomes unnecessary; the server's accept loop does not have to care when a session exits.
Error warning:
In a real deployment, the m_socket.read_some(boost::asio::buffer(szRecvBuf), ec) call can fail with socket error 10038 (WSAENOTSOCK), which means an operation was attempted on something that is not a socket.
Cause analysis:
When the worker thread is detached, the accept call begins waiting for the next request. createNewSession is a smart pointer, so once execution leaves the function its destructor runs and the object is cleaned up; at that point m_socket has already been destroyed and most of the member variables can no longer be used, yet the detached thread is still running against them. m_vecThreadInstance.push_back(createNewSession) would keep the instance alive and prevent the destructor from being invoked immediately, but if you take that route you must periodically clean up the objects that have finished serving yourself (which is what ClearHasEndConnection is for).
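Since CAppComSession already derives from boost::enable_shared_from_this, another way out (a minimal sketch, not from the original post) is to bind shared_from_this() into the worker thread instead of the raw this pointer, so the session owns itself for as long as its thread runs:
void Start()
{
    boost::asio::ip::tcp::no_delay option(true);
    m_socket.set_option(option);
    // Binding shared_from_this() copies a shared_ptr into the thread functor:
    // the session (and its m_socket) stays alive until StartThread returns,
    // even after the acceptor's last reference goes out of scope.
    m_thread.reset(new boost::thread(
        boost::bind(&CAppComSession::StartThread, shared_from_this())));
    m_thread->detach();
}
With this change the session is destroyed by the detached thread's own shared_ptr when StartThread returns, so neither keeping sessions in m_vecThreadInstance nor a periodic ClearHasEndConnection sweep is required for lifetime correctness.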
Reproduced from the fengyuzaitu 51CTO blog. Original link: http://blog.51cto.com/fengyuzaitu/2044782. Please contact the original author for reprint permission.