Tracking clients across multiple servers

Date: 2016-09-04 03:04:07

Tags: multithreading sockets concurrency client-server load-balancing

For the past few months I've been designing a chat system for fun, and I haven't been able to find much on load balancing...

My architecture so far consists of a WebSocket server (though, for simplicity, the WebSocket layer is out of scope for this topic), a MySQL database that stores user accounts and chat information, and a PHP-driven website running on nginx.

I've considered using memcached to hold the list of chats, each referencing its connected clients, but I'm not sure how to handle the message/queue system that tells the other connected clients when a message has been sent or a user has joined/quit (Redis?).
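For the notification part, here's a minimal Redis pub/sub sketch of what I mean (the channel name "chat:lobby" and the message shape are just placeholder assumptions, not part of my actual design):

import redis, json

r = redis.StrictRedis(host = "127.0.0.1", port = 6379)

# Each server subscribes to the channels of the chats its clients have joined.
pubsub = r.pubsub()
pubsub.subscribe("chat:lobby")

# Any server can publish; every subscribed server receives the message and
# can relay it to its own locally connected clients.
r.publish("chat:lobby", json.dumps({"user": 417, "text": "hello"}))

for message in pubsub.listen():
    # listen() also yields subscribe confirmations, hence the type check.
    if message["type"] == "message":
        print(message["data"])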

Ultimately, this concurrency problem has other potential pitfalls. Namely, should I abstract the processing layer away from the socket layer and, within the processing layer, not worry about whether other clients disconnect mid-processing? Should I let the socket layer handle that?

Using my memcached example, I could store all of the relevant client information in that ramdisk and request/update it as I see fit. Would that be an acceptable approach?
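For what it's worth, a minimal python-memcached sketch of that idea (the key scheme "server0:client-<fd>" is an assumption that just mirrors the Redis code in the answer below):

import memcache, json

mc = memcache.Client(["127.0.0.1:11211"])

# One key per connected client, updated whenever the client's state changes.
mc.set("server0:client-417", json.dumps({"__id__": 417, "chat": "lobby"}))

raw = mc.get("server0:client-417")
client = json.loads(raw) if raw else None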

Ideally, I'd like some material to read so I can figure this out myself rather than just getting the answer here; I'd like to take this as a lesson in scalability for the future, in case I ever design something like this again.

1 answer:

Answer 0 (score: 0)

Here's a test server I made:

import multiprocessing
import socket, select
import redis, json

'''
'' The basic idea of this server is to have (cores - 1) worker processes munching data
'' and one "master" process handling all of the client connections and whatnot.
''
'' Scaling out is simple: treat any other process as another server and no
'' cross-coding is required. '''

'''
'' Messages a worker can send back to the master:
''
'' Sending data
''  master_pipe.send( {"__id__": "SEND_DATA", "fileno": "417", "data": "3025561204"} )
''
'' Closing a socket
''  master_pipe.send( {"__id__": "CLOSE_SOCKET", "fileno": 417} ) '''
def Worker(worker_index, worker_queue, master_pipe):
    memory = redis.StrictRedis(host = "127.0.0.1", port = 6379)

    # Block on the queue; the master can shut a worker down by sending None.
    for client_id, *args in iter(worker_queue.get, None):
        raw = memory.get("server0:client-" + str(client_id))
        if raw is None:
            # The client record is already gone (e.g. the socket was closed).
            continue
        client = json.loads(raw.decode("utf-8"))

        if args[0][:5] == "join:":
            # Remember which chat the client is in and add it to that chat's member list.
            client["chat"] = str(args[0][5:])
            memory.set("server0:client-" + str(client_id), json.dumps(client).encode("utf-8", "ignore"))
            memory.lpush("chat:" + str(args[0][5:]), client["__id__"])

        elif args[0][:7] == "online:":
            # Print every client id currently in this client's chat.
            if "chat" in client:
                print(memory.lrange("chat:" + client["chat"], 0, -1))


def Master(master_pipe, workers):
    memory = redis.StrictRedis(host = "127.0.0.1", port = 6379)

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    server.bind(("0.0.0.0", 7777))
    server.listen(socket.SOMAXCONN)

    # One epoll instance watches the listening socket, the pipe from the
    # workers, and every connected client socket.
    epoll = select.epoll()
    epoll.register(server.fileno(), select.EPOLLIN)
    epoll.register(master_pipe.fileno(), select.EPOLLIN)

    sockets, i = {}, 0
    while True:
        for fileno, e_bits in epoll.poll():
            try:
                if fileno == server.fileno():
                    # New connection: register it and create its Redis record.
                    sock, (addr, port) = server.accept()
                    sockets[sock.fileno()] = sock
                    epoll.register(sock.fileno(), select.EPOLLIN | select.EPOLLHUP)

                    client_object = {"__id__": sock.fileno()}
                    memory.set("server0:client-" + str(sock.fileno()), json.dumps(client_object).encode("utf-8", "ignore"))

                elif fileno == master_pipe.fileno():
                    # Control messages sent back by the workers (see the protocol above).
                    print(master_pipe.recv())

                elif e_bits & select.EPOLLIN:
                    recv = sockets[fileno].recv(1024).decode("utf-8", "ignore").rstrip("\r\n")
                    if not recv:
                        # An empty read means the peer closed the connection.
                        raise socket.error
                    if recv == "asdasdasd":
                        # Debug hook: print how many sockets are currently open.
                        print(len(sockets))
                    # Round-robin the payload across the worker processes.
                    workers[i % len(workers)].put( (fileno, recv, ) )

                elif e_bits & (select.EPOLLHUP | select.EPOLLERR):
                    # Hang-up/error without readable data; clean the socket up.
                    raise socket.error

            except socket.error:
                # Tear the client down: unregister, close, and remove its Redis state.
                epoll.unregister(fileno)
                sockets[fileno].close()
                del sockets[fileno]

                raw = memory.get("server0:client-" + str(fileno))
                if raw is not None:
                    client = json.loads(raw.decode("utf-8"))
                    if "chat" in client:
                        memory.lrem("chat:" + client["chat"], 0, client["__id__"])

                    memory.delete("server0:client-" + str(fileno))

            finally:
                i += 1


if __name__ == "__main__":
    workers = []
    master_pipe, worker_pipe = multiprocessing.Pipe()

    # Spawn one worker (each with its own queue) per core, leaving a core for the master.
    for i in range( max(1, multiprocessing.cpu_count() - 1) ):
        workers.append(multiprocessing.Queue())

        p = multiprocessing.Process(target = Worker, args = (i, workers[-1], worker_pipe, ))
        p.daemon = True
        p.start()

    Master(master_pipe, workers)
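To poke at it by hand, here's a throwaway client sketch; it assumes the server above is running locally on port 7777 and speaks the two commands the workers understand, "join:<chat>" and "online:":

import socket, time

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("127.0.0.1", 7777))

client.send(b"join:lobby")   # join the "lobby" chat
time.sleep(0.1)              # pause so the two sends don't coalesce into one recv()
client.send(b"online:")      # the server process prints the chat's member list

time.sleep(0.1)
client.close()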