Combining SQLAlchemy yield_per and group_by

Asked: 2018-08-24 16:20:55

Tags: python sqlite sqlalchemy

I have a SQLAlchemy database table spanning 24 hours, with up to 1,000,000 rows per hour. An example table is below.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from random import choice

import pandas as pd

Base = declarative_base()


class WebsiteData(Base):
    __tablename__ = 'hourly_website_table'

    id = Column(Integer, primary_key=True)
    user = Column(String(600), index=True)
    website = Column(String(600))
    time_secs = Column(Integer, index=True)

class DataBaseManager:

    def __init__(self, db_loc='sqlite:////home/test/database.db'):
        self.engine = create_engine(db_loc, echo=False)
        self.table = WebsiteData

    def get_session(self):
        # Ensure the schema exists before handing out a session.
        Session = sessionmaker(bind=self.engine)
        session = Session()
        Base.metadata.create_all(self.engine)
        return session

    def get_db_info(self):
        session = self.get_session()
        rows = session.query(self.table).count()
        session.close()
        return rows

    def df_to_hourly_db(self, table_name, df, time_secs):
        # Bulk-insert through the raw DBAPI connection for speed.
        conn = self.engine.raw_connection()
        df['hour'] = time_secs
        query = ("INSERT OR REPLACE INTO %s (user,website,time_secs) "
                 "VALUES (?,?,?)" % table_name)
        # .tolist() converts numpy scalars to native Python types,
        # which sqlite3 requires for parameter binding.
        conn.executemany(query, df[['user', 'website', 'hour']].to_records(index=False).tolist())
        conn.commit()
        conn.close()

def create_df(time_secs=0, users=10000, rows_per_user=100):
    # Build one hour of synthetic data: `users` users with `rows_per_user`
    # website visits each.
    user_arr = [("u%d" % i) for i in range(users)] * rows_per_user
    web_arr = [("www.website_%d" % (time_secs + i)) for i in range(rows_per_user * users)]
    return pd.DataFrame({'user': user_arr, 'website': web_arr})

DBM = DataBaseManager()

# Insert 24 hours of simulated data with a variable row count per hour.
for hour in range(24):
    time_secs = (60 * 24 * 3600) + (hour * 3600)
    df = create_df(time_secs=time_secs, rows_per_user=choice(range(100)))
    DBM.df_to_hourly_db('hourly_website_table', df, time_secs)

The number of rows per hour is variable. To avoid loading the entire table into memory at once, I would like to group the data with group_by(table.time_secs) and then stream each group in turn. Is there some way to combine SQLAlchemy's group_by and yield_per methods to achieve this? I know yield_per lets you yield a set number of rows at a time, but is it possible to yield a different number of rows on each iteration? If not, is there another approach that would accomplish the same thing?
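One direction I have sketched (untested, and stream_hourly_groups is just an illustrative helper name) is to avoid group_by entirely: query the distinct time_secs values first (a tiny result set, at most 24 values here), then stream each hour's rows with a separate filtered query under yield_per:

def stream_hourly_groups(dbm, batch_size=10000):
    # Yield (hour, row_iterator) pairs one hour at a time, so that at most
    # batch_size ORM rows are materialized in memory at any moment.
    session = dbm.get_session()
    try:
        # The distinct hours present in the table; cheap because the
        # result set is small and time_secs is indexed.
        hours = [h for (h,) in session.query(dbm.table.time_secs)
                                      .distinct()
                                      .order_by(dbm.table.time_secs)]
        for hour in hours:
            # yield_per streams this hour's rows in fixed-size batches;
            # the groups can have different sizes, but each database
            # fetch stays bounded.
            rows = session.query(dbm.table)\
                          .filter(dbm.table.time_secs == hour)\
                          .yield_per(batch_size)
            yield hour, rows
    finally:
        session.close()

for hour, rows in stream_hourly_groups(DBM):
    print(hour, sum(1 for _ in rows))

This sidesteps the variable-batch-size question: yield_per still fetches a fixed number of rows per round trip, but the per-hour grouping comes from the outer filter rather than from group_by, which (as far as I understand) collapses each group to a single row and so only suits aggregate queries.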

0 Answers:

No answers yet.