Preliminary setup:
Run cmd as administrator, type net start MongoDB, and press Enter.
Type jupyter notebook, press Enter, and wait for the notebook page to open in the browser.
Basic setup:
import pymongo
import charts
client=pymongo.MongoClient('localhost',27017)
local=client['local']
sample=local['sample']
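A quick sanity check that the connection works and the collection holds data (a minimal sketch; count_documents requires pymongo 3.7+):
print(sample.find_one())            # one raw document, or None if the collection is empty
print(sample.count_documents({}))   # total number of documents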
Filtering and inspecting data with a pipeline:
pipeline = [
    {'$match':{'cates':'手机'}},
    {'$limit':100}
]
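The pipeline is just a list; nothing runs until it is passed to aggregate(). To actually see the matched documents:
for i in sample.aggregate(pipeline):
    print(i)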
Counting and sorting with a pipeline:
pipeline = [
    {'$match':{'$and':[{'pub_date':'2015.12.24'},{'time':3}]}},
    {'$group':{'_id':'$price','counts':{'$sum':1}}},
    {'$sort':{'counts':-1}},
    {'$limit':10}
]
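Run it the same way; each result is one price group with its count (the field names come from the $group stage above, and the example values are hypothetical):
for i in sample.aggregate(pipeline):
    print(i)   # e.g. {'_id': 500, 'counts': 12}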
Using set() to see how many distinct values there are:
look_list = []
for i in sample.aggregate(pipeline):   # assumes a $match-style pipeline whose output documents still carry the 'look' field
    look_list.append(i['look'])
look_index = list(set(look_list))      # set() drops duplicates
print(look_index)
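The same deduplication can be pushed to the server with pymongo's distinct() call (a sketch, assuming the same 'look' field on the raw documents):
look_index = sample.distinct('look')
print(look_index)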
Counting:
the_cate_list=['二手物品', '家具', '办公用品/设备', '文体/户外/乐器', '笔记本', '台式机/配件', '服装/鞋帽/箱包', '图书/音像/软件', '美容/保健', '设备', '数码产品', '家电', '手机', '母婴/儿童用品', '平板电脑']
for cate in the_cate_list:
    pipeline = [{'$match':{'cates':cate}}]
    detailed_list = []                       # one fresh list per category
    for i in sample.aggregate(pipeline):
        detailed_list.append(i['look'])
    count = len(detailed_list)               # number of listings in this category
    look_index = list(set(detailed_list))    # distinct 'look' values for this category
    sample.insert_one({'cate':cate,'count':count,'look':look_index})
    print({'cate':cate,'count':count,'look':look_index})
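The same per-category summary can be computed without looping over every document in Python, using count_documents() and distinct() with a filter (a minimal sketch under the same field assumptions; count_documents requires pymongo 3.7+):
for cate in the_cate_list:
    count = sample.count_documents({'cates': cate})
    look_index = sample.distinct('look', {'cates': cate})
    print({'cate': cate, 'count': count, 'look': look_index})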
pipeline $match and $group (contains, slice, exclude, average):
# 'area' is stored as a list, e.g. {'area':[1,2,3]}
pipeline1 = [
    {'$match':{'$and':[{'pub_date':{'$gte':'2015.12.25','$lte':'2015.12.27'}},{'area':{'$all':['朝阳']}}]}},   # $all: the area list must contain 朝阳
    {'$group':{'_id':{'$slice':['$cates',2,1]},'counts':{'$sum':1}}},   # cates is a list; $slice takes 1 element starting at index 2, i.e. the third from the left
    {'$limit':3}
]
for i in item_info.aggregate(pipeline1):   # item_info is a collection handle, opened the same way as sample above
    print(i)
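Note that each _id here is the one-element list produced by $slice, so a result looks roughly like {'_id': ['手机'], 'counts': 20} (hypothetical values); this is why data_gen below indexes into it with [0].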
pipeline2 = [
    {'$match':{'$and':[{'pub_date':{'$gte':'2015.12.25','$lte':'2015.12.27'}},
                       {'cates':{'$all':['北京二手手机']}},
                       {'look':{'$nin':['-']}}   # $nin: not in, i.e. exclude these values
                      ]}},
    {'$group':{'_id':'$look','avg_price':{'$avg':'$price'}}},   # $avg computes the average price per group
    {'$sort':{'avg_price':-1}}   # sort by the grouped result, highest average first
]
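As before, nothing runs until the pipeline is handed to aggregate():
for i in item_info.aggregate(pipeline2):
    print(i)   # one average price per distinct 'look' value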
Getting values out of the result dict: use indexing.
def data_gen(date1,date2,area,limit):
    pipeline1 = [
        {'$match':{'$and':[{'pub_date':{'$gte':date1,'$lte':date2}},{'area':{'$all':area}}]}},
        {'$group':{'_id':{'$slice':['$cates',2,1]},'counts':{'$sum':1}}},
        {'$sort':{'counts':-1}},   # sort before limiting, so the top categories are the ones kept
        {'$limit':limit}
    ]
    for i in item_info.aggregate(pipeline1):
        data = {
            'name': i['_id'][0],     # indexing is used here: $slice returned a one-element list
            'data': [i['counts']],   # same idea, but the fixed format of 'data' always needs the outer []
            'type': 'column'
        }
        yield data
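data_gen yields one Highcharts-style column series per category, which the charts library imported above can render inline in Jupyter. A usage sketch (the dates, area, limit, and chart title are sample arguments, not fixed values):
series = [data for data in data_gen('2015.12.25','2015.12.27',['朝阳'],5)]
charts.plot(series, show='inline', options=dict(title=dict(text='朝阳')))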