import urllib.request  # Python 3 replacement for urllib2

def get_page(url):
    # Fetch a page and return its text; return "" on any fetch error
    # (broad catch keeps the crawler going, as the original bare except did).
    try:
        req = urllib.request.Request(url)
        return urllib.request.urlopen(req).read().decode('utf-8', errors='ignore')
    except Exception:
        return ""
def get_next_target(page):
    # Return the next link url and the position where the search can
    # resume, or (None, 0) when no '<a href=' remains.
    start_link = page.find('<a href=')
    if start_link == -1:
        return None, 0
    start_quote = page.find('"', start_link)
    end_quote = page.find('"', start_quote + 1)
    url = page[start_quote + 1:end_quote]
    return url, end_quote
def get_all_links(page):
    # Collect every link target on the page.
    links = []
    while True:
        url, endpos = get_next_target(page)
        if url:
            links.append(url)
            page = page[endpos:]
        else:
            break
    return links
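# A quick sanity check for the extractor above; the HTML snippet and urls
# here are made up purely for illustration:
_sample_html = '<a href="http://a.example">a</a> and <a href="http://b.example">b</a>'
assert get_all_links(_sample_html) == ['http://a.example', 'http://b.example']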
def union(a, b):
    # In-place union: append to a each element of b not already in a.
    for e in b:
        if e not in a:
            a.append(e)
def crawl_web(seed):  # returns index, graph of outlinks
    tocrawl = [seed]
    crawled = []
    graph = {}  # <url>: [list of pages it links to]
    index = {}
    while tocrawl:
        page = tocrawl.pop()
        if page not in crawled:
            content = get_page(page)
            add_page_to_index(index, page, content)
            outlinks = get_all_links(content)
            graph[page] = outlinks
            union(tocrawl, outlinks)
            crawled.append(page)
    return index, graph
def add_page_to_index(index, url, content):
    words = content.split()
    for word in words:
        add_to_index(index, word, url)
def add_to_index(index, keyword, url):
    # Note: a url is appended once per occurrence of keyword on the page.
    if keyword in index:
        index[keyword].append(url)
    else:
        index[keyword] = [url]
def lookup(index, keyword):
    if keyword in index:
        return index[keyword]
    else:
        return None
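# A minimal illustration of the index shape (keyword -> list of urls);
# the urls are placeholders:
_demo_index = {}
add_to_index(_demo_index, 'python', 'http://a.example')
add_to_index(_demo_index, 'python', 'http://b.example')
assert lookup(_demo_index, 'python') == ['http://a.example', 'http://b.example']
assert lookup(_demo_index, 'missing') is None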
def compute_ranks(graph):
    d = 0.8  # damping factor
    numloops = 10
    ranks = {}
    npages = len(graph)
    # Start every page at an equal rank of 1/N.
    for page in graph:
        ranks[page] = 1.0 / npages
    for i in range(0, numloops):
        newranks = {}
        for page in graph:
            newrank = (1 - d) / npages
            # Each node linking to this page contributes a share of its rank.
            for node in graph:
                if page in graph[node]:
                    newrank = newrank + d * (ranks[node] / len(graph[node]))
            # print(page, newrank)  # debug trace of per-page rank updates
            newranks[page] = newrank
        ranks = newranks
    return ranks
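# A sketch of compute_ranks on a hand-built toy graph (page names made up):
# both spokes link to 'hub', so 'hub' should receive the highest rank.
_toy_graph = {
    'hub': ['spoke1'],
    'spoke1': ['hub'],
    'spoke2': ['hub'],
}
_toy_ranks = compute_ranks(_toy_graph)
assert _toy_ranks['hub'] == max(_toy_ranks.values())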
def quick_sort(url_lst, ranks):
    # Quicksort the urls by rank, best-ranked first.
    url_sorted_worse = []
    url_sorted_better = []
    if len(url_lst) <= 1:
        return url_lst
    pivot = url_lst[0]
    for url in url_lst[1:]:
        if ranks[url] <= ranks[pivot]:
            url_sorted_worse.append(url)
        else:
            url_sorted_better.append(url)
    return quick_sort(url_sorted_better, ranks) + [pivot] + quick_sort(url_sorted_worse, ranks)
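# quick_sort returns urls by descending rank; a quick check with made-up ranks:
_demo_ranks = {'u1': 0.1, 'u2': 0.5, 'u3': 0.3}
assert quick_sort(['u1', 'u2', 'u3'], _demo_ranks) == ['u2', 'u3', 'u1']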
def ordered_search(index, ranks, keyword):
    if keyword in index:
        all_urls = index[keyword]
    else:
        return None
    return quick_sort(all_urls, ranks)
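# The crawler needs a live site, so as an offline sketch one can stub
# get_page with a dict of canned pages (the urls and HTML below are
# hypothetical) and crawl that instead:
_fake_pages = {
    'http://seed.example': '<a href="http://other.example">link</a>',
    'http://other.example': 'no links here',
}
_real_get_page = get_page

def _stub_get_page(url):
    return _fake_pages.get(url, '')

get_page = _stub_get_page
_idx, _graph = crawl_web('http://seed.example')
assert _graph['http://seed.example'] == ['http://other.example']
get_page = _real_get_page  # restore the real fetcher before the live crawl below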
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
print(ranks)
# The index stores raw whitespace-split tokens, so HTML fragments such as
# 'Chef</h1>' end up as keywords.
print(ordered_search(index, ranks, 'Chef</h1>'))