
update version 0.5

Author: Beudbeud, 2014-12-13 13:56:02 +01:00
parent 5bcc6bcd65
commit ecf52785ae
53 changed files with 757 additions and 268 deletions


@@ -27,3 +27,5 @@ generally made searx better:
 - Martin Zimmermann
 - @courgette
 - @kernc
+- @Cqoicebordel
+- @Reventl0v


@@ -20,6 +20,7 @@ $(python):
 tests: .installed.cfg
 	@bin/test
+	@grunt test --gruntfile searx/static/oscar/gruntfile.js

 robot: .installed.cfg
 	@bin/robot
@@ -48,6 +49,9 @@ styles:
 	@lessc -x searx/static/oscar/less/bootstrap/bootstrap.less > searx/static/oscar/css/bootstrap.min.css
 	@lessc -x searx/static/oscar/less/oscar/oscar.less > searx/static/oscar/css/oscar.min.css

+grunt:
+	@grunt --gruntfile searx/static/oscar/gruntfile.js
+
 locales:
 	@pybabel compile -d searx/translations


@@ -1,6 +1,6 @@
 flask
 flask-babel
-grequests
+requests
 lxml
 pyyaml
 python-dateutil


@@ -1,7 +1,8 @@
 ## Bing (Web)
 #
 # @website     https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
 #
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)


@@ -1,17 +1,19 @@
 ## Bing (Images)
 #
 # @website     https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
 #
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
 # @parse       url, title, img_src
 #
-# @todo        currently there are up to 35 images receive per page, because bing does not parse count=10. limited response to 10 images
+# @todo        currently there are up to 35 images receive per page,
+#              because bing does not parse count=10.
+#              limited response to 10 images

 from urllib import urlencode
-from cgi import escape
 from lxml import html
 from yaml import load
 import re
@@ -51,14 +53,14 @@ def response(resp):
     dom = html.fromstring(resp.content)

     # init regex for yaml-parsing
-    p = re.compile( '({|,)([a-z]+):(")')
+    p = re.compile('({|,)([a-z]+):(")')

     # parse results
     for result in dom.xpath('//div[@class="dg_u"]'):
         link = result.xpath('./a')[0]

         # parse yaml-data (it is required to add a space, to make it parsable)
-        yaml_data = load(p.sub( r'\1\2: \3', link.attrib.get('m')))
+        yaml_data = load(p.sub(r'\1\2: \3', link.attrib.get('m')))

         title = link.attrib.get('t1')
         #url = 'http://' + link.attrib.get('t3')
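Note on the yaml-parsing fix above: Bing serializes image metadata in the link's m attribute as {key:"value",...}, which PyYAML rejects; the regex inserts a space after each key's colon so the blob parses as a YAML flow mapping. A standalone sketch (the attribute value and its keys below are made up, not real Bing data):

import re
from yaml import load

p = re.compile('({|,)([a-z]+):(")')

# made-up stand-in for the element's m attribute
m_attrib = '{surl:"http://example.com/",imgurl:"http://example.com/a.jpg"}'

# '{surl:"' becomes '{surl: "', which PyYAML accepts
yaml_data = load(p.sub(r'\1\2: \3', m_attrib))
print(yaml_data['imgurl'])  # http://example.com/a.jpg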


@@ -1,7 +1,8 @@
 ## Bing (News)
 #
 # @website     https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
 #
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
@@ -57,12 +58,12 @@ def response(resp):
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
         contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
-        if contentXPath != None:
+        if contentXPath is not None:
             content = escape(' '.join(contentXPath))

         # parse publishedDate
         publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
-        if publishedDateXPath != None:
+        if publishedDateXPath is not None:
             publishedDate = escape(' '.join(publishedDateXPath))

         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):


@@ -55,6 +55,6 @@
         resp.search_params['to'].lower()
     )
-    results.append({'answer' : answer, 'url': url})
+    results.append({'answer': answer, 'url': url})

     return results


@@ -12,7 +12,6 @@
 from urllib import urlencode
 from json import loads
-from lxml import html

 # engine dependent config
 categories = ['videos']


@@ -1,7 +1,8 @@
 ## DuckDuckGo (Web)
 #
 # @website     https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api), but not all results from search-site
+# @provide-api yes (https://duckduckgo.com/api),
+#              but not all results from search-site
 #
 # @using-api   no
 # @results     HTML (using search portal)
@@ -9,7 +10,8 @@
 # @parse       url, title, content
 #
 # @todo        rewrite to api
-# @todo        language support (the current used site does not support language-change)
+# @todo        language support
+#              (the current used site does not support language-change)

 from urllib import urlencode
 from lxml.html import fromstring
@@ -37,7 +39,7 @@ def request(query, params):
     if params['language'] == 'all':
         locale = 'en-us'
     else:
-        locale = params['language'].replace('_','-').lower()
+        locale = params['language'].replace('_', '-').lower()

     params['url'] = url.format(
         query=urlencode({'q': query, 'kl': locale}),


@@ -3,21 +3,25 @@ from urllib import urlencode
 from lxml import html
 from searx.engines.xpath import extract_text

-url = 'https://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1&d=1'
+url = 'https://api.duckduckgo.com/'\
+    + '?{query}&format=json&pretty=0&no_redirect=1&d=1'


 def result_to_text(url, text, htmlResult):
     # TODO : remove result ending with "Meaning" or "Category"
     dom = html.fromstring(htmlResult)
     a = dom.xpath('//a')
-    if len(a)>=1:
+    if len(a) >= 1:
         return extract_text(a[0])
     else:
         return text


 def html_to_text(htmlFragment):
     dom = html.fromstring(htmlFragment)
     return extract_text(dom)


 def request(query, params):
     # TODO add kl={locale}
     params['url'] = url.format(query=urlencode({'q': query}))
@@ -38,7 +42,7 @@ def response(resp):
     # add answer if there is one
     answer = search_res.get('Answer', '')
     if answer != '':
-        results.append({ 'answer' : html_to_text(answer) })
+        results.append({'answer': html_to_text(answer)})

     # add infobox
     if 'Definition' in search_res:
@@ -47,7 +51,6 @@ def response(resp):
     if 'Abstract' in search_res:
         content = content + search_res.get('Abstract', '')

-
     # image
     image = search_res.get('Image', '')
     image = None if image == '' else image
@@ -55,29 +58,35 @@ def response(resp):
     # attributes
     if 'Infobox' in search_res:
         infobox = search_res.get('Infobox', None)
         if 'content' in infobox:
             for info in infobox.get('content'):
-                attributes.append({'label': info.get('label'), 'value': info.get('value')})
+                attributes.append({'label': info.get('label'),
+                                   'value': info.get('value')})

     # urls
     for ddg_result in search_res.get('Results', []):
         if 'FirstURL' in ddg_result:
             firstURL = ddg_result.get('FirstURL', '')
             text = ddg_result.get('Text', '')
-            urls.append({'title':text, 'url':firstURL})
-            results.append({'title':heading, 'url': firstURL})
+            urls.append({'title': text, 'url': firstURL})
+            results.append({'title': heading, 'url': firstURL})

     # related topics
     for ddg_result in search_res.get('RelatedTopics', None):
         if 'FirstURL' in ddg_result:
-            suggestion = result_to_text(ddg_result.get('FirstURL', None), ddg_result.get('Text', None), ddg_result.get('Result', None))
+            suggestion = result_to_text(ddg_result.get('FirstURL', None),
+                                        ddg_result.get('Text', None),
+                                        ddg_result.get('Result', None))
             if suggestion != heading:
                 results.append({'suggestion': suggestion})
         elif 'Topics' in ddg_result:
             suggestions = []
-            relatedTopics.append({ 'name' : ddg_result.get('Name', ''), 'suggestions': suggestions })
+            relatedTopics.append({'name': ddg_result.get('Name', ''),
+                                  'suggestions': suggestions})
             for topic_result in ddg_result.get('Topics', []):
-                suggestion = result_to_text(topic_result.get('FirstURL', None), topic_result.get('Text', None), topic_result.get('Result', None))
+                suggestion = result_to_text(topic_result.get('FirstURL', None),
+                                            topic_result.get('Text', None),
+                                            topic_result.get('Result', None))
                 if suggestion != heading:
                     suggestions.append(suggestion)
@@ -86,21 +95,26 @@ def response(resp):
     if abstractURL != '':
         # add as result ? problem always in english
         infobox_id = abstractURL
-        urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL})
+        urls.append({'title': search_res.get('AbstractSource'),
+                     'url': abstractURL})

     # definition
     definitionURL = search_res.get('DefinitionURL', '')
     if definitionURL != '':
         # add as result ? as answer ? problem always in english
         infobox_id = definitionURL
-        urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})
+        urls.append({'title': search_res.get('DefinitionSource'),
+                     'url': definitionURL})

     # entity
     entity = search_res.get('Entity', None)
-    # TODO continent / country / department / location / waterfall / mountain range : link to map search, get weather, near by locations
+    # TODO continent / country / department / location / waterfall /
+    #      mountain range :
+    #      link to map search, get weather, near by locations
     # TODO musician : link to music search
     # TODO concert tour : ??
-    # TODO film / actor / television / media franchise : links to IMDB / rottentomatoes (or scrap result)
+    # TODO film / actor / television / media franchise :
+    #      links to IMDB / rottentomatoes (or scrap result)
     # TODO music : link tu musicbrainz / last.fm
     # TODO book : ??
     # TODO artist / playwright : ??
@@ -114,24 +128,25 @@ def response(resp):
     # TODO programming language : ??
     # TODO file format : ??

-    if len(heading)>0:
+    if len(heading) > 0:
         # TODO get infobox.meta.value where .label='article_title'
-        if image==None and len(attributes)==0 and len(urls)==1 and len(relatedTopics)==0 and len(content)==0:
+        if image is None and len(attributes) == 0 and len(urls) == 1 and\
+           len(relatedTopics) == 0 and len(content) == 0:
             results.append({
                 'url': urls[0]['url'],
                 'title': heading,
                 'content': content
             })
         else:
             results.append({
                 'infobox': heading,
                 'id': infobox_id,
                 'entity': entity,
                 'content': content,
-                'img_src' : image,
+                'img_src': image,
                 'attributes': attributes,
                 'urls': urls,
                 'relatedTopics': relatedTopics
             })

     return results
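For reference, the wrapped url constant above still formats to a single request URL; a quick check with an arbitrary example query:

from urllib import urlencode

url = 'https://api.duckduckgo.com/'\
    + '?{query}&format=json&pretty=0&no_redirect=1&d=1'

print(url.format(query=urlencode({'q': 'searx'})))
# https://api.duckduckgo.com/?q=searx&format=json&pretty=0&no_redirect=1&d=1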


@@ -27,6 +27,7 @@ search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={lan
 search_category = {'general': 'web',
                    'news': 'news'}

+
 # do search-request
 def request(query, params):
     offset = (params['pageno']-1) * number_of_results + 1
@@ -48,7 +49,7 @@ def request(query, params):
         query=urlencode({'q': query}),
         language=language,
         categorie=categorie,
-        api_key=api_key )
+        api_key=api_key)

     # using searx User-Agent
     params['headers']['User-Agent'] = searx_useragent()


@@ -1,4 +1,4 @@
-## Google (Web)
+# Google (Web)
 #
 # @website     https://www.google.com
 # @provide-api yes (https://developers.google.com/custom-search/)
@@ -9,7 +9,7 @@
 # @parse       url, title, content, suggestion

 from urllib import urlencode
-from urlparse import unquote,urlparse,parse_qsl
+from urlparse import urlparse, parse_qsl
 from lxml import html
 from searx.engines.xpath import extract_text, extract_url
@@ -23,10 +23,13 @@ google_hostname = 'www.google.com'
 search_path = '/search'
 redirect_path = '/url'
 images_path = '/images'
-search_url = 'https://' + google_hostname + search_path + '?{query}&start={offset}&gbv=1'
+search_url = ('https://' +
+              google_hostname +
+              search_path +
+              '?{query}&start={offset}&gbv=1')

 # specific xpath variables
-results_xpath= '//li[@class="g"]'
+results_xpath = '//li[@class="g"]'
 url_xpath = './/h3/a/@href'
 title_xpath = './/h3'
 content_xpath = './/span[@class="st"]'
@@ -36,15 +39,18 @@ images_xpath = './/div/a'
 image_url_xpath = './@href'
 image_img_src_xpath = './img/@src'

+
 # remove google-specific tracking-url
 def parse_url(url_string):
     parsed_url = urlparse(url_string)
-    if parsed_url.netloc in [google_hostname, ''] and parsed_url.path==redirect_path:
+    if (parsed_url.netloc in [google_hostname, '']
+            and parsed_url.path == redirect_path):
         query = dict(parse_qsl(parsed_url.query))
         return query['q']
     else:
         return url_string

+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10
@@ -52,7 +58,7 @@ def request(query, params):
     if params['language'] == 'all':
         language = 'en'
     else:
-        language = params['language'].replace('_','-').lower()
+        language = params['language'].replace('_', '-').lower()

     params['url'] = search_url.format(offset=offset,
                                       query=urlencode({'q': query}))
@@ -74,13 +80,18 @@ def response(resp):
         try:
             url = parse_url(extract_url(result.xpath(url_xpath), search_url))
             parsed_url = urlparse(url)
-            if parsed_url.netloc==google_hostname and parsed_url.path==search_path:
+            if (parsed_url.netloc == google_hostname
+                    and parsed_url.path == search_path):
                 # remove the link to google news
                 continue

-            if parsed_url.netloc==google_hostname and parsed_url.path==images_path:
-                # images result
-                results = results + parse_images(result)
+            # images result
+            if (parsed_url.netloc == google_hostname
+                    and parsed_url.path == images_path):
+                # only thumbnail image provided,
+                # so skipping image results
+                # results = results + parse_images(result)
+                pass
             else:
                 # normal result
                 content = extract_text(result.xpath(content_xpath)[0])
@@ -99,6 +110,7 @@ def response(resp):
     # return results
     return results

+
 def parse_images(result):
     results = []
     for image in result.xpath(images_xpath):
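A standalone sketch of the reworked parse_url above, showing how a Google /url redirect is unwrapped to its q parameter while other URLs pass through (the example URLs are made up):

from urlparse import urlparse, parse_qsl

google_hostname = 'www.google.com'
redirect_path = '/url'

def parse_url(url_string):
    parsed_url = urlparse(url_string)
    if (parsed_url.netloc in [google_hostname, '']
            and parsed_url.path == redirect_path):
        query = dict(parse_qsl(parsed_url.query))
        return query['q']
    else:
        return url_string

print(parse_url('https://www.google.com/url?q=https://example.com/'))
# https://example.com/
print(parse_url('https://example.com/page'))
# https://example.com/page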


@@ -1,14 +1,15 @@
 ## Google (Images)
 #
 # @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+#              deprecated!
 #
 # @using-api   yes
 # @results     JSON
 # @stable      yes (but deprecated)
 # @parse       url, title, img_src

-from urllib import urlencode
+from urllib import urlencode,unquote
 from json import loads

 # engine dependent config
@@ -51,7 +52,7 @@ def response(resp):
         results.append({'url': href,
                         'title': title,
                         'content': '',
-                        'img_src': result['url'],
+                        'img_src': unquote(result['url']),
                         'template': 'images.html'})

     # return results


@@ -1,7 +1,8 @@
 ## Google (News)
 #
 # @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+#              deprecated!
 #
 # @using-api   yes
 # @results     JSON


@@ -39,16 +39,16 @@
         url = result_base_url.format(osm_type=osm_type,
                                      osm_id=r['osm_id'])

-        osm = {'type':osm_type,
-               'id':r['osm_id']}
+        osm = {'type': osm_type,
+               'id': r['osm_id']}

         geojson = r.get('geojson')

         # if no geojson is found and osm_type is a node, add geojson Point
         if not geojson and\
            osm_type == 'node':
-            geojson = {u'type':u'Point',
-                       u'coordinates':[r['lon'],r['lat']]}
+            geojson = {u'type': u'Point',
+                       u'coordinates': [r['lon'], r['lat']]}

         address_raw = r.get('address')
         address = {}
@@ -59,20 +59,20 @@
            r['class'] == 'tourism' or\
            r['class'] == 'leisure':
             if address_raw.get('address29'):
-                address = {'name':address_raw.get('address29')}
+                address = {'name': address_raw.get('address29')}
             else:
-                address = {'name':address_raw.get(r['type'])}
+                address = {'name': address_raw.get(r['type'])}

         # add rest of adressdata, if something is already found
         if address.get('name'):
-            address.update({'house_number':address_raw.get('house_number'),
-                            'road':address_raw.get('road'),
-                            'locality':address_raw.get('city',
-                                       address_raw.get('town',
-                                       address_raw.get('village'))),
-                            'postcode':address_raw.get('postcode'),
-                            'country':address_raw.get('country'),
-                            'country_code':address_raw.get('country_code')})
+            address.update({'house_number': address_raw.get('house_number'),
+                            'road': address_raw.get('road'),
+                            'locality': address_raw.get('city',
+                                        address_raw.get('town',
+                                        address_raw.get('village'))),
+                            'postcode': address_raw.get('postcode'),
+                            'country': address_raw.get('country'),
+                            'country_code': address_raw.get('country_code')})
         else:
             address = None


@@ -19,7 +19,7 @@ categories = ['videos', 'music', 'files']
 paging = True

 # search-url
-url = 'https://thepiratebay.se/'
+url = 'https://thepiratebay.cr/'
 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'

 # piratebay specific type-definitions


@@ -1,7 +1,8 @@
 ## Vimeo (Videos)
 #
 # @website     https://vimeo.com/
-# @provide-api yes (http://developer.vimeo.com/api), they have a maximum count of queries/hour
+# @provide-api yes (http://developer.vimeo.com/api),
+#              they have a maximum count of queries/hour
 #
 # @using-api   no (TODO, rewrite to api)
 # @results     HTML (using search portal)
@@ -35,11 +36,12 @@ publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'

 # do search-request
 def request(query, params):
-    params['url'] = search_url.format(pageno=params['pageno'] ,
+    params['url'] = search_url.format(pageno=params['pageno'],
                                       query=urlencode({'q': query}))

     # TODO required?
-    params['cookies']['__utma'] = '00000000.000#0000000.0000000000.0000000000.0000000000.0'
+    params['cookies']['__utma'] =\
+        '00000000.000#0000000.0000000000.0000000000.0000000000.0'

     return params


@@ -2,13 +2,25 @@ import json
 from requests import get
 from urllib import urlencode

-resultCount=1
-urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
-urlDetail = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=labels%7Cinfo%7Csitelinks%7Csitelinks%2Furls%7Cdescriptions%7Cclaims&{query}'
-urlMap = 'https://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+result_count = 1
+wikidata_host = 'https://www.wikidata.org'
+wikidata_api = wikidata_host + '/w/api.php'
+url_search = wikidata_api \
+    + '?action=query&list=search&format=json'\
+    + '&srnamespace=0&srprop=sectiontitle&{query}'
+url_detail = wikidata_api\
+    + '?action=wbgetentities&format=json'\
+    + '&props=labels%7Cinfo%7Csitelinks'\
+    + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
+    + '&{query}'
+url_map = 'https://www.openstreetmap.org/'\
+    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'


 def request(query, params):
-    params['url'] = urlSearch.format(query=urlencode({'srsearch': query, 'srlimit': resultCount}))
+    params['url'] = url_search.format(
+        query=urlencode({'srsearch': query,
+                         'srlimit': result_count}))
     return params
@@ -23,7 +35,8 @@ def response(resp):
     language = resp.search_params['language'].split('_')[0]
     if language == 'all':
         language = 'en'
-    url = urlDetail.format(query=urlencode({'ids': '|'.join(wikidata_ids), 'languages': language + '|en'}))
+    url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
+                                            'languages': language + '|en'}))

     htmlresponse = get(url)
     jsonresponse = json.loads(htmlresponse.content)
@@ -32,6 +45,7 @@
     return results

+
 def getDetail(jsonresponse, wikidata_id, language):
     results = []
     urls = []
@@ -40,60 +54,103 @@ def getDetail(jsonresponse, wikidata_id, language):
     result = jsonresponse.get('entities', {}).get(wikidata_id, {})

     title = result.get('labels', {}).get(language, {}).get('value', None)
-    if title == None:
+    if title is None:
         title = result.get('labels', {}).get('en', {}).get('value', None)
-        if title == None:
+        if title is None:
             return results

-    description = result.get('descriptions', {}).get(language, {}).get('value', None)
-    if description == None:
-        description = result.get('descriptions', {}).get('en', {}).get('value', '')
+    description = result\
+        .get('descriptions', {})\
+        .get(language, {})\
+        .get('value', None)
+    if description is None:
+        description = result\
+            .get('descriptions', {})\
+            .get('en', {})\
+            .get('value', '')

     claims = result.get('claims', {})

     official_website = get_string(claims, 'P856', None)
-    if official_website != None:
-        urls.append({ 'title' : 'Official site', 'url': official_website })
-        results.append({ 'title': title, 'url' : official_website })
+    if official_website is not None:
+        urls.append({'title': 'Official site', 'url': official_website})
+        results.append({'title': title, 'url': official_website})

     wikipedia_link_count = 0
     if language != 'en':
-        wikipedia_link_count += add_url(urls, 'Wikipedia (' + language + ')', get_wikilink(result, language + 'wiki'))
+        wikipedia_link_count += add_url(urls,
+                                        'Wikipedia (' + language + ')',
+                                        get_wikilink(result, language +
+                                                     'wiki'))
     wikipedia_en_link = get_wikilink(result, 'enwiki')
-    wikipedia_link_count += add_url(urls, 'Wikipedia (en)', wikipedia_en_link)
+    wikipedia_link_count += add_url(urls,
+                                    'Wikipedia (en)',
+                                    wikipedia_en_link)

     if wikipedia_link_count == 0:
         misc_language = get_wiki_firstlanguage(result, 'wiki')
-        if misc_language != None:
-            add_url(urls, 'Wikipedia (' + misc_language + ')', get_wikilink(result, misc_language + 'wiki'))
+        if misc_language is not None:
+            add_url(urls,
+                    'Wikipedia (' + misc_language + ')',
+                    get_wikilink(result, misc_language + 'wiki'))

     if language != 'en':
-        add_url(urls, 'Wiki voyage (' + language + ')', get_wikilink(result, language + 'wikivoyage'))
-    add_url(urls, 'Wiki voyage (en)', get_wikilink(result, 'enwikivoyage'))
+        add_url(urls,
+                'Wiki voyage (' + language + ')',
+                get_wikilink(result, language + 'wikivoyage'))
+    add_url(urls,
+            'Wiki voyage (en)',
+            get_wikilink(result, 'enwikivoyage'))

     if language != 'en':
-        add_url(urls, 'Wikiquote (' + language + ')', get_wikilink(result, language + 'wikiquote'))
-    add_url(urls, 'Wikiquote (en)', get_wikilink(result, 'enwikiquote'))
+        add_url(urls,
+                'Wikiquote (' + language + ')',
+                get_wikilink(result, language + 'wikiquote'))
+    add_url(urls,
+            'Wikiquote (en)',
+            get_wikilink(result, 'enwikiquote'))

-    add_url(urls, 'Commons wiki', get_wikilink(result, 'commonswiki'))
+    add_url(urls,
+            'Commons wiki',
+            get_wikilink(result, 'commonswiki'))

-    add_url(urls, 'Location', get_geolink(claims, 'P625', None))
+    add_url(urls,
+            'Location',
+            get_geolink(claims, 'P625', None))

-    add_url(urls, 'Wikidata', 'https://www.wikidata.org/wiki/' + wikidata_id + '?uselang='+ language)
+    add_url(urls,
+            'Wikidata',
+            'https://www.wikidata.org/wiki/'
+            + wikidata_id + '?uselang=' + language)

     musicbrainz_work_id = get_string(claims, 'P435')
-    if musicbrainz_work_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/work/' + musicbrainz_work_id)
+    if musicbrainz_work_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/work/'
+                + musicbrainz_work_id)

     musicbrainz_artist_id = get_string(claims, 'P434')
-    if musicbrainz_artist_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/artist/' + musicbrainz_artist_id)
+    if musicbrainz_artist_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/artist/'
+                + musicbrainz_artist_id)

     musicbrainz_release_group_id = get_string(claims, 'P436')
-    if musicbrainz_release_group_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/release-group/' + musicbrainz_release_group_id)
+    if musicbrainz_release_group_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/release-group/'
+                + musicbrainz_release_group_id)

     musicbrainz_label_id = get_string(claims, 'P966')
-    if musicbrainz_label_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/label/' + musicbrainz_label_id)
+    if musicbrainz_label_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/label/'
+                + musicbrainz_label_id)

     # musicbrainz_area_id = get_string(claims, 'P982')
     # P1407 MusicBrainz series ID
@@ -102,42 +159,43 @@ def getDetail(jsonresponse, wikidata_id, language):
     # P1407 MusicBrainz series ID

     postal_code = get_string(claims, 'P281', None)
-    if postal_code != None:
-        attributes.append({'label' : 'Postal code(s)', 'value' : postal_code})
+    if postal_code is not None:
+        attributes.append({'label': 'Postal code(s)', 'value': postal_code})

     date_of_birth = get_time(claims, 'P569', None)
-    if date_of_birth != None:
-        attributes.append({'label' : 'Date of birth', 'value' : date_of_birth})
+    if date_of_birth is not None:
+        attributes.append({'label': 'Date of birth', 'value': date_of_birth})

     date_of_death = get_time(claims, 'P570', None)
-    if date_of_death != None:
-        attributes.append({'label' : 'Date of death', 'value' : date_of_death})
+    if date_of_death is not None:
+        attributes.append({'label': 'Date of death', 'value': date_of_death})

-    if len(attributes)==0 and len(urls)==2 and len(description)==0:
+    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
         results.append({
             'url': urls[0]['url'],
             'title': title,
             'content': description
         })
     else:
         results.append({
-            'infobox' : title,
-            'id' : wikipedia_en_link,
-            'content' : description,
-            'attributes' : attributes,
-            'urls' : urls
+            'infobox': title,
+            'id': wikipedia_en_link,
+            'content': description,
+            'attributes': attributes,
+            'urls': urls
         })

     return results


 def add_url(urls, title, url):
-    if url != None:
-        urls.append({'title' : title, 'url' : url})
+    if url is not None:
+        urls.append({'title': title, 'url': url})
         return 1
     else:
         return 0


 def get_mainsnak(claims, propertyName):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
@@ -157,7 +215,7 @@ def get_string(claims, propertyName, defaultValue=None):
         mainsnak = e.get('mainsnak', {})

         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             result.append(datavalue.get('value', ''))

     if len(result) == 0:
@@ -177,7 +235,7 @@ def get_time(claims, propertyName, defaultValue=None):
         mainsnak = e.get('mainsnak', {})

         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             value = datavalue.get('value', '')
             result.append(value.get('time', ''))
@@ -190,7 +248,7 @@ def get_time(claims, propertyName, defaultValue=None):
 def get_geolink(claims, propertyName, defaultValue=''):
     mainsnak = get_mainsnak(claims, propertyName)

-    if mainsnak == None:
+    if mainsnak is None:
         return defaultValue

     datatype = mainsnak.get('datatype', '')
@@ -209,21 +267,25 @@ def get_geolink(claims, propertyName, defaultValue=''):
     # 1 --> 6
     # 0.016666666666667 --> 9
     # 0.00027777777777778 --> 19
-    # wolframalpha : quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
+    # wolframalpha :
+    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
     # 14.1186-8.8322 x+0.625447 x^2
     if precision < 0.0003:
         zoom = 19
     else:
         zoom = int(15 - precision*8.8322 + precision*precision*0.625447)

-    url = urlMap.replace('{latitude}', str(value.get('latitude',0))).replace('{longitude}', str(value.get('longitude',0))).replace('{zoom}', str(zoom))
+    url = url_map\
+        .replace('{latitude}', str(value.get('latitude', 0)))\
+        .replace('{longitude}', str(value.get('longitude', 0)))\
+        .replace('{zoom}', str(zoom))

     return url


 def get_wikilink(result, wikiid):
     url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
-    if url == None:
+    if url is None:
         return url
     elif url.startswith('http://'):
         url = url.replace('http://', 'https://')
@@ -231,8 +293,9 @@ def get_wikilink(result, wikiid):
         url = 'https:' + url

     return url


 def get_wiki_firstlanguage(result, wikipatternid):
     for k in result.get('sitelinks', {}).keys():
-        if k.endswith(wikipatternid) and len(k)==(2+len(wikipatternid)):
+        if k.endswith(wikipatternid) and len(k) == (2+len(wikipatternid)):
             return k[0:2]
     return None
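A standalone sketch of get_wikilink above, which normalizes sitelink URLs to https (the sitelinks fixture is made up, and the '//' branch is inferred from the context lines around the last hunk):

def get_wikilink(result, wikiid):
    url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
    if url is None:
        return url
    elif url.startswith('http://'):
        url = url.replace('http://', 'https://')
    elif url.startswith('//'):
        url = 'https:' + url
    return url

result = {'sitelinks': {'enwiki': {'url': 'http://en.wikipedia.org/wiki/Searx'}}}
print(get_wikilink(result, 'enwiki'))  # https://en.wikipedia.org/wiki/Searx
print(get_wikilink(result, 'dewiki'))  # None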


@@ -1,7 +1,8 @@
 ## Yacy (Web, Images, Videos, Music, Files)
 #
 # @website     http://yacy.net
-# @provide-api yes (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
+# @provide-api yes
+#              (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
 #
 # @using-api   yes
 # @results     JSON
@@ -16,7 +17,7 @@ from urllib import urlencode
 from dateutil import parser

 # engine dependent config
-categories = ['general', 'images'] #TODO , 'music', 'videos', 'files'
+categories = ['general', 'images']  # TODO , 'music', 'videos', 'files'
 paging = True
 language_support = True
 number_of_results = 5


@@ -1,7 +1,8 @@
 ## Yahoo (Web)
 #
 # @website     https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/), $0.80/1000 queries
+# @provide-api yes (https://developer.yahoo.com/boss/search/),
+#              $0.80/1000 queries
 #
 # @using-api   no (because pricing)
 # @results     HTML (using search portal)
@@ -40,7 +41,7 @@ def parse_url(url_string):
         if endpos > -1:
             endpositions.append(endpos)

-    if start==0 or len(endpositions) == 0:
+    if start == 0 or len(endpositions) == 0:
         return url_string
     else:
         end = min(endpositions)


@@ -15,7 +15,8 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 (C) 2013- by Adam Tauber, <asciimoo@gmail.com>
 '''

-import grequests
+import requests as requests_lib
+import threading
 import re
 from itertools import izip_longest, chain
 from datetime import datetime
@@ -32,6 +33,21 @@ from searx.query import Query
 number_of_searches = 0


+def threaded_requests(requests):
+    for fn, url, request_args in requests:
+        th = threading.Thread(
+            target=fn,
+            args=(url,),
+            kwargs=request_args,
+            name='search_request',
+        )
+        th.start()
+
+    for th in threading.enumerate():
+        if th.name == 'search_request':
+            th.join()
+
+
 # get default reqest parameter
 def default_request_params():
     return {
@@ -471,9 +487,9 @@ class Search(object):
             # specific type of request (GET or POST)
             if request_params['method'] == 'GET':
-                req = grequests.get
+                req = requests_lib.get
             else:
-                req = grequests.post
+                req = requests_lib.post
                 request_args['data'] = request_params['data']

             # ignoring empty urls
@@ -481,10 +497,10 @@
                 continue

             # append request to list
-            requests.append(req(request_params['url'], **request_args))
+            requests.append((req, request_params['url'], request_args))

         # send all search-request
-        grequests.map(requests)
+        threaded_requests(requests)

         # update engine-specific stats
         for engine_name, engine_results in results.items():
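The change above drops grequests in favour of plain requests plus one thread per engine request, joined by thread name. A minimal standalone sketch of the same dispatch pattern (the URLs and the response hook are placeholders, not searx code):

import threading
import requests as requests_lib

def threaded_requests(requests):
    # start one named thread per (function, url, kwargs) tuple
    for fn, url, request_args in requests:
        th = threading.Thread(
            target=fn,
            args=(url,),
            kwargs=request_args,
            name='search_request',
        )
        th.start()

    # wait until every search_request thread has finished
    for th in threading.enumerate():
        if th.name == 'search_request':
            th.join()

def print_status(response, **kwargs):
    print('%s -> %s' % (response.url, response.status_code))

requests = [
    (requests_lib.get, 'https://example.com/',
     {'hooks': {'response': print_status}, 'timeout': 2.0}),
    (requests_lib.get, 'https://example.org/',
     {'hooks': {'response': print_status}, 'timeout': 2.0}),
]
threaded_requests(requests)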


@@ -1,7 +1,7 @@
 server:
     port : 8888
     secret_key : "ultrasecretkey" # change this!
-    debug : False # Debug mode, only for development
+    debug : True # Debug mode, only for development
     request_timeout : 2.0 # seconds
     base_url : False # Set custom base_url. Possible values: False or "https://your.custom.host/location/"
     themes_path : "" # Custom ui themes path
@@ -95,9 +95,9 @@ engines:
         engine : openstreetmap
         shortcut : osm

-    - name : piratebay
-      engine : piratebay
-      shortcut : tpb
+#    - name : piratebay
+#      engine : piratebay
+#      shortcut : tpb

     - name : soundcloud
       engine : soundcloud

sources/searx/static/oscar/.gitignore (new vendored file)

@@ -0,0 +1 @@
node_modules/


@@ -0,0 +1,17 @@
install dependencies
~~~~~~~~~~~~~~~~~~~~
run this command in the directory ``searx/static/oscar``
``npm install``
compile sources
~~~~~~~~~~~~~~~
run this command in the directory ``searx/static/oscar``
``grunt``
or in the root directory:
``make grunt``


@@ -0,0 +1,51 @@
module.exports = function(grunt) {
grunt.initConfig({
pkg: grunt.file.readJSON('package.json'),
concat: {
options: {
separator: ';'
},
dist: {
src: ['js/searx_src/*.js'],
dest: 'js/searx.js'
}
},
uglify: {
options: {
banner: '/*! oscar/searx.min.js | <%= grunt.template.today("dd-mm-yyyy") %> | https://github.com/asciimoo/searx */\n'
},
dist: {
files: {
'js/searx.min.js': ['<%= concat.dist.dest %>']
}
}
},
jshint: {
files: ['gruntfile.js', 'js/searx_src/*.js'],
options: {
// options here to override JSHint defaults
globals: {
jQuery: true,
console: true,
module: true,
document: true
}
}
},
watch: {
files: ['<%= jshint.files %>'],
tasks: ['jshint']
}
});
grunt.loadNpmTasks('grunt-contrib-uglify');
grunt.loadNpmTasks('grunt-contrib-jshint');
grunt.loadNpmTasks('grunt-contrib-watch');
grunt.loadNpmTasks('grunt-contrib-concat');
grunt.registerTask('test', ['jshint']);
grunt.registerTask('default', ['jshint', 'concat', 'uglify']);
};


@@ -0,0 +1,2 @@
/*! oscar/searx.min.js | 30-11-2014 | https://github.com/asciimoo/searx */
requirejs.config({baseUrl:"/static/oscar/js",paths:{app:"../app"}}),searx.autocompleter&&(searx.searchResults=new Bloodhound({datumTokenizer:Bloodhound.tokenizers.obj.whitespace("value"),queryTokenizer:Bloodhound.tokenizers.whitespace,remote:"/autocompleter?q=%QUERY"}),searx.searchResults.initialize()),$(document).ready(function(){searx.autocompleter&&$("#q").typeahead(null,{name:"search-results",displayKey:function(a){return a},source:searx.searchResults.ttAdapter()})}),$(document).ready(function(){$("#q.autofocus").focus(),$(".select-all-on-click").click(function(){$(this).select()}),$(".btn-collapse").click(function(){var a=$(this).data("btn-text-collapsed"),b=$(this).data("btn-text-not-collapsed");""!==a&&""!==b&&(new_html=$(this).hasClass("collapsed")?$(this).html().replace(a,b):$(this).html().replace(b,a),$(this).html(new_html))}),$(".btn-toggle .btn").click(function(){var a="btn-"+$(this).data("btn-class"),b=$(this).data("btn-label-default"),c=$(this).data("btn-label-toggled");""!==c&&(new_html=$(this).hasClass("btn-default")?$(this).html().replace(b,c):$(this).html().replace(c,b),$(this).html(new_html)),$(this).toggleClass(a),$(this).toggleClass("btn-default")})}),$(document).ready(function(){$(".searx_overpass_request").on("click",function(a){var b="https://overpass-api.de/api/interpreter?data=",c=b+"[out:json][timeout:25];(",d=");out meta;",e=$(this).data("osm-id"),f=$(this).data("osm-type"),g=$(this).data("result-table"),h="#"+$(this).data("result-table-loadicon"),i=["addr:city","addr:country","addr:housenumber","addr:postcode","addr:street"];if(e&&f&&g){g="#"+g;var j=null;switch(f){case"node":j=c+"node("+e+");"+d;break;case"way":j=c+"way("+e+");"+d;break;case"relation":j=c+"relation("+e+");"+d}if(j){$.ajax(j).done(function(a){if(a&&a.elements&&a.elements[0]){var b=a.elements[0],c=$(g).html();for(var d in b.tags)if(null===b.tags.name||-1==i.indexOf(d)){switch(c+="<tr><td>"+d+"</td><td>",d){case"phone":case"fax":c+='<a href="tel:'+b.tags[d].replace(/ /g,"")+'">'+b.tags[d]+"</a>";break;case"email":c+='<a href="mailto:'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"website":case"url":c+='<a href="'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikidata":c+='<a href="https://www.wikidata.org/wiki/'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikipedia":if(-1!=b.tags[d].indexOf(":")){c+='<a href="https://'+b.tags[d].substring(0,b.tags[d].indexOf(":"))+".wikipedia.org/wiki/"+b.tags[d].substring(b.tags[d].indexOf(":")+1)+'">'+b.tags[d]+"</a>";break}default:c+=b.tags[d]}c+="</td></tr>"}$(g).html(c),$(g).removeClass("hidden"),$(h).addClass("hidden")}}).fail(function(){$(h).html($(h).html()+'<p class="text-muted">could not load data!</p>')})}}$(this).off(a)}),$(".searx_init_map").on("click",function(a){var b=$(this).data("leaflet-target"),c=$(this).data("map-lon"),d=$(this).data("map-lat"),e=$(this).data("map-zoom"),f=$(this).data("map-boundingbox"),g=$(this).data("map-geojson");require(["leaflet-0.7.3.min"],function(){f&&(southWest=L.latLng(f[0],f[2]),northEast=L.latLng(f[1],f[3]),map_bounds=L.latLngBounds(southWest,northEast)),L.Icon.Default.imagePath="/static/oscar/img/map";{var a=L.map(b),h="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",i='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors',j=new L.TileLayer(h,{minZoom:1,maxZoom:19,attribution:i}),k="http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg",l='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png">',m=new L.TileLayer(k,{minZoom:1,maxZoom:18,subdomains:"1234",attribution:l}),n="http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg",o='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="https://developer.mapquest.com/content/osm/mq_logo.png"> | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';new L.TileLayer(n,{minZoom:1,maxZoom:11,subdomains:"1234",attribution:o})}map_bounds?setTimeout(function(){a.fitBounds(map_bounds,{maxZoom:17})},0):c&&d&&(e?a.setView(new L.LatLng(d,c),e):a.setView(new L.LatLng(d,c),8)),a.addLayer(m);var p={"OSM Mapnik":j,MapQuest:m};L.control.layers(p).addTo(a),g&&L.geoJson(g).addTo(a)}),$(this).off(a)})});


@@ -0,0 +1,23 @@
/**
* searx is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* searx is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with searx. If not, see < http://www.gnu.org/licenses/ >.
*
* (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
*/
requirejs.config({
baseUrl: '/static/oscar/js',
paths: {
app: '../app'
}
});


@@ -0,0 +1,37 @@
/**
* searx is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* searx is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with searx. If not, see < http://www.gnu.org/licenses/ >.
*
* (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
*/
if(searx.autocompleter) {
searx.searchResults = new Bloodhound({
datumTokenizer: Bloodhound.tokenizers.obj.whitespace('value'),
queryTokenizer: Bloodhound.tokenizers.whitespace,
remote: '/autocompleter?q=%QUERY'
});
searx.searchResults.initialize();
}
$(document).ready(function(){
if(searx.autocompleter) {
$('#q').typeahead(null, {
name: 'search-results',
displayKey: function(result) {
return result;
},
source: searx.searchResults.ttAdapter()
});
}
});


@@ -0,0 +1,66 @@
/**
* searx is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* searx is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with searx. If not, see < http://www.gnu.org/licenses/ >.
*
* (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
*/
$(document).ready(function(){
/**
* focus element if class="autofocus" and id="q"
*/
$('#q.autofocus').focus();
/**
* select full content on click if class="select-all-on-click"
*/
$(".select-all-on-click").click(function () {
$(this).select();
});
/**
* change text during btn-collapse click if possible
*/
$('.btn-collapse').click(function() {
var btnTextCollapsed = $(this).data('btn-text-collapsed');
var btnTextNotCollapsed = $(this).data('btn-text-not-collapsed');
if(btnTextCollapsed !== '' && btnTextNotCollapsed !== '') {
if($(this).hasClass('collapsed')) {
new_html = $(this).html().replace(btnTextCollapsed, btnTextNotCollapsed);
} else {
new_html = $(this).html().replace(btnTextNotCollapsed, btnTextCollapsed);
}
$(this).html(new_html);
}
});
/**
* change text during btn-toggle click if possible
*/
$('.btn-toggle .btn').click(function() {
var btnClass = 'btn-' + $(this).data('btn-class');
var btnLabelDefault = $(this).data('btn-label-default');
var btnLabelToggled = $(this).data('btn-label-toggled');
if(btnLabelToggled !== '') {
if($(this).hasClass('btn-default')) {
new_html = $(this).html().replace(btnLabelDefault, btnLabelToggled);
} else {
new_html = $(this).html().replace(btnLabelToggled, btnLabelDefault);
}
$(this).html(new_html);
}
$(this).toggleClass(btnClass);
$(this).toggleClass('btn-default');
});
});


@@ -0,0 +1,172 @@
/**
* searx is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* searx is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with searx. If not, see < http://www.gnu.org/licenses/ >.
*
* (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
*/
$(document).ready(function(){
$(".searx_overpass_request").on( "click", function( event ) {
var overpass_url = "https://overpass-api.de/api/interpreter?data=";
var query_start = overpass_url + "[out:json][timeout:25];(";
var query_end = ");out meta;";
var osm_id = $(this).data('osm-id');
var osm_type = $(this).data('osm-type');
var result_table = $(this).data('result-table');
var result_table_loadicon = "#" + $(this).data('result-table-loadicon');
// tags which can be ignored
var osm_ignore_tags = [ "addr:city", "addr:country", "addr:housenumber", "addr:postcode", "addr:street" ];
if(osm_id && osm_type && result_table) {
result_table = "#" + result_table;
var query = null;
switch(osm_type) {
case 'node':
query = query_start + "node(" + osm_id + ");" + query_end;
break;
case 'way':
query = query_start + "way(" + osm_id + ");" + query_end;
break;
case 'relation':
query = query_start + "relation(" + osm_id + ");" + query_end;
break;
default:
break;
}
if(query) {
//alert(query);
var ajaxRequest = $.ajax( query )
.done(function( html) {
if(html && html.elements && html.elements[0]) {
var element = html.elements[0];
var newHtml = $(result_table).html();
for (var row in element.tags) {
if(element.tags.name === null || osm_ignore_tags.indexOf(row) == -1) {
newHtml += "<tr><td>" + row + "</td><td>";
switch(row) {
case "phone":
case "fax":
newHtml += "<a href=\"tel:" + element.tags[row].replace(/ /g,'') + "\">" + element.tags[row] + "</a>";
break;
case "email":
newHtml += "<a href=\"mailto:" + element.tags[row] + "\">" + element.tags[row] + "</a>";
break;
case "website":
case "url":
newHtml += "<a href=\"" + element.tags[row] + "\">" + element.tags[row] + "</a>";
break;
case "wikidata":
newHtml += "<a href=\"https://www.wikidata.org/wiki/" + element.tags[row] + "\">" + element.tags[row] + "</a>";
break;
case "wikipedia":
if(element.tags[row].indexOf(":") != -1) {
newHtml += "<a href=\"https://" + element.tags[row].substring(0,element.tags[row].indexOf(":")) + ".wikipedia.org/wiki/" + element.tags[row].substring(element.tags[row].indexOf(":")+1) + "\">" + element.tags[row] + "</a>";
break;
}
/* jshint ignore:start */
default:
/* jshint ignore:end */
newHtml += element.tags[row];
break;
}
newHtml += "</td></tr>";
}
}
$(result_table).html(newHtml);
$(result_table).removeClass('hidden');
$(result_table_loadicon).addClass('hidden');
}
})
.fail(function() {
$(result_table_loadicon).html($(result_table_loadicon).html() + "<p class=\"text-muted\">could not load data!</p>");
});
}
}
// this event occour only once per element
$( this ).off( event );
});
$(".searx_init_map").on( "click", function( event ) {
var leaflet_target = $(this).data('leaflet-target');
var map_lon = $(this).data('map-lon');
var map_lat = $(this).data('map-lat');
var map_zoom = $(this).data('map-zoom');
var map_boundingbox = $(this).data('map-boundingbox');
var map_geojson = $(this).data('map-geojson');
require(['leaflet-0.7.3.min'], function(leaflet) {
if(map_boundingbox) {
southWest = L.latLng(map_boundingbox[0], map_boundingbox[2]);
northEast = L.latLng(map_boundingbox[1], map_boundingbox[3]);
map_bounds = L.latLngBounds(southWest, northEast);
}
// TODO hack
// change default imagePath
L.Icon.Default.imagePath = "/static/oscar/img/map";
// init map
var map = L.map(leaflet_target);
// create the tile layer with correct attribution
var osmMapnikUrl='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png';
var osmMapnikAttrib='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors';
var osmMapnik = new L.TileLayer(osmMapnikUrl, {minZoom: 1, maxZoom: 19, attribution: osmMapnikAttrib});
var osmMapquestUrl='http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg';
var osmMapquestAttrib='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png">';
var osmMapquest = new L.TileLayer(osmMapquestUrl, {minZoom: 1, maxZoom: 18, subdomains: '1234', attribution: osmMapquestAttrib});
var osmMapquestOpenAerialUrl='http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg';
var osmMapquestOpenAerialAttrib='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="https://developer.mapquest.com/content/osm/mq_logo.png"> | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';
var osmMapquestOpenAerial = new L.TileLayer(osmMapquestOpenAerialUrl, {minZoom: 1, maxZoom: 11, subdomains: '1234', attribution: osmMapquestOpenAerialAttrib});
// init map view
if(map_bounds) {
// TODO hack: https://github.com/Leaflet/Leaflet/issues/2021
setTimeout(function () {
map.fitBounds(map_bounds, {
maxZoom:17
});
}, 0);
} else if (map_lon && map_lat) {
if(map_zoom)
map.setView(new L.LatLng(map_lat, map_lon),map_zoom);
else
map.setView(new L.LatLng(map_lat, map_lon),8);
}
map.addLayer(osmMapquest);
var baseLayers = {
"OSM Mapnik": osmMapnik,
"MapQuest": osmMapquest/*,
"MapQuest Open Aerial": osmMapquestOpenAerial*/
};
L.control.layers(baseLayers).addTo(map);
if(map_geojson)
L.geoJson(map_geojson).addTo(map);
/*else if(map_bounds)
L.rectangle(map_bounds, {color: "#ff7800", weight: 3, fill:false}).addTo(map);*/
});
// this event occour only once per element
$( this ).off( event );
});
});


@@ -0,0 +1,15 @@
{
"devDependencies": {
"grunt": "~0.4.5",
"grunt-contrib-uglify": "~0.6.0",
"grunt-contrib-watch" : "~0.6.1",
"grunt-contrib-concat" : "~0.5.0",
"grunt-contrib-jshint" : "~0.10.0"
},
"scripts": {
"build": "npm install && grunt",
"start": "grunt watch",
"test": "grunt"
}
}

View file

@ -19,14 +19,14 @@
 <p>Searx is a <a href="https://en.wikipedia.org/wiki/Metasearch_engine">metasearch engine</a>,
 inspired by the <a href="http://seeks-project.info/">seeks project</a>.<br />
-It provides basic privacy by mixing your queries with searches on other platforms without storing search data. Queries are made using a POST request on every browser (except chrome*). Therefore they show up in neither our logs, nor your url history. In case of Chrome* users there is an exception, Searx uses the search bar to perform GET requests.<br />
+It provides basic privacy by mixing your queries with searches on other platforms without storing search data. Queries are made using a POST request in every browser (except Chrome*). Therefore they show up in neither our logs nor your URL history. Chrome* users are the exception: if Searx is used from the search bar, it performs GET requests.<br />
 Searx can be added to your browser's search bar; moreover, it can be set as the default search engine.
 </p>
 <h2>How can I make it my own?</h2>
 <p>Searx appreciates your concern regarding logs, so take the <a href="https://github.com/asciimoo/searx">code</a> and run it yourself! <br />Add your Searx to this <a href="https://github.com/asciimoo/searx/wiki/Searx-instances">list</a> to help other people reclaim their privacy and make the Internet freer!
-<br />The more decentralized the Internet, is the more freedom we have!</p>
+<br />The more decentralized the Internet is, the more freedom we have!</p>
 <h2>More about searx</h2>

View file

@ -73,6 +73,6 @@
<script src="{{ url_for('static', filename='js/bootstrap.min.js') }}"></script> <script src="{{ url_for('static', filename='js/bootstrap.min.js') }}"></script>
{% if autocomplete %}<script src="{{ url_for('static', filename='js/typeahead.bundle.min.js') }}"></script>{% endif %} {% if autocomplete %}<script src="{{ url_for('static', filename='js/typeahead.bundle.min.js') }}"></script>{% endif %}
<script src="{{ url_for('static', filename='js/require-2.1.15.min.js') }}"></script> <script src="{{ url_for('static', filename='js/require-2.1.15.min.js') }}"></script>
<script src="{{ url_for('static', filename='js/scripts.js') }}"></script> <script src="{{ url_for('static', filename='js/searx.min.js') }}"></script>
</body> </body>
</html> </html>

View file

@ -3,7 +3,7 @@
<h4 class="panel-title">{{ infobox.infobox }}</h4> <h4 class="panel-title">{{ infobox.infobox }}</h4>
</div> </div>
<div class="panel-body"> <div class="panel-body">
{% if infobox.img_src %}<img class="img-responsive center-block infobox_part" src="{{ infobox.img_src }}" />{% endif %} {% if infobox.img_src %}<img class="img-responsive center-block infobox_part" src="{{ infobox.img_src }}" alt="{{ infobox.infobox }}" />{% endif %}
{% if infobox.content %}<p class="infobox_part">{{ infobox.content }}</p>{% endif %} {% if infobox.content %}<p class="infobox_part">{{ infobox.content }}</p>{% endif %}
{% if infobox.attributes %} {% if infobox.attributes %}

View file

@ -1,9 +1,9 @@
 {% from 'oscar/macros.html' import icon %}
-<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
-{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
-<small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
+<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
+{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}
+<small><a class="text-info" href="https://web.archive.org/web/{{ result.url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
 {% if result.content %}<p class="result-content">{{ result.content|safe }}</p>{% endif %}

View file

@ -1,5 +1,5 @@
<a href="{{ result.img_src }}" data-toggle="modal" data-target="#modal-{{ index }}"> <a href="{{ result.img_src }}" data-toggle="modal" data-target="#modal-{{ index }}">
<img src="{{ result.img_src }}" alt="{{ result.title|e }}" class="img-thumbnail"> <img src="{{ result.img_src }}" alt="{{ result.title|striptags }}" title="{{ result.title|striptags }}" class="img-thumbnail">
</a> </a>
<div class="modal fade" id="modal-{{ index }}" tabindex="-1" role="dialog" aria-hidden="true"> <div class="modal fade" id="modal-{{ index }}" tabindex="-1" role="dialog" aria-hidden="true">
@ -7,7 +7,7 @@
<div class="modal-content"> <div class="modal-content">
<div class="modal-header"> <div class="modal-header">
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">&times;</span><span class="sr-only">Close</span></button> <button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">&times;</span><span class="sr-only">Close</span></button>
<h4 class="modal-title">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}{{ result.title|striptags }}</h4> <h4 class="modal-title">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}{{ result.title|striptags }}</h4>
</div> </div>
<div class="modal-body"> <div class="modal-body">
<img class="img-responsive center-block" src="{{ result.img_src }}" alt="{{ result.title }}"> <img class="img-responsive center-block" src="{{ result.img_src }}" alt="{{ result.title }}">

View file

@ -1,8 +1,8 @@
 {% from 'oscar/macros.html' import icon %}
-<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
-{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
+<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
+{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}
 <small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>

View file

@ -1,9 +1,9 @@
 {% from 'oscar/macros.html' import icon %}
-<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
-{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
-<small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
+<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
+{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}
+<small><a class="text-info" href="https://web.archive.org/web/{{ result.url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
 <p class="result-content">{{ icon('transfer') }} {{ _('Seeder') }} <span class="badge">{{ result.seed }}</span>, {{ _('Leecher') }} <span class="badge">{{ result.leech }}</span>
 <br/>

View file

@ -1,13 +1,13 @@
 {% from 'oscar/macros.html' import icon %}
-<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
-{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
-<small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
+<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
+{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}
+<small><a class="text-info" href="https://web.archive.org/web/{{ result.url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
 <div class="container-fluid">
 <div class="row">
-<img class="thumbnail col-xs-6 col-sm-4 col-md-4 result-content" src="{{ result.thumbnail|safe }}" />
+<img class="thumbnail col-xs-6 col-sm-4 col-md-4 result-content" src="{{ result.thumbnail|safe }}" alt="{{ result.title|urlencode }} {{ result['favicon'] }}" />
 {% if result.content %}<p class="col-xs-12 col-sm-8 col-md-8 result-content">{{ result.content|safe }}</p>{% endif %}
 </div>
 </div>

View file

@ -51,6 +51,11 @@
 </div><!-- /#main_results -->
 <div class="col-sm-4" id="sidebar_results">
+{% if infoboxes %}
+{% for infobox in infoboxes %}
+{% include 'oscar/infobox.html' %}
+{% endfor %}
+{% endif %}
 {% if suggestions %}
 <div class="panel panel-default">
@ -76,7 +81,7 @@
 <form role="form">
 <div class="form-group">
 <label for="search_url">{{ _('Search URL') }}</label>
-<input type="url" class="form-control select-all-on-click cursor-text" name="search_url" value="{{ base_url }}?q={{ q|urlencode }}&pageno={{ pageno }}{% if selected_categories %}&category_{{ selected_categories|join("&category_") }}{% endif %}" readonly>
+<input id="search_url" type="url" class="form-control select-all-on-click cursor-text" name="search_url" value="{{ base_url }}?q={{ q|urlencode }}&amp;pageno={{ pageno }}{% if selected_categories %}&amp;category_{{ selected_categories|join("&category_")|replace(' ','+') }}{% endif %}" readonly>
 </div>
 </form>
@ -94,13 +99,6 @@
 <div class="clearfix"></div>
 </div>
 </div>
-{% if infoboxes %}
-{% for infobox in infoboxes %}
-{% include 'oscar/infobox.html' %}
-{% endfor %}
-{% endif %}
 </div><!-- /#sidebar_results -->
 </div>
 {% endblock %}
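A note on the search_url change above: &amp;amp; is the correct escape for "&" inside an HTML attribute, and the added |replace(' ','+') keeps multi-word category names from producing raw spaces in the URL. A rough Python 2 equivalent of the filter chain, with made-up values:

from urllib import quote_plus

base_url = 'https://searx.example/'          # hypothetical instance URL
q = 'test query'
pageno = 1
selected_categories = ['general', 'social media']

search_url = '%s?q=%s&pageno=%d' % (base_url, quote_plus(q), pageno)
if selected_categories:
    # mirrors {{ selected_categories|join("&category_")|replace(' ','+') }}
    search_url += '&category_' + '&category_'.join(selected_categories).replace(' ', '+')
# -> https://searx.example/?q=test+query&pageno=1&category_general&category_social+media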

View file

@ -2,7 +2,7 @@
<form method="{{ method or 'POST' }}" action="{{ url_for('index') }}" id="search_form" role="search"> <form method="{{ method or 'POST' }}" action="{{ url_for('index') }}" id="search_form" role="search">
<div class="input-group col-md-8 col-md-offset-2"> <div class="input-group col-md-8 col-md-offset-2">
<input type="search" name="q" class="form-control input-lg" id="q" placeholder="{{ _('Search for...') }}" autocomplete="off" value="{{ q }}"> <input type="search" name="q" class="form-control input-lg autofocus" id="q" placeholder="{{ _('Search for...') }}" autocomplete="off" value="{{ q }}">
<span class="input-group-btn"> <span class="input-group-btn">
<button type="submit" class="btn btn-default input-lg">{{ icon('search') }}<span class="sr-only">{{ _('Start search') }}</span></button> <button type="submit" class="btn btn-default input-lg">{{ icon('search') }}<span class="sr-only">{{ _('Start search') }}</span></button>
</span> </span>

View file

@ -18,7 +18,9 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 # version of searx
 VERSION_MAJOR = 0
-VERSION_MINOR = 4
+VERSION_MINOR = 5
 VERSION_BUILD = 0
-VERSION_STRING = "%d.%d.%d" % (VERSION_MAJOR,VERSION_MINOR,VERSION_BUILD)
+VERSION_STRING = "{0}.{1}.{2}".format(VERSION_MAJOR,
+                                      VERSION_MINOR,
+                                      VERSION_BUILD)
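The switch from %-formatting to str.format is purely cosmetic; a quick sanity check:

assert "%d.%d.%d" % (0, 5, 0) == "{0}.{1}.{2}".format(0, 5, 0) == "0.5.0"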

View file

@ -17,10 +17,6 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 (C) 2013- by Adam Tauber, <asciimoo@gmail.com>
 '''
-from gevent import monkey
-monkey.patch_all()
 if __name__ == '__main__':
     from sys import path
     from os.path import realpath, dirname
@ -298,10 +294,9 @@ def index():
 # TODO, check if timezone is calculated right
 if 'publishedDate' in result:
-    if result['publishedDate'].replace(tzinfo=None)\
-            >= datetime.now() - timedelta(days=1):
-        timedifference = datetime.now() - result['publishedDate']\
-            .replace(tzinfo=None)
+    result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')
+    if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):
+        timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)
         minutes = int((timedifference.seconds / 60) % 60)
         hours = int(timedifference.seconds / 60 / 60)
         if hours == 0:
@ -309,8 +304,6 @@ def index():
         else:
             result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes)  # noqa
     else:
-        result['pubdate'] = result['publishedDate']\
-            .strftime('%a, %d %b %Y %H:%M:%S %z')
         result['publishedDate'] = format_date(result['publishedDate'])
 if search.request_data.get('format') == 'json':
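For reference, the reworked publishedDate handling above reduces to roughly this standalone sketch (simplified, hypothetical values; the real code uses Babel's format_date and gettext):

from datetime import datetime, timedelta

published = datetime.now() - timedelta(hours=3, minutes=20)  # example value
# machine-readable value for the <time datetime="..."> attribute
pubdate = published.strftime('%Y-%m-%d %H:%M:%S%z')
if published >= datetime.now() - timedelta(days=1):
    # newer than one day: render a relative age
    diff = datetime.now() - published
    minutes = int((diff.seconds / 60) % 60)
    hours = int(diff.seconds / 60 / 60)
    human = u'{0} hour(s), {1} minute(s) ago'.format(hours, minutes)
else:
    # older results keep an absolute, formatted date
    human = published.strftime('%d %B %Y')
print(human)  # -> 3 hour(s), 20 minute(s) ago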
@ -409,7 +402,7 @@ def autocompleter():
     # return autocompleter results
     if request_data.get('format') == 'x-suggestions':
-        return Response(json.dumps([query, results]),
+        return Response(json.dumps([query.query, results]),
                         mimetype='application/json')
     else:
         return Response(json.dumps(results),
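The autocompleter fix above passes query.query (the raw query string) to json.dumps instead of the Query object, which is not JSON-serializable. The resulting x-suggestions payload then looks like this (made-up values):

import json

query_string = 'sear'                 # what the user typed so far
results = ['searx', 'search engine']  # hypothetical completions
print(json.dumps([query_string, results]))
# -> ["sear", ["searx", "search engine"]]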

View file

@ -41,7 +41,7 @@ setup(
     install_requires=[
         'flask',
         'flask-babel',
-        'grequests',
+        'requests',
         'lxml',
         'pyyaml',
         'setuptools',
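Together with the gevent monkey-patching removed from webapp.py above, this completes the move from grequests to plain requests. searx's real request dispatch lives elsewhere in the codebase; the sketch below only illustrates the general substitution, with placeholder URLs:

import threading
import requests

# before: responses = grequests.map(grequests.get(u) for u in urls)
# after, roughly: one worker thread per outgoing request
urls = ['https://example.com/a', 'https://example.com/b']
responses = [None] * len(urls)

def fetch(i, url):
    responses[i] = requests.get(url, timeout=2.0)

threads = [threading.Thread(target=fetch, args=(i, u))
           for i, u in enumerate(urls)]
for t in threads:
    t.start()
for t in threads:
    t.join()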

View file

@ -36,18 +36,6 @@ zc.recipe.testrunner = 2.0.0
 # WebTest==2.0.11
 beautifulsoup4 = 4.3.2
-# Required by:
-# grequests==0.2.0
-gevent = 1.0
-# Required by:
-# gevent==1.0
-greenlet = 0.4.2
-# Required by:
-# searx==0.1
-grequests = 0.2.0
 # Required by:
 # robotframework-httplibrary==0.4.2
 jsonpatch = 1.3