diff --git a/sources/AUTHORS.rst b/sources/AUTHORS.rst
index 906a0bf..ef1ae78 100644
--- a/sources/AUTHORS.rst
+++ b/sources/AUTHORS.rst
@@ -27,3 +27,5 @@ generally made searx better:
- Martin Zimmermann
- @courgette
- @kernc
+- @Cqoicebordel
+- @Reventl0v
diff --git a/sources/Makefile b/sources/Makefile
index 54f8a10..f9882e5 100644
--- a/sources/Makefile
+++ b/sources/Makefile
@@ -20,6 +20,7 @@ $(python):
tests: .installed.cfg
@bin/test
+ @grunt test --gruntfile searx/static/oscar/gruntfile.js
robot: .installed.cfg
@bin/robot
@@ -48,6 +49,9 @@ styles:
@lessc -x searx/static/oscar/less/bootstrap/bootstrap.less > searx/static/oscar/css/bootstrap.min.css
@lessc -x searx/static/oscar/less/oscar/oscar.less > searx/static/oscar/css/oscar.min.css
+grunt:
+ @grunt --gruntfile searx/static/oscar/gruntfile.js
+
locales:
@pybabel compile -d searx/translations
diff --git a/sources/requirements.txt b/sources/requirements.txt
index 88c1bc7..07b53d2 100644
--- a/sources/requirements.txt
+++ b/sources/requirements.txt
@@ -1,6 +1,6 @@
flask
flask-babel
-grequests
+requests
lxml
pyyaml
python-dateutil
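
Dropping grequests for requests means the engines' parallel HTTP fan-out has to come from plain threads instead of gevent. A minimal sketch of that pattern, assuming nothing about searx's actual search.py beyond the dependency swap recorded above (names like fetch_all are illustrative):

    import threading
    import requests

    results = []
    results_lock = threading.Lock()

    def fetch(url):
        # each worker performs one blocking requests call; a shared,
        # lock-protected list stands in for grequests' result iteration
        response = requests.get(url)
        with results_lock:
            results.append(response)

    def fetch_all(urls):
        threads = [threading.Thread(target=fetch, args=(url,)) for url in urls]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        return results
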
diff --git a/sources/searx/engines/bing.py b/sources/searx/engines/bing.py
index 56c6b36..5de461c 100644
--- a/sources/searx/engines/bing.py
+++ b/sources/searx/engines/bing.py
@@ -1,8 +1,9 @@
## Bing (Web)
-#
+#
# @website https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-#
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+# max. 5000 query/month
+#
# @using-api no (because of query limit)
# @results HTML (using search portal)
# @stable no (HTML can change)
@@ -58,8 +59,8 @@ def response(resp):
content = escape(' '.join(result.xpath('.//p//text()')))
# append result
- results.append({'url': url,
- 'title': title,
+ results.append({'url': url,
+ 'title': title,
'content': content})
# return results if something is found
@@ -74,8 +75,8 @@ def response(resp):
content = escape(' '.join(result.xpath('.//p//text()')))
# append result
- results.append({'url': url,
- 'title': title,
+ results.append({'url': url,
+ 'title': title,
'content': content})
# return results
diff --git a/sources/searx/engines/bing_images.py b/sources/searx/engines/bing_images.py
index b3eabba..6c5e49b 100644
--- a/sources/searx/engines/bing_images.py
+++ b/sources/searx/engines/bing_images.py
@@ -1,17 +1,19 @@
## Bing (Images)
-#
+#
# @website https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-#
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+# max. 5000 query/month
+#
# @using-api no (because of query limit)
# @results HTML (using search portal)
# @stable no (HTML can change)
# @parse url, title, img_src
#
-# @todo currently there are up to 35 images receive per page, because bing does not parse count=10. limited response to 10 images
+# @todo currently there are up to 35 images received per page,
+#       because bing does not parse count=10.
+#       the response is limited to 10 images
from urllib import urlencode
-from cgi import escape
from lxml import html
from yaml import load
import re
@@ -51,15 +53,15 @@ def response(resp):
dom = html.fromstring(resp.content)
# init regex for yaml-parsing
- p = re.compile( '({|,)([a-z]+):(")')
+ p = re.compile('({|,)([a-z]+):(")')
# parse results
for result in dom.xpath('//div[@class="dg_u"]'):
link = result.xpath('./a')[0]
# parse yaml-data (it is required to add a space, to make it parsable)
- yaml_data = load(p.sub( r'\1\2: \3', link.attrib.get('m')))
-
+ yaml_data = load(p.sub(r'\1\2: \3', link.attrib.get('m')))
+
title = link.attrib.get('t1')
#url = 'http://' + link.attrib.get('t3')
url = yaml_data.get('surl')
@@ -69,7 +71,7 @@ def response(resp):
results.append({'template': 'images.html',
'url': url,
'title': title,
- 'content': '',
+ 'content': '',
'img_src': img_src})
# TODO stop parsing if 10 images are found
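
The regex touched up above exists because Bing embeds result metadata as almost-YAML: a blob like {surl:"http://..."} lacks the space after the colon that PyYAML requires, and the substitution inserts it. A standalone illustration (the imgurl key is hypothetical; surl appears in the engine code above):

    import re
    from yaml import load

    p = re.compile('({|,)([a-z]+):(")')

    raw = '{surl:"http://example.com",imgurl:"http://example.com/a.jpg"}'
    fixed = p.sub(r'\1\2: \3', raw)  # inserts 'key: "' spacing for PyYAML
    data = load(fixed)
    print(data.get('surl'))          # http://example.com
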
diff --git a/sources/searx/engines/bing_news.py b/sources/searx/engines/bing_news.py
index 279f0d6..5dce4a2 100644
--- a/sources/searx/engines/bing_news.py
+++ b/sources/searx/engines/bing_news.py
@@ -1,8 +1,9 @@
## Bing (News)
-#
+#
# @website https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-#
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+# max. 5000 query/month
+#
# @using-api no (because of query limit)
# @results HTML (using search portal)
# @stable no (HTML can change)
@@ -57,12 +58,12 @@ def response(resp):
url = link.attrib.get('href')
title = ' '.join(link.xpath('.//text()'))
contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
- if contentXPath != None:
+ if contentXPath is not None:
content = escape(' '.join(contentXPath))
-
+
# parse publishedDate
publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
- if publishedDateXPath != None:
+ if publishedDateXPath is not None:
publishedDate = escape(' '.join(publishedDateXPath))
if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
@@ -89,10 +90,10 @@ def response(resp):
except TypeError:
# FIXME
publishedDate = datetime.now()
-
+
# append result
- results.append({'url': url,
- 'title': title,
+ results.append({'url': url,
+ 'title': title,
'publishedDate': publishedDate,
'content': content})
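
The "^[0-9]+ minute(s|) ago$" branch above turns Bing's relative timestamps into absolute ones. A minimal sketch of the idea, since the hunk shows only the match and not the arithmetic (the digit extraction here is illustrative):

    import re
    from datetime import datetime, timedelta

    published = '3 minutes ago'
    if re.match('^[0-9]+ minute(s|) ago$', published):
        # strip everything but the digits, then subtract from "now"
        minutes = int(re.sub(r'\D', '', published))
        publishedDate = datetime.now() - timedelta(minutes=minutes)
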
diff --git a/sources/searx/engines/currency_convert.py b/sources/searx/engines/currency_convert.py
index b5f0953..d8841c1 100644
--- a/sources/searx/engines/currency_convert.py
+++ b/sources/searx/engines/currency_convert.py
@@ -55,6 +55,6 @@ def response(resp):
resp.search_params['to'].lower()
)
- results.append({'answer' : answer, 'url': url})
+ results.append({'answer': answer, 'url': url})
return results
diff --git a/sources/searx/engines/dailymotion.py b/sources/searx/engines/dailymotion.py
index 75c2e50..a5bffa8 100644
--- a/sources/searx/engines/dailymotion.py
+++ b/sources/searx/engines/dailymotion.py
@@ -1,8 +1,8 @@
## Dailymotion (Videos)
-#
+#
# @website https://www.dailymotion.com
# @provide-api yes (http://www.dailymotion.com/developer)
-#
+#
# @using-api yes
# @results JSON
# @stable yes
@@ -12,7 +12,6 @@
from urllib import urlencode
from json import loads
-from lxml import html
# engine dependent config
categories = ['videos']
diff --git a/sources/searx/engines/deviantart.py b/sources/searx/engines/deviantart.py
index ff5e1d4..d436e81 100644
--- a/sources/searx/engines/deviantart.py
+++ b/sources/searx/engines/deviantart.py
@@ -1,8 +1,8 @@
## Deviantart (Images)
-#
+#
# @website https://www.deviantart.com/
# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
-#
+#
# @using-api no (TODO, rewrite to api)
# @results HTML
# @stable no (HTML can change)
diff --git a/sources/searx/engines/duckduckgo.py b/sources/searx/engines/duckduckgo.py
index 296dd9b..583e33f 100644
--- a/sources/searx/engines/duckduckgo.py
+++ b/sources/searx/engines/duckduckgo.py
@@ -1,15 +1,17 @@
## DuckDuckGo (Web)
-#
+#
# @website https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api), but not all results from search-site
-#
+# @provide-api yes (https://duckduckgo.com/api),
+# but not all results from search-site
+#
# @using-api no
# @results HTML (using search portal)
# @stable no (HTML can change)
# @parse url, title, content
#
# @todo rewrite to api
-# @todo language support (the current used site does not support language-change)
+# @todo language support
+# (the current used site does not support language-change)
from urllib import urlencode
from lxml.html import fromstring
@@ -37,7 +39,7 @@ def request(query, params):
if params['language'] == 'all':
locale = 'en-us'
else:
- locale = params['language'].replace('_','-').lower()
+ locale = params['language'].replace('_', '-').lower()
params['url'] = url.format(
query=urlencode({'q': query, 'kl': locale}),
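
The locale line above converts searx language codes into DuckDuckGo's kl parameter format. A quick illustration (the URL template is abbreviated; the real one is defined earlier in the engine file):

    from urllib import urlencode

    url = 'https://duckduckgo.com/html?{query}'
    locale = 'pt_BR'.replace('_', '-').lower()  # -> 'pt-br'
    print(url.format(query=urlencode([('q', 'searx'), ('kl', locale)])))
    # https://duckduckgo.com/html?q=searx&kl=pt-br
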
diff --git a/sources/searx/engines/duckduckgo_definitions.py b/sources/searx/engines/duckduckgo_definitions.py
index c008f22..8f81d2c 100644
--- a/sources/searx/engines/duckduckgo_definitions.py
+++ b/sources/searx/engines/duckduckgo_definitions.py
@@ -3,21 +3,25 @@ from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text
-url = 'https://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1&d=1'
+url = 'https://api.duckduckgo.com/'\
+ + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
+
def result_to_text(url, text, htmlResult):
# TODO : remove result ending with "Meaning" or "Category"
dom = html.fromstring(htmlResult)
a = dom.xpath('//a')
- if len(a)>=1:
+ if len(a) >= 1:
return extract_text(a[0])
else:
return text
+
def html_to_text(htmlFragment):
dom = html.fromstring(htmlFragment)
return extract_text(dom)
+
def request(query, params):
# TODO add kl={locale}
params['url'] = url.format(query=urlencode({'q': query}))
@@ -38,16 +42,15 @@ def response(resp):
# add answer if there is one
answer = search_res.get('Answer', '')
if answer != '':
- results.append({ 'answer' : html_to_text(answer) })
+ results.append({'answer': html_to_text(answer)})
# add infobox
if 'Definition' in search_res:
- content = content + search_res.get('Definition', '')
+ content = content + search_res.get('Definition', '')
if 'Abstract' in search_res:
content = content + search_res.get('Abstract', '')
-
# image
image = search_res.get('Image', '')
image = None if image == '' else image
@@ -55,29 +58,35 @@ def response(resp):
# attributes
if 'Infobox' in search_res:
infobox = search_res.get('Infobox', None)
- if 'content' in infobox:
+ if 'content' in infobox:
for info in infobox.get('content'):
- attributes.append({'label': info.get('label'), 'value': info.get('value')})
+ attributes.append({'label': info.get('label'),
+ 'value': info.get('value')})
# urls
for ddg_result in search_res.get('Results', []):
if 'FirstURL' in ddg_result:
firstURL = ddg_result.get('FirstURL', '')
text = ddg_result.get('Text', '')
- urls.append({'title':text, 'url':firstURL})
- results.append({'title':heading, 'url': firstURL})
+ urls.append({'title': text, 'url': firstURL})
+ results.append({'title': heading, 'url': firstURL})
# related topics
for ddg_result in search_res.get('RelatedTopics', None):
if 'FirstURL' in ddg_result:
- suggestion = result_to_text(ddg_result.get('FirstURL', None), ddg_result.get('Text', None), ddg_result.get('Result', None))
+ suggestion = result_to_text(ddg_result.get('FirstURL', None),
+ ddg_result.get('Text', None),
+ ddg_result.get('Result', None))
if suggestion != heading:
results.append({'suggestion': suggestion})
elif 'Topics' in ddg_result:
suggestions = []
- relatedTopics.append({ 'name' : ddg_result.get('Name', ''), 'suggestions': suggestions })
+ relatedTopics.append({'name': ddg_result.get('Name', ''),
+ 'suggestions': suggestions})
for topic_result in ddg_result.get('Topics', []):
- suggestion = result_to_text(topic_result.get('FirstURL', None), topic_result.get('Text', None), topic_result.get('Result', None))
+ suggestion = result_to_text(topic_result.get('FirstURL', None),
+ topic_result.get('Text', None),
+ topic_result.get('Result', None))
if suggestion != heading:
suggestions.append(suggestion)
@@ -86,21 +95,26 @@ def response(resp):
if abstractURL != '':
# add as result ? problem always in english
infobox_id = abstractURL
- urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL})
+ urls.append({'title': search_res.get('AbstractSource'),
+ 'url': abstractURL})
# definition
definitionURL = search_res.get('DefinitionURL', '')
if definitionURL != '':
# add as result ? as answer ? problem always in english
infobox_id = definitionURL
- urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})
+ urls.append({'title': search_res.get('DefinitionSource'),
+ 'url': definitionURL})
# entity
entity = search_res.get('Entity', None)
- # TODO continent / country / department / location / waterfall / mountain range : link to map search, get weather, near by locations
+ # TODO continent / country / department / location / waterfall /
+ # mountain range :
+ # link to map search, get weather, near by locations
# TODO musician : link to music search
# TODO concert tour : ??
- # TODO film / actor / television / media franchise : links to IMDB / rottentomatoes (or scrap result)
+ # TODO film / actor / television / media franchise :
+# links to IMDB / rottentomatoes (or scrape result)
# TODO music : link to musicbrainz / last.fm
# TODO book : ??
# TODO artist / playwright : ??
@@ -114,24 +128,25 @@ def response(resp):
# TODO programming language : ??
# TODO file format : ??
- if len(heading)>0:
+ if len(heading) > 0:
# TODO get infobox.meta.value where .label='article_title'
- if image==None and len(attributes)==0 and len(urls)==1 and len(relatedTopics)==0 and len(content)==0:
+ if image is None and len(attributes) == 0 and len(urls) == 1 and\
+ len(relatedTopics) == 0 and len(content) == 0:
results.append({
- 'url': urls[0]['url'],
- 'title': heading,
- 'content': content
- })
+ 'url': urls[0]['url'],
+ 'title': heading,
+ 'content': content
+ })
else:
results.append({
- 'infobox': heading,
- 'id': infobox_id,
- 'entity': entity,
- 'content': content,
- 'img_src' : image,
- 'attributes': attributes,
- 'urls': urls,
- 'relatedTopics': relatedTopics
- })
+ 'infobox': heading,
+ 'id': infobox_id,
+ 'entity': entity,
+ 'content': content,
+ 'img_src': image,
+ 'attributes': attributes,
+ 'urls': urls,
+ 'relatedTopics': relatedTopics
+ })
return results
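
html_to_text and result_to_text above exist because this DuckDuckGo endpoint returns snippets as HTML fragments. A minimal usage sketch mirroring html_to_text (output shown approximately):

    from lxml import html
    from searx.engines.xpath import extract_text

    fragment = '<a href="https://duckduckgo.com/Example">Example</a>'
    # parse the fragment and keep only its text content
    print(extract_text(html.fromstring(fragment)))  # Example
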
diff --git a/sources/searx/engines/dummy.py b/sources/searx/engines/dummy.py
index 5a2cdf6..c60b7a5 100644
--- a/sources/searx/engines/dummy.py
+++ b/sources/searx/engines/dummy.py
@@ -1,5 +1,5 @@
## Dummy
-#
+#
# @results empty array
# @stable yes
diff --git a/sources/searx/engines/faroo.py b/sources/searx/engines/faroo.py
index 8c69c5b..dada475 100644
--- a/sources/searx/engines/faroo.py
+++ b/sources/searx/engines/faroo.py
@@ -1,8 +1,8 @@
## Faroo (Web, News)
-#
+#
# @website http://www.faroo.com
# @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-#
+#
# @using-api yes
# @results JSON
# @stable yes
@@ -24,9 +24,10 @@ api_key = None
url = 'http://www.faroo.com/'
search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
-search_category = {'general': 'web',
+search_category = {'general': 'web',
'news': 'news'}
+
# do search-request
def request(query, params):
offset = (params['pageno']-1) * number_of_results + 1
@@ -48,7 +49,7 @@ def request(query, params):
query=urlencode({'q': query}),
language=language,
categorie=categorie,
- api_key=api_key )
+ api_key=api_key)
# using searx User-Agent
params['headers']['User-Agent'] = searx_useragent()
@@ -101,7 +102,7 @@ def response(resp):
results.append({'template': 'images.html',
'url': result['url'],
'title': result['title'],
- 'content': result['kwic'],
+ 'content': result['kwic'],
'img_src': result['iurl']})
# return results
diff --git a/sources/searx/engines/generalfile.py b/sources/searx/engines/generalfile.py
index 11d8b69..b7d7162 100644
--- a/sources/searx/engines/generalfile.py
+++ b/sources/searx/engines/generalfile.py
@@ -1,8 +1,8 @@
## General Files (Files)
-#
+#
# @website http://www.general-files.org
# @provide-api no (nothing found)
-#
+#
# @using-api no (because nothing found)
# @results HTML (using search portal)
# @stable no (HTML can change)
diff --git a/sources/searx/engines/github.py b/sources/searx/engines/github.py
index 53fec02..a68aed1 100644
--- a/sources/searx/engines/github.py
+++ b/sources/searx/engines/github.py
@@ -1,8 +1,8 @@
## Github (It)
-#
+#
# @website https://github.com/
# @provide-api yes (https://developer.github.com/v3/)
-#
+#
# @using-api yes
# @results JSON
# @stable yes (using api)
diff --git a/sources/searx/engines/google.py b/sources/searx/engines/google.py
index 9dbe8b8..d4db82a 100644
--- a/sources/searx/engines/google.py
+++ b/sources/searx/engines/google.py
@@ -1,15 +1,15 @@
-## Google (Web)
-#
+# Google (Web)
+#
# @website https://www.google.com
# @provide-api yes (https://developers.google.com/custom-search/)
-#
+#
# @using-api no
# @results HTML
# @stable no (HTML can change)
# @parse url, title, content, suggestion
from urllib import urlencode
-from urlparse import unquote,urlparse,parse_qsl
+from urlparse import urlparse, parse_qsl
from lxml import html
from searx.engines.xpath import extract_text, extract_url
@@ -23,10 +23,13 @@ google_hostname = 'www.google.com'
search_path = '/search'
redirect_path = '/url'
images_path = '/images'
-search_url = 'https://' + google_hostname + search_path + '?{query}&start={offset}&gbv=1'
+search_url = ('https://' +
+ google_hostname +
+ search_path +
+ '?{query}&start={offset}&gbv=1')
# specific xpath variables
-results_xpath= '//li[@class="g"]'
+results_xpath = '//li[@class="g"]'
url_xpath = './/h3/a/@href'
title_xpath = './/h3'
content_xpath = './/span[@class="st"]'
@@ -36,15 +39,18 @@ images_xpath = './/div/a'
image_url_xpath = './@href'
image_img_src_xpath = './img/@src'
+
# remove google-specific tracking-url
def parse_url(url_string):
parsed_url = urlparse(url_string)
- if parsed_url.netloc in [google_hostname, ''] and parsed_url.path==redirect_path:
+ if (parsed_url.netloc in [google_hostname, '']
+ and parsed_url.path == redirect_path):
query = dict(parse_qsl(parsed_url.query))
return query['q']
else:
return url_string
+
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10
@@ -52,7 +58,7 @@ def request(query, params):
if params['language'] == 'all':
language = 'en'
else:
- language = params['language'].replace('_','-').lower()
+ language = params['language'].replace('_', '-').lower()
params['url'] = search_url.format(offset=offset,
query=urlencode({'q': query}))
@@ -74,19 +80,24 @@ def response(resp):
try:
url = parse_url(extract_url(result.xpath(url_xpath), search_url))
parsed_url = urlparse(url)
- if parsed_url.netloc==google_hostname and parsed_url.path==search_path:
+ if (parsed_url.netloc == google_hostname
+ and parsed_url.path == search_path):
# remove the link to google news
continue
- if parsed_url.netloc==google_hostname and parsed_url.path==images_path:
- # images result
- results = results + parse_images(result)
+ # images result
+ if (parsed_url.netloc == google_hostname
+ and parsed_url.path == images_path):
+ # only thumbnail image provided,
+ # so skipping image results
+ # results = results + parse_images(result)
+ pass
else:
# normal result
content = extract_text(result.xpath(content_xpath)[0])
# append result
- results.append({'url': url,
- 'title': title,
+ results.append({'url': url,
+ 'title': title,
'content': content})
except:
continue
@@ -99,12 +110,13 @@ def response(resp):
# return results
return results
+
def parse_images(result):
results = []
for image in result.xpath(images_xpath):
url = parse_url(extract_text(image.xpath(image_url_xpath)[0]))
img_src = extract_text(image.xpath(image_img_src_xpath)[0])
-
+
# append result
results.append({'url': url,
'title': '',
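
parse_url above strips Google's /url tracking redirects so results link to the target directly. A standalone sketch of the equivalent behavior:

    from urlparse import urlparse, parse_qsl

    def strip_redirect(url_string):
        # redirect links look like https://www.google.com/url?q=<target>
        parsed_url = urlparse(url_string)
        if parsed_url.netloc in ['www.google.com', ''] and parsed_url.path == '/url':
            return dict(parse_qsl(parsed_url.query))['q']
        return url_string

    print(strip_redirect('https://www.google.com/url?q=https://example.org/page'))
    # https://example.org/page
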
diff --git a/sources/searx/engines/google_images.py b/sources/searx/engines/google_images.py
index 6c99f28..491f5c2 100644
--- a/sources/searx/engines/google_images.py
+++ b/sources/searx/engines/google_images.py
@@ -1,14 +1,15 @@
## Google (Images)
-#
+#
# @website https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-#
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+# deprecated!
+#
# @using-api yes
# @results JSON
# @stable yes (but deprecated)
# @parse url, title, img_src
-from urllib import urlencode
+from urllib import urlencode, unquote
from json import loads
# engine dependent config
@@ -51,7 +52,7 @@ def response(resp):
results.append({'url': href,
'title': title,
'content': '',
- 'img_src': result['url'],
+ 'img_src': unquote(result['url']),
'template': 'images.html'})
# return results
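
The unquote call added above decodes percent-escaped image URLs that the deprecated API returns. For example:

    from urllib import unquote

    img = 'http://example.com/images%2Fcat%20photo.jpg'
    print(unquote(img))  # http://example.com/images/cat photo.jpg
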
diff --git a/sources/searx/engines/google_news.py b/sources/searx/engines/google_news.py
index becc7e2..eb114f9 100644
--- a/sources/searx/engines/google_news.py
+++ b/sources/searx/engines/google_news.py
@@ -1,8 +1,9 @@
## Google (News)
-#
+#
# @website https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-#
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+# deprecated!
+#
# @using-api yes
# @results JSON
# @stable yes (but deprecated)
diff --git a/sources/searx/engines/openstreetmap.py b/sources/searx/engines/openstreetmap.py
index f727ca8..36b6011 100644
--- a/sources/searx/engines/openstreetmap.py
+++ b/sources/searx/engines/openstreetmap.py
@@ -39,16 +39,16 @@ def response(resp):
url = result_base_url.format(osm_type=osm_type,
osm_id=r['osm_id'])
- osm = {'type':osm_type,
- 'id':r['osm_id']}
+ osm = {'type': osm_type,
+ 'id': r['osm_id']}
- geojson = r.get('geojson')
+ geojson = r.get('geojson')
# if no geojson is found and osm_type is a node, add geojson Point
if not geojson and\
osm_type == 'node':
- geojson = {u'type':u'Point',
- u'coordinates':[r['lon'],r['lat']]}
+ geojson = {u'type': u'Point',
+ u'coordinates': [r['lon'], r['lat']]}
address_raw = r.get('address')
address = {}
@@ -59,20 +59,20 @@ def response(resp):
r['class'] == 'tourism' or\
r['class'] == 'leisure':
if address_raw.get('address29'):
- address = {'name':address_raw.get('address29')}
+ address = {'name': address_raw.get('address29')}
else:
- address = {'name':address_raw.get(r['type'])}
+ address = {'name': address_raw.get(r['type'])}
# add rest of adressdata, if something is already found
if address.get('name'):
- address.update({'house_number':address_raw.get('house_number'),
- 'road':address_raw.get('road'),
- 'locality':address_raw.get('city',
- address_raw.get('town',
- address_raw.get('village'))),
- 'postcode':address_raw.get('postcode'),
- 'country':address_raw.get('country'),
- 'country_code':address_raw.get('country_code')})
+ address.update({'house_number': address_raw.get('house_number'),
+ 'road': address_raw.get('road'),
+ 'locality': address_raw.get('city',
+ address_raw.get('town',
+ address_raw.get('village'))),
+ 'postcode': address_raw.get('postcode'),
+ 'country': address_raw.get('country'),
+ 'country_code': address_raw.get('country_code')})
else:
address = None
diff --git a/sources/searx/engines/piratebay.py b/sources/searx/engines/piratebay.py
index 9533b62..f6144fa 100644
--- a/sources/searx/engines/piratebay.py
+++ b/sources/searx/engines/piratebay.py
@@ -1,8 +1,8 @@
## Piratebay (Videos, Music, Files)
-#
+#
# @website https://thepiratebay.se
# @provide-api no (nothing found)
-#
+#
# @using-api no
# @results HTML (using search portal)
# @stable yes (HTML can change)
@@ -19,11 +19,11 @@ categories = ['videos', 'music', 'files']
paging = True
# search-url
-url = 'https://thepiratebay.se/'
+url = 'https://thepiratebay.cr/'
search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
# piratebay specific type-definitions
-search_types = {'files': '0',
+search_types = {'files': '0',
'music': '100',
'videos': '200'}
diff --git a/sources/searx/engines/soundcloud.py b/sources/searx/engines/soundcloud.py
index aebea23..390e7ca 100644
--- a/sources/searx/engines/soundcloud.py
+++ b/sources/searx/engines/soundcloud.py
@@ -1,8 +1,8 @@
## Soundcloud (Music)
-#
+#
# @website https://soundcloud.com
# @provide-api yes (https://developers.soundcloud.com/)
-#
+#
# @using-api yes
# @results JSON
# @stable yes
diff --git a/sources/searx/engines/stackoverflow.py b/sources/searx/engines/stackoverflow.py
index edbe74a..dcbb189 100644
--- a/sources/searx/engines/stackoverflow.py
+++ b/sources/searx/engines/stackoverflow.py
@@ -1,8 +1,8 @@
## Stackoverflow (It)
-#
+#
# @website https://stackoverflow.com/
# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
-#
+#
# @using-api no
# @results HTML
# @stable no (HTML can change)
@@ -50,8 +50,8 @@ def response(resp):
content = escape(' '.join(result.xpath(content_xpath)))
# append result
- results.append({'url': href,
- 'title': title,
+ results.append({'url': href,
+ 'title': title,
'content': content})
# return results
diff --git a/sources/searx/engines/twitter.py b/sources/searx/engines/twitter.py
index 8de7814..0689150 100644
--- a/sources/searx/engines/twitter.py
+++ b/sources/searx/engines/twitter.py
@@ -1,8 +1,8 @@
## Twitter (Social media)
-#
+#
# @website https://twitter.com/
# @provide-api yes (https://dev.twitter.com/docs/using-search)
-#
+#
# @using-api no
# @results HTML (using search portal)
# @stable no (HTML can change)
diff --git a/sources/searx/engines/vimeo.py b/sources/searx/engines/vimeo.py
index 2a91e76..c66c414 100644
--- a/sources/searx/engines/vimeo.py
+++ b/sources/searx/engines/vimeo.py
@@ -1,8 +1,9 @@
## Vimeo (Videos)
-#
+#
# @website https://vimeo.com/
-# @provide-api yes (http://developer.vimeo.com/api), they have a maximum count of queries/hour
-#
+# @provide-api yes (http://developer.vimeo.com/api),
+# they have a maximum count of queries/hour
+#
# @using-api no (TODO, rewrite to api)
# @results HTML (using search portal)
# @stable no (HTML can change)
@@ -35,11 +36,12 @@ publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
# do search-request
def request(query, params):
- params['url'] = search_url.format(pageno=params['pageno'] ,
+ params['url'] = search_url.format(pageno=params['pageno'],
query=urlencode({'q': query}))
# TODO required?
- params['cookies']['__utma'] = '00000000.000#0000000.0000000000.0000000000.0000000000.0'
+ params['cookies']['__utma'] =\
+ '00000000.000#0000000.0000000000.0000000000.0000000000.0'
return params
diff --git a/sources/searx/engines/wikidata.py b/sources/searx/engines/wikidata.py
index 7877e11..ab799e6 100644
--- a/sources/searx/engines/wikidata.py
+++ b/sources/searx/engines/wikidata.py
@@ -2,13 +2,25 @@ import json
from requests import get
from urllib import urlencode
-resultCount=1
-urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
-urlDetail = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=labels%7Cinfo%7Csitelinks%7Csitelinks%2Furls%7Cdescriptions%7Cclaims&{query}'
-urlMap = 'https://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+result_count = 1
+wikidata_host = 'https://www.wikidata.org'
+wikidata_api = wikidata_host + '/w/api.php'
+url_search = wikidata_api \
+ + '?action=query&list=search&format=json'\
+ + '&srnamespace=0&srprop=sectiontitle&{query}'
+url_detail = wikidata_api\
+ + '?action=wbgetentities&format=json'\
+ + '&props=labels%7Cinfo%7Csitelinks'\
+ + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
+ + '&{query}'
+url_map = 'https://www.openstreetmap.org/'\
+ + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+
def request(query, params):
- params['url'] = urlSearch.format(query=urlencode({'srsearch': query, 'srlimit': resultCount}))
+ params['url'] = url_search.format(
+ query=urlencode({'srsearch': query,
+ 'srlimit': result_count}))
return params
@@ -23,7 +35,8 @@ def response(resp):
language = resp.search_params['language'].split('_')[0]
if language == 'all':
language = 'en'
- url = urlDetail.format(query=urlencode({'ids': '|'.join(wikidata_ids), 'languages': language + '|en'}))
+ url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
+ 'languages': language + '|en'}))
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
@@ -32,6 +45,7 @@ def response(resp):
return results
+
def getDetail(jsonresponse, wikidata_id, language):
results = []
urls = []
@@ -40,60 +54,103 @@ def getDetail(jsonresponse, wikidata_id, language):
result = jsonresponse.get('entities', {}).get(wikidata_id, {})
title = result.get('labels', {}).get(language, {}).get('value', None)
- if title == None:
+ if title is None:
title = result.get('labels', {}).get('en', {}).get('value', None)
- if title == None:
+ if title is None:
return results
- description = result.get('descriptions', {}).get(language, {}).get('value', None)
- if description == None:
- description = result.get('descriptions', {}).get('en', {}).get('value', '')
+ description = result\
+ .get('descriptions', {})\
+ .get(language, {})\
+ .get('value', None)
+
+ if description is None:
+ description = result\
+ .get('descriptions', {})\
+ .get('en', {})\
+ .get('value', '')
claims = result.get('claims', {})
official_website = get_string(claims, 'P856', None)
- if official_website != None:
- urls.append({ 'title' : 'Official site', 'url': official_website })
- results.append({ 'title': title, 'url' : official_website })
+ if official_website is not None:
+ urls.append({'title': 'Official site', 'url': official_website})
+ results.append({'title': title, 'url': official_website})
wikipedia_link_count = 0
if language != 'en':
- wikipedia_link_count += add_url(urls, 'Wikipedia (' + language + ')', get_wikilink(result, language + 'wiki'))
+ wikipedia_link_count += add_url(urls,
+ 'Wikipedia (' + language + ')',
+ get_wikilink(result, language +
+ 'wiki'))
wikipedia_en_link = get_wikilink(result, 'enwiki')
- wikipedia_link_count += add_url(urls, 'Wikipedia (en)', wikipedia_en_link)
+ wikipedia_link_count += add_url(urls,
+ 'Wikipedia (en)',
+ wikipedia_en_link)
if wikipedia_link_count == 0:
misc_language = get_wiki_firstlanguage(result, 'wiki')
- if misc_language != None:
- add_url(urls, 'Wikipedia (' + misc_language + ')', get_wikilink(result, misc_language + 'wiki'))
+ if misc_language is not None:
+ add_url(urls,
+ 'Wikipedia (' + misc_language + ')',
+ get_wikilink(result, misc_language + 'wiki'))
if language != 'en':
- add_url(urls, 'Wiki voyage (' + language + ')', get_wikilink(result, language + 'wikivoyage'))
- add_url(urls, 'Wiki voyage (en)', get_wikilink(result, 'enwikivoyage'))
+ add_url(urls,
+ 'Wiki voyage (' + language + ')',
+ get_wikilink(result, language + 'wikivoyage'))
+
+ add_url(urls,
+ 'Wiki voyage (en)',
+ get_wikilink(result, 'enwikivoyage'))
if language != 'en':
- add_url(urls, 'Wikiquote (' + language + ')', get_wikilink(result, language + 'wikiquote'))
- add_url(urls, 'Wikiquote (en)', get_wikilink(result, 'enwikiquote'))
+ add_url(urls,
+ 'Wikiquote (' + language + ')',
+ get_wikilink(result, language + 'wikiquote'))
- add_url(urls, 'Commons wiki', get_wikilink(result, 'commonswiki'))
+ add_url(urls,
+ 'Wikiquote (en)',
+ get_wikilink(result, 'enwikiquote'))
- add_url(urls, 'Location', get_geolink(claims, 'P625', None))
+ add_url(urls,
+ 'Commons wiki',
+ get_wikilink(result, 'commonswiki'))
- add_url(urls, 'Wikidata', 'https://www.wikidata.org/wiki/' + wikidata_id + '?uselang='+ language)
+ add_url(urls,
+ 'Location',
+ get_geolink(claims, 'P625', None))
+
+ add_url(urls,
+ 'Wikidata',
+ 'https://www.wikidata.org/wiki/'
+ + wikidata_id + '?uselang=' + language)
musicbrainz_work_id = get_string(claims, 'P435')
- if musicbrainz_work_id != None:
- add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/work/' + musicbrainz_work_id)
+ if musicbrainz_work_id is not None:
+ add_url(urls,
+ 'MusicBrainz',
+ 'http://musicbrainz.org/work/'
+ + musicbrainz_work_id)
musicbrainz_artist_id = get_string(claims, 'P434')
- if musicbrainz_artist_id != None:
- add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/artist/' + musicbrainz_artist_id)
+ if musicbrainz_artist_id is not None:
+ add_url(urls,
+ 'MusicBrainz',
+ 'http://musicbrainz.org/artist/'
+ + musicbrainz_artist_id)
musicbrainz_release_group_id = get_string(claims, 'P436')
- if musicbrainz_release_group_id != None:
- add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/release-group/' + musicbrainz_release_group_id)
+ if musicbrainz_release_group_id is not None:
+ add_url(urls,
+ 'MusicBrainz',
+ 'http://musicbrainz.org/release-group/'
+ + musicbrainz_release_group_id)
musicbrainz_label_id = get_string(claims, 'P966')
- if musicbrainz_label_id != None:
- add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/label/' + musicbrainz_label_id)
+ if musicbrainz_label_id is not None:
+ add_url(urls,
+ 'MusicBrainz',
+ 'http://musicbrainz.org/label/'
+ + musicbrainz_label_id)
# musicbrainz_area_id = get_string(claims, 'P982')
# P1407 MusicBrainz series ID
@@ -102,42 +159,43 @@ def getDetail(jsonresponse, wikidata_id, language):
# P1407 MusicBrainz series ID
postal_code = get_string(claims, 'P281', None)
- if postal_code != None:
- attributes.append({'label' : 'Postal code(s)', 'value' : postal_code})
+ if postal_code is not None:
+ attributes.append({'label': 'Postal code(s)', 'value': postal_code})
date_of_birth = get_time(claims, 'P569', None)
- if date_of_birth != None:
- attributes.append({'label' : 'Date of birth', 'value' : date_of_birth})
+ if date_of_birth is not None:
+ attributes.append({'label': 'Date of birth', 'value': date_of_birth})
date_of_death = get_time(claims, 'P570', None)
- if date_of_death != None:
- attributes.append({'label' : 'Date of death', 'value' : date_of_death})
+ if date_of_death is not None:
+ attributes.append({'label': 'Date of death', 'value': date_of_death})
- if len(attributes)==0 and len(urls)==2 and len(description)==0:
+ if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
results.append({
- 'url': urls[0]['url'],
- 'title': title,
- 'content': description
- })
+ 'url': urls[0]['url'],
+ 'title': title,
+ 'content': description
+ })
else:
results.append({
- 'infobox' : title,
- 'id' : wikipedia_en_link,
- 'content' : description,
- 'attributes' : attributes,
- 'urls' : urls
- })
+ 'infobox': title,
+ 'id': wikipedia_en_link,
+ 'content': description,
+ 'attributes': attributes,
+ 'urls': urls
+ })
return results
def add_url(urls, title, url):
- if url != None:
- urls.append({'title' : title, 'url' : url})
+ if url is not None:
+ urls.append({'title': title, 'url': url})
return 1
else:
return 0
+
def get_mainsnak(claims, propertyName):
propValue = claims.get(propertyName, {})
if len(propValue) == 0:
@@ -157,7 +215,7 @@ def get_string(claims, propertyName, defaultValue=None):
mainsnak = e.get('mainsnak', {})
datavalue = mainsnak.get('datavalue', {})
- if datavalue != None:
+ if datavalue is not None:
result.append(datavalue.get('value', ''))
if len(result) == 0:
@@ -177,7 +235,7 @@ def get_time(claims, propertyName, defaultValue=None):
mainsnak = e.get('mainsnak', {})
datavalue = mainsnak.get('datavalue', {})
- if datavalue != None:
+ if datavalue is not None:
value = datavalue.get('value', '')
result.append(value.get('time', ''))
@@ -190,7 +248,7 @@ def get_time(claims, propertyName, defaultValue=None):
def get_geolink(claims, propertyName, defaultValue=''):
mainsnak = get_mainsnak(claims, propertyName)
- if mainsnak == None:
+ if mainsnak is None:
return defaultValue
datatype = mainsnak.get('datatype', '')
@@ -209,21 +267,25 @@ def get_geolink(claims, propertyName, defaultValue=''):
# 1 --> 6
# 0.016666666666667 --> 9
# 0.00027777777777778 --> 19
- # wolframalpha : quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
+ # wolframalpha :
+ # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
# 14.1186-8.8322 x+0.625447 x^2
if precision < 0.0003:
zoom = 19
else:
zoom = int(15 - precision*8.8322 + precision*precision*0.625447)
- url = urlMap.replace('{latitude}', str(value.get('latitude',0))).replace('{longitude}', str(value.get('longitude',0))).replace('{zoom}', str(zoom))
+ url = url_map\
+ .replace('{latitude}', str(value.get('latitude', 0)))\
+ .replace('{longitude}', str(value.get('longitude', 0)))\
+ .replace('{zoom}', str(zoom))
return url
def get_wikilink(result, wikiid):
url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
- if url == None:
+ if url is None:
return url
elif url.startswith('http://'):
url = url.replace('http://', 'https://')
@@ -231,8 +293,9 @@ def get_wikilink(result, wikiid):
url = 'https:' + url
return url
+
def get_wiki_firstlanguage(result, wikipatternid):
for k in result.get('sitelinks', {}).keys():
- if k.endswith(wikipatternid) and len(k)==(2+len(wikipatternid)):
+ if k.endswith(wikipatternid) and len(k) == (2+len(wikipatternid)):
return k[0:2]
return None
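
The rebuilt url_search template above is filled in by request() with an urlencoded srsearch/srlimit pair. How a finished search URL looks, using only pieces visible in this hunk:

    from urllib import urlencode

    wikidata_api = 'https://www.wikidata.org' + '/w/api.php'
    url_search = wikidata_api \
        + '?action=query&list=search&format=json'\
        + '&srnamespace=0&srprop=sectiontitle&{query}'

    print(url_search.format(query=urlencode({'srsearch': 'Douglas Adams',
                                             'srlimit': 1})))
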
diff --git a/sources/searx/engines/yacy.py b/sources/searx/engines/yacy.py
index 2345b24..3ee0e91 100644
--- a/sources/searx/engines/yacy.py
+++ b/sources/searx/engines/yacy.py
@@ -1,8 +1,9 @@
## Yacy (Web, Images, Videos, Music, Files)
-#
+#
# @website http://yacy.net
-# @provide-api yes (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
-#
+# @provide-api yes
+# (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
+#
# @using-api yes
# @results JSON
# @stable yes
@@ -16,7 +17,7 @@ from urllib import urlencode
from dateutil import parser
# engine dependent config
-categories = ['general', 'images'] #TODO , 'music', 'videos', 'files'
+categories = ['general', 'images'] # TODO , 'music', 'videos', 'files'
paging = True
language_support = True
number_of_results = 5
@@ -28,7 +29,7 @@ search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limi
# yacy specific type-definitions
search_types = {'general': 'text',
'images': 'image',
- 'files': 'app',
+ 'files': 'app',
'music': 'audio',
'videos': 'video'}
diff --git a/sources/searx/engines/yahoo.py b/sources/searx/engines/yahoo.py
index 5e34a2b..938540e 100644
--- a/sources/searx/engines/yahoo.py
+++ b/sources/searx/engines/yahoo.py
@@ -1,8 +1,9 @@
## Yahoo (Web)
-#
+#
# @website https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/), $0.80/1000 queries
-#
+# @provide-api yes (https://developer.yahoo.com/boss/search/),
+# $0.80/1000 queries
+#
# @using-api no (because pricing)
# @results HTML (using search portal)
# @stable no (HTML can change)
@@ -40,8 +41,8 @@ def parse_url(url_string):
if endpos > -1:
endpositions.append(endpos)
- if start==0 or len(endpositions) == 0:
- return url_string
+ if start == 0 or len(endpositions) == 0:
+ return url_string
else:
end = min(endpositions)
return unquote(url_string[start:end])
@@ -84,8 +85,8 @@ def response(resp):
content = extract_text(result.xpath(content_xpath)[0])
# append result
- results.append({'url': url,
- 'title': title,
+ results.append({'url': url,
+ 'title': title,
'content': content})
# if no suggestion found, return results
diff --git a/sources/searx/engines/youtube.py b/sources/searx/engines/youtube.py
index 7d1c207..973e799 100644
--- a/sources/searx/engines/youtube.py
+++ b/sources/searx/engines/youtube.py
@@ -1,8 +1,8 @@
## Youtube (Videos)
-#
+#
# @website https://www.youtube.com/
# @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
-#
+#
# @using-api yes
# @results JSON
# @stable yes
diff --git a/sources/searx/search.py b/sources/searx/search.py
index 0e7aaed..5b5cc62 100644
--- a/sources/searx/search.py
+++ b/sources/searx/search.py
@@ -15,7 +15,8 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 (C) 2013- by Adam Tauber, <asciimoo@gmail.com>
 '''
 
-import grequests
+import threading
+import requests as requests_lib
[remaining hunk context, further search.py changes, and the diff of the regenerated minified oscar JavaScript bundle lost in extraction]
diff --git a/sources/searx/static/oscar/js/searx_src/00_requirejs_config.js b/sources/searx/static/oscar/js/searx_src/00_requirejs_config.js
new file mode 100644
index 0000000..3676784
--- /dev/null
+++ b/sources/searx/static/oscar/js/searx_src/00_requirejs_config.js
@@ -0,0 +1,23 @@
+/**
+ * searx is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * searx is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with searx. If not, see < http://www.gnu.org/licenses/ >.
+ *
+ * (C) 2014 by Thomas Pointhuber, <contact@pointhi.com>
+ */
+
+requirejs.config({
+    baseUrl: './static/oscar/js',
+    paths: {
+        app: '../app'
+    }
+});
[diff header and opening lines of the next searx_src script lost in extraction]
+ }
+ }
+ $(result_table).html(newHtml);
+ $(result_table).removeClass('hidden');
+ $(result_table_loadicon).addClass('hidden');
+ }
+ })
+ .fail(function() {
+        $(result_table_loadicon).html($(result_table_loadicon).html() + "<p class=\"text-muted\">could not load data!</p>");
+    });
[lines between the AJAX fail handler and the tag-table loop lost in extraction]
+            newHtml += "<tr><td>" + row + "</td><td>";
+            switch(row) {
+                case "phone":
+                case "fax":
+                    newHtml += "<a href=\"tel:" + element.tags[row] + "\">" + element.tags[row] + "</a>";
+                    break;
+                case "email":
+                    newHtml += "<a href=\"mailto:" + element.tags[row] + "\">" + element.tags[row] + "</a>";
+                    break;
+                case "website":
+                case "url":
+                    newHtml += "<a href=\"" + element.tags[row] + "\">" + element.tags[row] + "</a>";
+                    break;
+                case "wikidata":
+                    newHtml += "<a href=\"https://www.wikidata.org/wiki/" + element.tags[row] + "\">" + element.tags[row] + "</a>";
+                    break;
+                case "wikipedia":
+                    if(element.tags[row].indexOf(":") != -1) {
+                        newHtml += "<a href=\"https://wikipedia.org/wiki/" + element.tags[row] + "\">" + element.tags[row] + "</a>";
+                        break;
+                    }
+                    /* jshint ignore:start */
+                default:
+                    /* jshint ignore:end */
+                    newHtml += element.tags[row];
+                    break;
+            }
+            newHtml += "</td></tr>";
[loop and popup handling, map initialization, and the osmMapnik/osmMapquest tile URL and attribution definitions lost in extraction]
+ var osmMapquest = new L.TileLayer(osmMapquestUrl, {minZoom: 1, maxZoom: 18, subdomains: '1234', attribution: osmMapquestAttrib});
+
+ var osmMapquestOpenAerialUrl='http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg';
+    var osmMapquestOpenAerialAttrib = 'Map data © OpenStreetMap contributors | Tiles Courtesy of MapQuest | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';
+ var osmMapquestOpenAerial = new L.TileLayer(osmMapquestOpenAerialUrl, {minZoom: 1, maxZoom: 11, subdomains: '1234', attribution: osmMapquestOpenAerialAttrib});
+
+ // init map view
+ if(map_bounds) {
+ // TODO hack: https://github.com/Leaflet/Leaflet/issues/2021
+ setTimeout(function () {
+ map.fitBounds(map_bounds, {
+ maxZoom:17
+ });
+ }, 0);
+ } else if (map_lon && map_lat) {
+ if(map_zoom)
+ map.setView(new L.LatLng(map_lat, map_lon),map_zoom);
+ else
+ map.setView(new L.LatLng(map_lat, map_lon),8);
+ }
+
+ map.addLayer(osmMapquest);
+
+ var baseLayers = {
+ "OSM Mapnik": osmMapnik,
+ "MapQuest": osmMapquest/*,
+ "MapQuest Open Aerial": osmMapquestOpenAerial*/
+ };
+
+ L.control.layers(baseLayers).addTo(map);
+
+
+ if(map_geojson)
+ L.geoJson(map_geojson).addTo(map);
+ /*else if(map_bounds)
+ L.rectangle(map_bounds, {color: "#ff7800", weight: 3, fill:false}).addTo(map);*/
+ });
+
+    // this event occurs only once per element
+ $( this ).off( event );
+ });
+});
diff --git a/sources/searx/static/oscar/package.json b/sources/searx/static/oscar/package.json
new file mode 100644
index 0000000..945b794
--- /dev/null
+++ b/sources/searx/static/oscar/package.json
@@ -0,0 +1,15 @@
+{
+ "devDependencies": {
+ "grunt": "~0.4.5",
+ "grunt-contrib-uglify": "~0.6.0",
+ "grunt-contrib-watch" : "~0.6.1",
+ "grunt-contrib-concat" : "~0.5.0",
+ "grunt-contrib-jshint" : "~0.10.0"
+ },
+
+ "scripts": {
+ "build": "npm install && grunt",
+ "start": "grunt watch",
+ "test": "grunt"
+ }
+}
diff --git a/sources/searx/templates/default/about.html b/sources/searx/templates/default/about.html
index 19aba19..01d6f44 100644
--- a/sources/searx/templates/default/about.html
+++ b/sources/searx/templates/default/about.html
@@ -19,14 +19,14 @@
-It provides basic privacy by mixing your queries with searches on other platforms without storing search data. Queries are made using a POST request on every browser (except chrome*). Therefore they show up in neither our logs, nor your url history. In case of Chrome* users there is an exception, Searx uses the search bar to perform GET requests.
+It provides basic privacy by mixing your queries with searches on other platforms without storing search data. Queries are made using a POST request on every browser (except Chrome*), so they show up in neither our logs nor your URL history. Chrome* is the exception: when Searx is used from its search bar, it performs GET requests.
Searx can be added to your browser's search bar; moreover, it can be set as the default search engine.
How can I make it my own?
Add your Searx to this list to help other people reclaim their privacy and make the Internet freer!
-The more decentralized the Internet, is the more freedom we have!
+The more decentralized the Internet is, the more freedom we have!