From c2d95ecba4bab16f67848bf3a8469abe6f7da0e6 Mon Sep 17 00:00:00 2001 From: Adrien Beudin Date: Tue, 13 Jan 2015 16:40:30 +0100 Subject: [PATCH] update version 0.6 --- sources/CHANGELOG.rst | 26 ++++ sources/searx/engines/500px.py | 57 ++++++++ sources/searx/engines/__init__.py | 23 ++- sources/searx/engines/bing_news.py | 11 +- sources/searx/engines/faroo.py | 15 +- sources/searx/engines/flickr-noapi.py | 89 ++++++++++++ sources/searx/engines/flickr.py | 86 ++++++++---- sources/searx/engines/google_images.py | 2 +- sources/searx/engines/kickass.py | 87 ++++++++++++ sources/searx/engines/mediawiki.py | 11 +- sources/searx/engines/openstreetmap.py | 14 +- sources/searx/engines/photon.py | 132 ++++++++++++++++++ sources/searx/engines/searchcode_code.py | 65 +++++++++ sources/searx/engines/searchcode_doc.py | 49 +++++++ sources/searx/engines/soundcloud.py | 7 +- sources/searx/engines/subtitleseeker.py | 74 ++++++++++ sources/searx/engines/yacy.py | 29 ++-- sources/searx/engines/yahoo.py | 9 +- sources/searx/https_rewrite.py | 68 ++++++++- sources/searx/https_rules/Soundcloud.xml | 2 +- sources/searx/search.py | 127 +++++++++-------- sources/searx/settings.yml | 37 ++++- .../static/courgette/img/icon_kickass.ico | Bin 0 -> 1150 bytes .../searx/static/default/img/icon_kickass.ico | Bin 0 -> 1150 bytes .../searx/static/oscar/img/icons/kickass.png | Bin 0 -> 4527 bytes sources/searx/static/oscar/js/searx.min.js | 4 +- .../oscar/js/searx_src/00_requirejs_config.js | 2 +- .../oscar/js/searx_src/element_modifiers.js | 21 +++ .../static/oscar/js/searx_src/leaflet_map.js | 2 +- .../courgette/result_templates/default.html | 4 +- .../courgette/result_templates/images.html | 2 +- .../courgette/result_templates/map.html | 4 +- .../courgette/result_templates/videos.html | 8 +- .../searx/templates/courgette/results.html | 6 +- sources/searx/templates/default/infobox.html | 2 +- .../default/result_templates/default.html | 2 +- .../default/result_templates/images.html | 2 +- .../default/result_templates/map.html | 4 +- .../default/result_templates/torrent.html | 5 +- .../default/result_templates/videos.html | 6 +- sources/searx/templates/default/results.html | 2 +- .../oscar/result_templates/default.html | 2 +- .../oscar/result_templates/images.html | 2 +- .../templates/oscar/result_templates/map.html | 2 +- .../oscar/result_templates/torrent.html | 2 +- .../oscar/result_templates/videos.html | 4 +- sources/searx/tests/test_webapp.py | 2 +- .../translations/de/LC_MESSAGES/messages.mo | Bin 5181 -> 5436 bytes .../translations/de/LC_MESSAGES/messages.po | 92 ++++++------ .../translations/en/LC_MESSAGES/messages.mo | Bin 4814 -> 5000 bytes .../translations/es/LC_MESSAGES/messages.mo | Bin 5009 -> 5195 bytes .../translations/fr/LC_MESSAGES/messages.mo | Bin 5294 -> 5496 bytes .../translations/fr/LC_MESSAGES/messages.po | 62 ++++---- .../translations/hu/LC_MESSAGES/messages.mo | Bin 4936 -> 5168 bytes .../translations/hu/LC_MESSAGES/messages.po | 122 ++++++++-------- .../translations/it/LC_MESSAGES/messages.mo | Bin 4963 -> 5149 bytes .../translations/ja/LC_MESSAGES/messages.mo | Bin 4855 -> 5041 bytes .../translations/nl/LC_MESSAGES/messages.mo | Bin 4926 -> 5198 bytes .../translations/nl/LC_MESSAGES/messages.po | 126 ++++++++--------- sources/searx/version.py | 2 +- sources/searx/webapp.py | 75 ++-------- 61 files changed, 1141 insertions(+), 448 deletions(-) create mode 100644 sources/CHANGELOG.rst create mode 100644 sources/searx/engines/500px.py create mode 100644 sources/searx/engines/flickr-noapi.py create 
mode 100644 sources/searx/engines/kickass.py create mode 100644 sources/searx/engines/photon.py create mode 100644 sources/searx/engines/searchcode_code.py create mode 100644 sources/searx/engines/searchcode_doc.py create mode 100644 sources/searx/engines/subtitleseeker.py create mode 100644 sources/searx/static/courgette/img/icon_kickass.ico create mode 100644 sources/searx/static/default/img/icon_kickass.ico create mode 100644 sources/searx/static/oscar/img/icons/kickass.png diff --git a/sources/CHANGELOG.rst b/sources/CHANGELOG.rst new file mode 100644 index 0000000..efdfdde --- /dev/null +++ b/sources/CHANGELOG.rst @@ -0,0 +1,26 @@ +0.6.0 +===== + +- Changelog added +- New engines + - Flickr (api) + - Subtitleseeker + - photon + - 500px + - Searchcode + - Searchcode doc + - Kickass torrent +- Precise search request timeout handling +- Better favicon support +- Stricter config parsing +- Translation updates +- Multiple ui fixes +- Flickr (noapi) engine fix +- Pep8 fixes + + +News +~~~~ + +Health status of searx instances and engines: http://stats.searx.oe5tpo.com +(source: https://github.com/pointhi/searx_stats) diff --git a/sources/searx/engines/500px.py b/sources/searx/engines/500px.py new file mode 100644 index 0000000..5d53af3 --- /dev/null +++ b/sources/searx/engines/500px.py @@ -0,0 +1,57 @@ +## 500px (Images) +# +# @website https://500px.com +# @provide-api yes (https://developers.500px.com/) +# +# @using-api no +# @results HTML +# @stable no (HTML can change) +# @parse url, title, thumbnail, img_src, content +# +# @todo rewrite to api + + +from urllib import urlencode +from urlparse import urljoin +from lxml import html + +# engine dependent config +categories = ['images'] +paging = True + +# search-url +base_url = 'https://500px.com' +search_url = base_url+'/search?search?page={pageno}&type=photos&{query}' + + +# do search-request +def request(query, params): + params['url'] = search_url.format(pageno=params['pageno'], + query=urlencode({'q': query})) + + return params + + +# get response from search-request +def response(resp): + results = [] + + dom = html.fromstring(resp.text) + + # parse results + for result in dom.xpath('//div[@class="photo"]'): + link = result.xpath('.//a')[0] + url = urljoin(base_url, link.attrib.get('href')) + title = result.xpath('.//div[@class="title"]//text()')[0] + img_src = link.xpath('.//img')[0].attrib['src'] + content = result.xpath('.//div[@class="info"]//text()')[0] + + # append result + results.append({'url': url, + 'title': title, + 'img_src': img_src, + 'content': content, + 'template': 'images.html'}) + + # return results + return results diff --git a/sources/searx/engines/__init__.py b/sources/searx/engines/__init__.py index 80356a8..d42339a 100644 --- a/sources/searx/engines/__init__.py +++ b/sources/searx/engines/__init__.py @@ -41,11 +41,8 @@ def load_module(filename): module.name = modname return module -if 'engines' not in settings or not settings['engines']: - print '[E] Error no engines found. 
Edit your settings.yml' - exit(2) -for engine_data in settings['engines']: +def load_engine(engine_data): engine_name = engine_data['engine'] engine = load_module(engine_name + '.py') @@ -84,10 +81,10 @@ for engine_data in settings['engines']: if engine_attr.startswith('_'): continue if getattr(engine, engine_attr) is None: - print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr) # noqa + print('[E] Engine config error: Missing attribute "{0}.{1}"'\ + .format(engine.name, engine_attr)) sys.exit(1) - engines[engine.name] = engine engine.stats = { 'result_count': 0, 'search_count': 0, @@ -104,7 +101,12 @@ for engine_data in settings['engines']: if engine.shortcut: # TODO check duplications + if engine.shortcut in engine_shortcuts: + print('[E] Engine config error: ambigious shortcut: {0}'\ + .format(engine.shortcut)) + sys.exit(1) engine_shortcuts[engine.shortcut] = engine.name + return engine def get_engines_stats(): @@ -194,3 +196,12 @@ def get_engines_stats(): sorted(errors, key=itemgetter('avg'), reverse=True) ), ] + + +if 'engines' not in settings or not settings['engines']: + print '[E] Error no engines found. Edit your settings.yml' + exit(2) + +for engine_data in settings['engines']: + engine = load_engine(engine_data) + engines[engine.name] = engine diff --git a/sources/searx/engines/bing_news.py b/sources/searx/engines/bing_news.py index 5dce4a2..3dda04c 100644 --- a/sources/searx/engines/bing_news.py +++ b/sources/searx/engines/bing_news.py @@ -57,12 +57,16 @@ def response(resp): link = result.xpath('.//div[@class="newstitle"]/a')[0] url = link.attrib.get('href') title = ' '.join(link.xpath('.//text()')) - contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()') + contentXPath = result.xpath('.//div[@class="sn_txt"]/div' + '//span[@class="sn_snip"]//text()') if contentXPath is not None: content = escape(' '.join(contentXPath)) # parse publishedDate - publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()') + publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div' + '//span[contains(@class,"sn_ST")]' + '//span[contains(@class,"sn_tm")]' + '//text()') if publishedDateXPath is not None: publishedDate = escape(' '.join(publishedDateXPath)) @@ -74,7 +78,8 @@ def response(resp): timeNumbers = re.findall(r'\d+', publishedDate) publishedDate = datetime.now()\ - timedelta(hours=int(timeNumbers[0])) - elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate): + elif re.match("^[0-9]+ hour(s|)," + " [0-9]+ minute(s|) ago$", publishedDate): timeNumbers = re.findall(r'\d+', publishedDate) publishedDate = datetime.now()\ - timedelta(hours=int(timeNumbers[0]))\ diff --git a/sources/searx/engines/faroo.py b/sources/searx/engines/faroo.py index dada475..5360ea1 100644 --- a/sources/searx/engines/faroo.py +++ b/sources/searx/engines/faroo.py @@ -22,10 +22,17 @@ api_key = None # search-url url = 'http://www.faroo.com/' -search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}' +search_url = url + 'api?{query}'\ + '&start={offset}'\ + '&length={number_of_results}'\ + '&l={language}'\ + '&src={categorie}'\ + '&i=false'\ + '&f=json'\ + '&key={api_key}' # noqa search_category = {'general': 'web', - 'news': 'news'} + 'news': 'news'} # do search-request @@ -80,8 +87,8 @@ def response(resp): # parse results for result in search_res['results']: if 
result['news']: - # timestamp (how many milliseconds have passed between now and the beginning of 1970) - publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0) + # timestamp (milliseconds since 1970) + publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0) # noqa # append news result results.append({'url': result['url'], diff --git a/sources/searx/engines/flickr-noapi.py b/sources/searx/engines/flickr-noapi.py new file mode 100644 index 0000000..522503b --- /dev/null +++ b/sources/searx/engines/flickr-noapi.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python + +# Flickr (Images) +# +# @website https://www.flickr.com +# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html) +# +# @using-api no +# @results HTML +# @stable no +# @parse url, title, thumbnail, img_src + +from urllib import urlencode +from json import loads +import re + +categories = ['images'] + +url = 'https://secure.flickr.com/' +search_url = url+'search/?{query}&page={page}' +photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}' +regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL) +image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's') + +paging = True + + +def build_flickr_url(user_id, photo_id): + return photo_url.format(userid=user_id, photoid=photo_id) + + +def request(query, params): + params['url'] = search_url.format(query=urlencode({'text': query}), + page=params['pageno']) + return params + + +def response(resp): + results = [] + + matches = regex.search(resp.text) + + if matches is None: + return results + + match = matches.group(1) + search_results = loads(match) + + if '_data' not in search_results: + return [] + + photos = search_results['_data'] + + for photo in photos: + + # In paged configuration, the first pages' photos are represented by a None object + if photo is None: + continue + + img_src = None + # From the biggest to the lowest format + for image_size in image_sizes: + if image_size in photo['sizes']: + img_src = photo['sizes'][image_size]['displayUrl'] + break + + if not img_src: + continue + + if 'id' not in photo['owner']: + continue + + url = build_flickr_url(photo['owner']['id'], photo['id']) + + title = photo['title'] + + content = '' + photo['owner']['username'] + '
' + + if 'description' in photo: + content = content + '' + photo['description'] + '' + + # append result + results.append({'url': url, + 'title': title, + 'img_src': img_src, + 'content': content, + 'template': 'images.html'}) + + return results diff --git a/sources/searx/engines/flickr.py b/sources/searx/engines/flickr.py index 4ec2841..2fa5ed7 100644 --- a/sources/searx/engines/flickr.py +++ b/sources/searx/engines/flickr.py @@ -1,54 +1,80 @@ #!/usr/bin/env python +## Flickr (Images) +# +# @website https://www.flickr.com +# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html) +# +# @using-api yes +# @results JSON +# @stable yes +# @parse url, title, thumbnail, img_src +#More info on api-key : https://www.flickr.com/services/apps/create/ + from urllib import urlencode -#from json import loads -from urlparse import urljoin -from lxml import html -from time import time +from json import loads categories = ['images'] -url = 'https://secure.flickr.com/' -search_url = url+'search/?{query}&page={page}' -results_xpath = '//div[@class="view display-item-tile"]/figure/div' +nb_per_page = 15 +paging = True +api_key= None + + +url = 'https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key={api_key}&{text}&sort=relevance&extras=description%2C+owner_name%2C+url_o%2C+url_z&per_page={nb_per_page}&format=json&nojsoncallback=1&page={page}' +photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}' paging = True +def build_flickr_url(user_id, photo_id): + return photo_url.format(userid=user_id,photoid=photo_id) + def request(query, params): - params['url'] = search_url.format(query=urlencode({'text': query}), - page=params['pageno']) - time_string = str(int(time())-3) - params['cookies']['BX'] = '3oqjr6d9nmpgl&b=3&s=dh' - params['cookies']['xb'] = '421409' - params['cookies']['localization'] = 'en-us' - params['cookies']['flrbp'] = time_string +\ - '-3a8cdb85a427a33efda421fbda347b2eaf765a54' - params['cookies']['flrbs'] = time_string +\ - '-ed142ae8765ee62c9ec92a9513665e0ee1ba6776' - params['cookies']['flrb'] = '9' + params['url'] = url.format(text=urlencode({'text': query}), + api_key=api_key, + nb_per_page=nb_per_page, + page=params['pageno']) return params def response(resp): results = [] - dom = html.fromstring(resp.text) - for result in dom.xpath(results_xpath): - img = result.xpath('.//img') + + search_results = loads(resp.text) - if not img: + # return empty array if there are no results + if not 'photos' in search_results: + return [] + + if not 'photo' in search_results['photos']: + return [] + + photos = search_results['photos']['photo'] + + # parse results + for photo in photos: + if 'url_o' in photo: + img_src = photo['url_o'] + elif 'url_z' in photo: + img_src = photo['url_z'] + else: continue - img = img[0] - img_src = 'https:'+img.attrib.get('src') + url = build_flickr_url(photo['owner'], photo['id']) - if not img_src: - continue - - href = urljoin(url, result.xpath('.//a')[0].attrib.get('href')) - title = img.attrib.get('alt', '') - results.append({'url': href, + title = photo['title'] + + content = ''+ photo['ownername'] +'
' + + content = content + '' + photo['description']['_content'] + '' + + # append result + results.append({'url': url, 'title': title, 'img_src': img_src, + 'content': content, 'template': 'images.html'}) + + # return results return results diff --git a/sources/searx/engines/google_images.py b/sources/searx/engines/google_images.py index 491f5c2..79fac3f 100644 --- a/sources/searx/engines/google_images.py +++ b/sources/searx/engines/google_images.py @@ -9,7 +9,7 @@ # @stable yes (but deprecated) # @parse url, title, img_src -from urllib import urlencode,unquote +from urllib import urlencode, unquote from json import loads # engine dependent config diff --git a/sources/searx/engines/kickass.py b/sources/searx/engines/kickass.py new file mode 100644 index 0000000..f1fcd9e --- /dev/null +++ b/sources/searx/engines/kickass.py @@ -0,0 +1,87 @@ +## Kickass Torrent (Videos, Music, Files) +# +# @website https://kickass.so +# @provide-api no (nothing found) +# +# @using-api no +# @results HTML (using search portal) +# @stable yes (HTML can change) +# @parse url, title, content, seed, leech, magnetlink + +from urlparse import urljoin +from cgi import escape +from urllib import quote +from lxml import html +from operator import itemgetter + +# engine dependent config +categories = ['videos', 'music', 'files'] +paging = True + +# search-url +url = 'https://kickass.so/' +search_url = url + 'search/{search_term}/{pageno}/' + +# specific xpath variables +magnet_xpath = './/a[@title="Torrent magnet link"]' +#content_xpath = './/font[@class="detDesc"]//text()' + + +# do search-request +def request(query, params): + params['url'] = search_url.format(search_term=quote(query), + pageno=params['pageno']) + + # FIX: SSLError: hostname 'kickass.so' + # doesn't match either of '*.kickass.to', 'kickass.to' + params['verify'] = False + + return params + + +# get response from search-request +def response(resp): + results = [] + + dom = html.fromstring(resp.text) + + search_res = dom.xpath('//table[@class="data"]//tr') + + # return empty array if nothing is found + if not search_res: + return [] + + # parse results + for result in search_res[1:]: + link = result.xpath('.//a[@class="cellMainLink"]')[0] + href = urljoin(url, link.attrib['href']) + title = ' '.join(link.xpath('.//text()')) + content = escape(html.tostring(result.xpath('.//span[@class="font11px lightgrey block"]')[0], method="text")) + seed = result.xpath('.//td[contains(@class, "green")]/text()')[0] + leech = result.xpath('.//td[contains(@class, "red")]/text()')[0] + + # convert seed to int if possible + if seed.isdigit(): + seed = int(seed) + else: + seed = 0 + + # convert leech to int if possible + if leech.isdigit(): + leech = int(leech) + else: + leech = 0 + + magnetlink = result.xpath(magnet_xpath)[0].attrib['href'] + + # append result + results.append({'url': href, + 'title': title, + 'content': content, + 'seed': seed, + 'leech': leech, + 'magnetlink': magnetlink, + 'template': 'torrent.html'}) + + # return results sorted by seeder + return sorted(results, key=itemgetter('seed'), reverse=True) diff --git a/sources/searx/engines/mediawiki.py b/sources/searx/engines/mediawiki.py index 4a8b0e8..8ca32c6 100644 --- a/sources/searx/engines/mediawiki.py +++ b/sources/searx/engines/mediawiki.py @@ -28,15 +28,17 @@ search_url = base_url + 'w/api.php?action=query'\ '&srprop=timestamp'\ '&format=json'\ '&sroffset={offset}'\ - '&srlimit={limit}' + '&srlimit={limit}' # noqa # do search-request def request(query, params): offset = (params['pageno'] - 1) * 
number_of_results + string_args = dict(query=urlencode({'srsearch': query}), - offset=offset, - limit=number_of_results) + offset=offset, + limit=number_of_results) + format_strings = list(Formatter().parse(base_url)) if params['language'] == 'all': @@ -67,7 +69,8 @@ def response(resp): # parse results for result in search_results['query']['search']: - url = base_url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')) + url = base_url.format(language=resp.search_params['language']) +\ + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')) # append result results.append({'url': url, diff --git a/sources/searx/engines/openstreetmap.py b/sources/searx/engines/openstreetmap.py index 36b6011..68446ef 100644 --- a/sources/searx/engines/openstreetmap.py +++ b/sources/searx/engines/openstreetmap.py @@ -9,20 +9,24 @@ # @parse url, title from json import loads +from searx.utils import searx_useragent # engine dependent config categories = ['map'] paging = False # search-url -url = 'https://nominatim.openstreetmap.org/search/{query}?format=json&polygon_geojson=1&addressdetails=1' - +base_url = 'https://nominatim.openstreetmap.org/' +search_string = 'search/{query}?format=json&polygon_geojson=1&addressdetails=1' result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}' # do search-request def request(query, params): - params['url'] = url.format(query=query) + params['url'] = base_url + search_string.format(query=query) + + # using searx User-Agent + params['headers']['User-Agent'] = searx_useragent() return params @@ -68,8 +72,8 @@ def response(resp): address.update({'house_number': address_raw.get('house_number'), 'road': address_raw.get('road'), 'locality': address_raw.get('city', - address_raw.get('town', - address_raw.get('village'))), + address_raw.get('town', # noqa + address_raw.get('village'))), # noqa 'postcode': address_raw.get('postcode'), 'country': address_raw.get('country'), 'country_code': address_raw.get('country_code')}) diff --git a/sources/searx/engines/photon.py b/sources/searx/engines/photon.py new file mode 100644 index 0000000..16340d2 --- /dev/null +++ b/sources/searx/engines/photon.py @@ -0,0 +1,132 @@ +## Photon (Map) +# +# @website https://photon.komoot.de +# @provide-api yes (https://photon.komoot.de/) +# +# @using-api yes +# @results JSON +# @stable yes +# @parse url, title + +from urllib import urlencode +from json import loads +from searx.utils import searx_useragent + +# engine dependent config +categories = ['map'] +paging = False +language_support = True +number_of_results = 10 + +# search-url +base_url = 'https://photon.komoot.de/' +search_string = 'api/?{query}&limit={limit}' +result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}' + +# list of supported languages +allowed_languages = ['de', 'en', 'fr', 'it'] + + +# do search-request +def request(query, params): + params['url'] = base_url +\ + search_string.format(query=urlencode({'q': query}), + limit=number_of_results) + + if params['language'] != 'all': + language = params['language'].split('_')[0] + if language in allowed_languages: + params['url'] = params['url'] + "&lang=" + language + + # using searx User-Agent + params['headers']['User-Agent'] = searx_useragent() + + # FIX: SSLError: SSL3_GET_SERVER_CERTIFICATE:certificate verify failed + params['verify'] = False + + return params + + +# get response from search-request +def response(resp): + results = [] + json = loads(resp.text) + + # parse results + for r in 
json.get('features', {}): + + properties = r.get('properties') + + if not properties: + continue + + # get title + title = properties['name'] + + # get osm-type + if properties.get('osm_type') == 'N': + osm_type = 'node' + elif properties.get('osm_type') == 'W': + osm_type = 'way' + elif properties.get('osm_type') == 'R': + osm_type = 'relation' + else: + # continue if invalide osm-type + continue + + url = result_base_url.format(osm_type=osm_type, + osm_id=properties.get('osm_id')) + + osm = {'type': osm_type, + 'id': properties.get('osm_id')} + + geojson = r.get('geometry') + + if properties.get('extent'): + boundingbox = [properties.get('extent')[3], + properties.get('extent')[1], + properties.get('extent')[0], + properties.get('extent')[2]] + else: + # TODO: better boundingbox calculation + boundingbox = [geojson['coordinates'][1], + geojson['coordinates'][1], + geojson['coordinates'][0], + geojson['coordinates'][0]] + + # address calculation + address = {} + + # get name + if properties.get('osm_key') == 'amenity' or\ + properties.get('osm_key') == 'shop' or\ + properties.get('osm_key') == 'tourism' or\ + properties.get('osm_key') == 'leisure': + address = {'name': properties.get('name')} + + # add rest of adressdata, if something is already found + if address.get('name'): + address.update({'house_number': properties.get('housenumber'), + 'road': properties.get('street'), + 'locality': properties.get('city', + properties.get('town', # noqa + properties.get('village'))), # noqa + 'postcode': properties.get('postcode'), + 'country': properties.get('country')}) + else: + address = None + + # append result + results.append({'template': 'map.html', + 'title': title, + 'content': '', + 'longitude': geojson['coordinates'][0], + 'latitude': geojson['coordinates'][1], + 'boundingbox': boundingbox, + 'geojson': geojson, + 'address': address, + 'osm': osm, + 'url': url}) + + # return results + return results diff --git a/sources/searx/engines/searchcode_code.py b/sources/searx/engines/searchcode_code.py new file mode 100644 index 0000000..2ba0e52 --- /dev/null +++ b/sources/searx/engines/searchcode_code.py @@ -0,0 +1,65 @@ +## Searchcode (It) +# +# @website https://searchcode.com/ +# @provide-api yes (https://searchcode.com/api/) +# +# @using-api yes +# @results JSON +# @stable yes +# @parse url, title, content + +from urllib import urlencode +from json import loads +import cgi +import re + +# engine dependent config +categories = ['it'] +paging = True + +# search-url +url = 'https://searchcode.com/' +search_url = url+'api/codesearch_I/?{query}&p={pageno}' + + +# do search-request +def request(query, params): + params['url'] = search_url.format(query=urlencode({'q': query}), + pageno=params['pageno']-1) + + return params + + +# get response from search-request +def response(resp): + results = [] + + search_results = loads(resp.text) + + # parse results + for result in search_results['results']: + href = result['url'] + title = "" + result['name'] + " - " + result['filename'] + content = result['repo'] + "
" + + lines = dict() + for line, code in result['lines'].items(): + lines[int(line)] = code + + content = content + '
'
+        for line, code in sorted(lines.items()):
+            content = content + '"
+            
+        content = content + "
' + content = content + str(line) + '' + # Replace every two spaces with ' &nbps;' to keep formatting while allowing the browser to break the line if necessary + content = content + cgi.escape(code).replace('\t', ' ').replace(' ', '  ').replace(' ', '  ') + content = content + "
" + + # append result + results.append({'url': href, + 'title': title, + 'content': content}) + + # return results + return results diff --git a/sources/searx/engines/searchcode_doc.py b/sources/searx/engines/searchcode_doc.py new file mode 100644 index 0000000..e07cbea --- /dev/null +++ b/sources/searx/engines/searchcode_doc.py @@ -0,0 +1,49 @@ +## Searchcode (It) +# +# @website https://searchcode.com/ +# @provide-api yes (https://searchcode.com/api/) +# +# @using-api yes +# @results JSON +# @stable yes +# @parse url, title, content + +from urllib import urlencode +from json import loads + +# engine dependent config +categories = ['it'] +paging = True + +# search-url +url = 'https://searchcode.com/' +search_url = url+'api/search_IV/?{query}&p={pageno}' + + +# do search-request +def request(query, params): + params['url'] = search_url.format(query=urlencode({'q': query}), + pageno=params['pageno']-1) + + return params + + +# get response from search-request +def response(resp): + results = [] + + search_results = loads(resp.text) + + # parse results + for result in search_results['results']: + href = result['url'] + title = "[" + result['type'] + "] " + result['namespace'] + " " + result['name'] + content = '[' + result['type'] + "] " + result['name'] + " " + result['synopsis'] + "
" + result['description'] + + # append result + results.append({'url': href, + 'title': title, + 'content': content}) + + # return results + return results diff --git a/sources/searx/engines/soundcloud.py b/sources/searx/engines/soundcloud.py index 390e7ca..164a569 100644 --- a/sources/searx/engines/soundcloud.py +++ b/sources/searx/engines/soundcloud.py @@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28' # search-url url = 'https://api.soundcloud.com/' -search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}' +search_url = url + 'search?{query}'\ + '&facet=model'\ + '&limit=20'\ + '&offset={offset}'\ + '&linked_partitioning=1'\ + '&client_id={client_id}' # noqa # do search-request diff --git a/sources/searx/engines/subtitleseeker.py b/sources/searx/engines/subtitleseeker.py new file mode 100644 index 0000000..2f1636f --- /dev/null +++ b/sources/searx/engines/subtitleseeker.py @@ -0,0 +1,74 @@ +## Subtitleseeker (Video) +# +# @website http://www.subtitleseeker.com +# @provide-api no +# +# @using-api no +# @results HTML +# @stable no (HTML can change) +# @parse url, title, content + +from cgi import escape +from urllib import quote_plus +from lxml import html +from searx.languages import language_codes + +# engine dependent config +categories = ['videos'] +paging = True +language = "" + +# search-url +url = 'http://www.subtitleseeker.com/' +search_url = url+'search/TITLES/{query}&p={pageno}' + +# specific xpath variables +results_xpath = '//div[@class="boxRows"]' + + +# do search-request +def request(query, params): + params['url'] = search_url.format(query=quote_plus(query), + pageno=params['pageno']) + return params + + +# get response from search-request +def response(resp): + results = [] + + dom = html.fromstring(resp.text) + + search_lang = "" + + if resp.search_params['language'] != 'all': + search_lang = [lc[1] + for lc in language_codes + if lc[0][:2] == resp.search_params['language']][0] + + # parse results + for result in dom.xpath(results_xpath): + link = result.xpath(".//a")[0] + href = link.attrib.get('href') + + if language is not "": + href = href + language + '/' + elif search_lang: + href = href + search_lang + '/' + + title = escape(link.xpath(".//text()")[0]) + + content = result.xpath('.//div[contains(@class,"red")]//text()')[0] + content = content + " - " + content = content + html.tostring(result.xpath('.//div[contains(@class,"grey-web")]')[0], method='text') + + if result.xpath(".//span") != []: + content = content + " - (" + result.xpath(".//span//text()")[0].strip() + ")" + + # append result + results.append({'url': href, + 'title': title, + 'content': escape(content)}) + + # return results + return results diff --git a/sources/searx/engines/yacy.py b/sources/searx/engines/yacy.py index 3ee0e91..4c4fac7 100644 --- a/sources/searx/engines/yacy.py +++ b/sources/searx/engines/yacy.py @@ -24,7 +24,11 @@ number_of_results = 5 # search-url base_url = 'http://localhost:8090' -search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limit}&contentdom={search_type}&resource=global' +search_url = '/yacysearch.json?{query}'\ + '&startRecord={offset}'\ + '&maximumRecords={limit}'\ + '&contentdom={search_type}'\ + '&resource=global' # noqa # yacy specific type-definitions search_types = {'general': 'text', @@ -39,10 +43,11 @@ def request(query, params): offset = (params['pageno'] - 1) * number_of_results search_type = search_types.get(params['category'], '0') - params['url'] = 
base_url + search_url.format(query=urlencode({'query': query}), - offset=offset, - limit=number_of_results, - search_type=search_type) + params['url'] = base_url +\ + search_url.format(query=urlencode({'query': query}), + offset=offset, + limit=number_of_results, + search_type=search_type) # add language tag if specified if params['language'] != 'all': @@ -70,19 +75,19 @@ def response(resp): # append result results.append({'url': result['link'], - 'title': result['title'], - 'content': result['description'], - 'publishedDate': publishedDate}) + 'title': result['title'], + 'content': result['description'], + 'publishedDate': publishedDate}) elif resp.search_params['category'] == 'images': # parse image results for result in search_results: # append result results.append({'url': result['url'], - 'title': result['title'], - 'content': '', - 'img_src': result['image'], - 'template': 'images.html'}) + 'title': result['title'], + 'content': '', + 'img_src': result['image'], + 'template': 'images.html'}) #TODO parse video, audio and file results diff --git a/sources/searx/engines/yahoo.py b/sources/searx/engines/yahoo.py index 938540e..c6c5b0d 100644 --- a/sources/searx/engines/yahoo.py +++ b/sources/searx/engines/yahoo.py @@ -20,7 +20,8 @@ paging = True language_support = True # search-url -search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}' +base_url = 'https://search.yahoo.com/' +search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}' # specific xpath variables results_xpath = '//div[@class="res"]' @@ -57,9 +58,9 @@ def request(query, params): else: language = params['language'].split('_')[0] - params['url'] = search_url.format(offset=offset, - query=urlencode({'p': query}), - lang=language) + params['url'] = base_url + search_url.format(offset=offset, + query=urlencode({'p': query}), + lang=language) # TODO required? params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\ diff --git a/sources/searx/https_rewrite.py b/sources/searx/https_rewrite.py index 9faf359..408474a 100644 --- a/sources/searx/https_rewrite.py +++ b/sources/searx/https_rewrite.py @@ -16,6 +16,7 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >. 
''' import re +from urlparse import urlparse from lxml import etree from os import listdir from os.path import isfile, isdir, join @@ -86,15 +87,23 @@ def load_single_https_ruleset(filepath): # TODO hack, which convert a javascript regex group # into a valid python regex group - rule_from = ruleset.attrib.get('from').replace('$', '\\') - rule_to = ruleset.attrib.get('to').replace('$', '\\') + rule_from = ruleset.attrib['from'].replace('$', '\\') + if rule_from.endswith('\\'): + rule_from = rule_from[:-1]+'$' + rule_to = ruleset.attrib['to'].replace('$', '\\') + if rule_to.endswith('\\'): + rule_to = rule_to[:-1]+'$' # TODO, not working yet because of the hack above, # currently doing that in webapp.py # rule_from_rgx = re.compile(rule_from, re.I) # append rule - rules.append((rule_from, rule_to)) + try: + rules.append((re.compile(rule_from, re.I | re.U), rule_to)) + except: + # TODO log regex error + continue # this child define an exclusion elif ruleset.tag == 'exclusion': @@ -143,3 +152,56 @@ def load_https_rules(rules_path): https_rules.append(ruleset) print(' * {n} https-rules loaded'.format(n=len(https_rules))) + + + +def https_url_rewrite(result): + skip_https_rewrite = False + # check if HTTPS rewrite is possible + for target, rules, exclusions in https_rules: + + # check if target regex match with url + if target.match(result['parsed_url'].netloc): + # process exclusions + for exclusion in exclusions: + # check if exclusion match with url + if exclusion.match(result['url']): + skip_https_rewrite = True + break + + # skip https rewrite if required + if skip_https_rewrite: + break + + # process rules + for rule in rules: + try: + new_result_url = rule[0].sub(rule[1], result['url']) + except: + break + + # parse new url + new_parsed_url = urlparse(new_result_url) + + # continiue if nothing was rewritten + if result['url'] == new_result_url: + continue + + # get domainname from result + # TODO, does only work correct with TLD's like + # asdf.com, not for asdf.com.de + # TODO, using publicsuffix instead of this rewrite rule + old_result_domainname = '.'.join( + result['parsed_url'].hostname.split('.')[-2:]) + new_result_domainname = '.'.join( + new_parsed_url.hostname.split('.')[-2:]) + + # check if rewritten hostname is the same, + # to protect against wrong or malicious rewrite rules + if old_result_domainname == new_result_domainname: + # set new url + result['url'] = new_result_url + + # target has matched, do not search over the other rules + break + return result diff --git a/sources/searx/https_rules/Soundcloud.xml b/sources/searx/https_rules/Soundcloud.xml index 0baa583..6958e8c 100644 --- a/sources/searx/https_rules/Soundcloud.xml +++ b/sources/searx/https_rules/Soundcloud.xml @@ -89,7 +89,7 @@ - timeout_limit: + engines[engine_name].stats['page_load_time'] += timeout_limit engines[engine_name].stats['errors'] += 1 - results[engine_name] = cb_res - - # print engine name and specific error message - print '[E] Error with engine "{0}":\n\t{1}'.format( - engine_name, str(e)) return + # callback + search_results = callback(response) + # add results for result in search_results: result['engine'] = engine_name - # if it is a suggestion, add it to list of suggestions - if 'suggestion' in result: - # TODO type checks - suggestions.add(result['suggestion']) - continue - - # if it is an answer, add it to list of answers - if 'answer' in result: - answers.add(result['answer']) - continue - - # if it is an infobox, add it to list of infoboxes - if 'infobox' in result: - 
infoboxes.append(result) - continue - - # append result - cb_res.append(result) - - results[engine_name] = cb_res + results_queue.put_nowait((engine_name, search_results)) # update stats with current page-load-time - engines[engine_name].stats['page_load_time'] += \ - (datetime.now() - params['started']).total_seconds() + engines[engine_name].stats['page_load_time'] += search_duration return process_callback @@ -420,6 +413,7 @@ class Search(object): # init vars requests = [] + results_queue = Queue() results = {} suggestions = set() answers = set() @@ -452,14 +446,13 @@ class Search(object): request_params = default_request_params() request_params['headers']['User-Agent'] = user_agent request_params['category'] = selected_engine['category'] - request_params['started'] = datetime.now() + request_params['started'] = time() request_params['pageno'] = self.pageno request_params['language'] = self.lang # update request parameters dependent on # search-engine (contained in engines folder) - request_params = engine.request(self.query.encode('utf-8'), - request_params) + engine.request(self.query.encode('utf-8'), request_params) if request_params['url'] is None: # TODO add support of offline engines @@ -468,13 +461,9 @@ class Search(object): # create a callback wrapper for the search engine results callback = make_callback( selected_engine['name'], - results, - suggestions, - answers, - infoboxes, + results_queue, engine.response, - request_params - ) + request_params) # create dictionary which contain all # informations about the request @@ -482,7 +471,8 @@ class Search(object): headers=request_params['headers'], hooks=dict(response=callback), cookies=request_params['cookies'], - timeout=engine.timeout + timeout=engine.timeout, + verify=request_params['verify'] ) # specific type of request (GET or POST) @@ -497,11 +487,34 @@ class Search(object): continue # append request to list - requests.append((req, request_params['url'], request_args)) + requests.append((req, request_params['url'], request_args, selected_engine['name'])) + if not requests: + return results, suggestions, answers, infoboxes # send all search-request threaded_requests(requests) + + while not results_queue.empty(): + engine_name, engine_results = results_queue.get_nowait() + + # TODO type checks + [suggestions.add(x['suggestion']) + for x in list(engine_results) + if 'suggestion' in x + and engine_results.remove(x) is None] + + [answers.add(x['answer']) + for x in list(engine_results) + if 'answer' in x + and engine_results.remove(x) is None] + + infoboxes.extend(x for x in list(engine_results) + if 'infobox' in x + and engine_results.remove(x) is None) + + results[engine_name] = engine_results + # update engine-specific stats for engine_name, engine_results in results.items(): engines[engine_name].stats['search_count'] += 1 diff --git a/sources/searx/settings.yml b/sources/searx/settings.yml index c84c27d..260b56c 100644 --- a/sources/searx/settings.yml +++ b/sources/searx/settings.yml @@ -1,7 +1,7 @@ server: port : 8888 secret_key : "ultrasecretkey" # change this! - debug : True # Debug mode, only for development + debug : False # Debug mode, only for development request_timeout : 2.0 # seconds base_url : False # Set custom base_url. 
Possible values: False or "https://your.custom.host/location/" themes_path : "" # Custom ui themes path @@ -64,12 +64,20 @@ engines: # engine : filecrop # categories : files # shortcut : fc + + - name : 500px + engine : 500px + shortcut : px - name : flickr - engine : flickr categories : images shortcut : fl - timeout: 3.0 +# You can use the engine using the official stable API, but you need an API key +# See : https://www.flickr.com/services/apps/create/ +# engine : flickr +# api_key: 'apikey' # required! +# Or you can use the html non-stable engine, activated by default + engine : flickr-noapi - name : general-file engine : generalfile @@ -95,10 +103,18 @@ engines: engine : openstreetmap shortcut : osm + - name : photon + engine : photon + shortcut : ph + # - name : piratebay # engine : piratebay # shortcut : tpb + - name : kickass + engine : kickass + shortcut : ka + - name : soundcloud engine : soundcloud shortcut : sc @@ -106,6 +122,21 @@ engines: - name : stackoverflow engine : stackoverflow shortcut : st + + - name : searchcode doc + engine : searchcode_doc + shortcut : scd + + - name : searchcode code + engine : searchcode_code + shortcut : scc + + - name : subtitleseeker + engine : subtitleseeker + shortcut : ss +# The language is an option. You can put any language written in english +# Examples : English, French, German, Hungarian, Chinese... +# language : English - name : startpage engine : startpage diff --git a/sources/searx/static/courgette/img/icon_kickass.ico b/sources/searx/static/courgette/img/icon_kickass.ico new file mode 100644 index 0000000000000000000000000000000000000000..4aa2c77a578d0322b42542e862e4dfdf77da920f GIT binary patch literal 1150 zcmZQzU<5(|0R|wcz>vYhz#zuJz@P!dKp~(AL>x$gfiO1%0|*lXWF5VW6}`jvNn5+> z5u+ZaM$W-IQ7^gZhiP%^e;8H|NxUs%<0=bNgNs)2314Sk+V=$Z4sc>bFIjy3&oG4uR}zY31tcIaj(Ir|vfR&@SzsO|p`!m$$< z{Qvgz*MAUr^!nZZjG4>9;vhNWl%jX&`i)|3YM$;#}0cM7}o4>4k| z9}pP>((jN}el2p!vi~5QId9$n+51oZzxMPcSU-q3bmRX2d#~R9FIlz~tOlmmG%~pZ zq~AHG;!51iRsTV_bon;8K6J*^J;%XnU}`O*Q(Hj#oia-HCC^>|AB5BAZ~Wh}<R=_t- k@G~%cU}s=>z|6pK0E8JC7#ctrs0b?t%7N4zGiG1_0JW|9m;e9( literal 0 HcmV?d00001 diff --git a/sources/searx/static/default/img/icon_kickass.ico b/sources/searx/static/default/img/icon_kickass.ico new file mode 100644 index 0000000000000000000000000000000000000000..4aa2c77a578d0322b42542e862e4dfdf77da920f GIT binary patch literal 1150 zcmZQzU<5(|0R|wcz>vYhz#zuJz@P!dKp~(AL>x$gfiO1%0|*lXWF5VW6}`jvNn5+> z5u+ZaM$W-IQ7^gZhiP%^e;8H|NxUs%<0=bNgNs)2314Sk+V=$Z4sc>bFIjy3&oG4uR}zY31tcIaj(Ir|vfR&@SzsO|p`!m$$< z{Qvgz*MAUr^!nZZjG4>9;vhNWl%jX&`i)|3YM$;#}0cM7}o4>4k| z9}pP>((jN}el2p!vi~5QId9$n+51oZzxMPcSU-q3bmRX2d#~R9FIlz~tOlmmG%~pZ zq~AHG;!51iRsTV_bon;8K6J*^J;%XnU}`O*Q(Hj#oia-HCC^>|AB5BAZ~Wh}<R=_t- k@G~%cU}s=>z|6pK0E8JC7#ctrs0b?t%7N4zGiG1_0JW|9m;e9( literal 0 HcmV?d00001 diff --git a/sources/searx/static/oscar/img/icons/kickass.png b/sources/searx/static/oscar/img/icons/kickass.png new file mode 100644 index 0000000000000000000000000000000000000000..567d1039ff8e74b4ef68634cf5d04d80f271aa0d GIT binary patch literal 4527 zcmV;g5m4@lP)Oz@Z0f2-7z;ux~O9+4z06=<WDR*FRcSTFz- zW=q650N5=6FiBTtNC2?60Km==3$g$R3;-}uh=nNt1bYBr$Ri_o0EC$U6h`t_Jn<{8 z5a%iY0C<_QJh>z}MS)ugEpZ1|S1ukX&Pf+56gFW3VVXcL!g-k)GJ!M?;PcD?0HBc- z5#WRK{dmp}uFlRjj{U%*%WZ25jX z{P*?XzTzZ-GF^d31o+^>%=Ap99M6&ogks$0k4OBs3;+Bb(;~!4V!2o<6ys46agIcq zjPo+3B8fthDa9qy|77CdEc*jK-!%ZRYCZvbku9iQV*~a}ClFY4z~c7+0P?$U!PF=S z1Au6Q;m>#f??3%Vpd|o+W=WE9003S@Bra6Svp>fO002awfhw>;8}z{#EWidF!3EsG z3;bXU&9EIRU@z1_9W=mEXoiz;4lcq~xDGvV5BgyU 
zp1~-*fe8db$Osc*A=-!mVv1NJjtCc-h4>-CNCXm#Bp}I%6j35eku^v$Qi@a{RY)E3 zJ#qp$hg?Rwkvqr$GJ^buyhkyVfwECO)C{#lxu`c9ghrwZ&}4KmnvWKso6vH!8a<3Q zq36)6Xb;+tK10Vaz~~qUGsJ8#F2=(`u{bOVlVi)VBCHIn#u~6ztOL7=^<&SmcLWlF zMZgI*1b0FpVIDz9SWH+>*hr`#93(Um+6gxa1B6k+CnA%mOSC4s5&6UzVlpv@SV$}* z))J2sFA#f(L&P^E5{W}HC%KRUNwK6<(h|}}(r!{C=`5+6G)NjFlgZj-YqAG9lq?`C z$c5yc>d>VnA`E_*3F2Qp##d8RZb=H01_mm@+|Cqnc9PsG(F5HIG_C zt)aG3uTh7n6Et<2In9F>NlT@zqLtGcXcuVrX|L#Xx)I%#9!{6gSJKPrN9dR61N3(c z4Tcqi$B1Vr8Jidf7-t!G7_XR2rWwr)$3XQ?}=hpK0&Z&W{| zep&sA23f;Q!%st`QJ}G3cbou<7-yIK2z4nfCCCtN2-XOGSWo##{8Q{ATurxr~;I`ytDs%xbip}RzP zziy}Qn4Z2~fSycmr`~zJ=lUFdFa1>gZThG6M+{g7vkW8#+YHVaJjFF}Z#*3@$J_By zLtVo_L#1JrVVB{Ak-5=4qt!-@Mh}c>#$4kh<88)m#-k<%CLtzEP3leVno>={htGUuD;o7bD)w_sX$S}eAxwzy?UvgBH(S?;#HZiQMoS*2K2 zT3xe7t(~nU*1N5{rxB;QPLocnp4Ml>u<^FZwyC!nu;thW+pe~4wtZn|Vi#w(#jeBd zlf9FDx_yoPJqHbk*$%56S{;6Kv~mM9!g3B(KJ}#RZ#@)!hR|78Dq|Iq-afF%KE1Brn_fm;Im z_u$xr8UFki1L{Ox>G0o)(&RAZ;=|I=wN2l97;cLaHH6leTB-XXa*h%dBOEvi`+x zi?=Txl?TadvyiL>SuF~-LZ;|cS}4~l2eM~nS7yJ>iOM;atDY;(?aZ^v+mJV$@1Ote z62cPUlD4IWOIIx&SmwQ~YB{nzae3Pc;}r!fhE@iwJh+OsDs9zItL;~pu715HdQEGA zUct(O!LkCy1<%NCg+}G`0PgpNm-?d@-hMgNe6^V+j6x$b<6@S<$+<4_1hi}Ti zncS4LsjI}fWY1>OX6feMEuLErma3QLmkw?X+1j)X-&VBk_4Y;EFPF_I+q;9dL%E~B zJh;4Nr^(LEJ3myURP{Rblsw%57T)g973R8o)DE9*xN#~;4_o$q%o z4K@u`jhx2fBXC4{U8Qn{*%*B$Ge=nny$HAYq{=vy|sI0 z_vss+H_qMky?OB#|JK!>IX&II^LlUh#rO5!7TtbwC;iULyV-Xq?ybB}ykGP{?LpZ? z-G|jbTmIbG@7#ZCz;~eY(cDM(28Dyq{*m>M4?_iynUBkc4TkHUI6gT!;y-fz>HMcd z&t%Ugo)`Y2{>!cx7B7DI)$7;J(U{Spm-3gBzioV_{p!H$8L!*M!p0uH$#^p{Ui4P` z?ZJ24cOCDe-w#jZd?0@)|7iKK^;6KN`;!@ylm7$*nDhK&GcDTy000JJOGiWi{{a60 z|De66lK=n!32;bRa{vGf6951U69E94oEQKA00(qQO+^RZ3=at@4%J+v%K!id;Ymb6 zR9M4>mu;*aRT+h!Ju~Ordwbi`A|I_lL5rHGfG7|Q#;7627^4*Vp~eWtNQ}`KYc<9n z=pT%HsfjTVi67A)B?7Tgl46Mb01Jc(W1jGeFn65t>iEa!6!vH=InfKgKZ zXH-kTU)p{rVfW4*1VZzP&)*Pt@7(bnU@Z^^8%VaJ?J;NsOl@m$KiH`TT1m#5w~ZBR zW804Tls6z)`r@~4|M~B3y1bU0!tR|rLL${QbqlmP_!ZHI9CePO`>l%zWB^I!86l<2 zkp+c8Nywn2U>IGw{m$^48@Gi9Xlm*o0ux})Hb@9#2~egl)0@w9n@a=&QX<6(p=!vW zqT7_@y!E7fHYBP2_4V7nZSl*dB=7{F%G9N@xC{1{nYz*+D5FrWLz z?M#jyS_y7QNhBGSYY9?<#*zutC0BOIV^M;wlnb&0SH2fh%g44|!|od4>O>`Sl$w>o~VApBZ{4(uzGv@5~T-q08Zj}H37|*Zcw6}kN)3;v8g&(;JOWd>T zMxNdK03w4+qV!Y((AX6r5Qa4ns*259-d7@fb6^0a2q6&42Ov}xYuCS}MBJq|*99f4 zMiNz3HM0sgDYm@u+&{VRuIsQ9Z!EpwGcy%J2>IQ(*Sts;C>#x|2FE;LQi|@Riw;7Re|lweH+c4tZgT}^!!tW6%!Dn@jMHQ zC$7qUV9!0bAc+{KOpYF5a`Z48Pujxr@@qY~y;7E_Ffg4=Y3jPpN--3Nvl3?31w*L3 z>CE#P7K1I8553Itp;tVbTMUw^n|T%%=Qw)g5K#5NS(2H7(isClQ`dpKFC*)?;9VhZ zJb5ctB2;xx|ABoEGM-;YjIr+(vis`}TvA?h@ExEDA@pvAH3avtHK)|FZo}p<)I(L( z9Q^kaNDjvXSD{?W>nIf|c5M-8rqd~PUC(@A7I|+(l4;wPwd+qItdir>t1k{rW)*nn z2R5lluLIA+N}#Ek(JUo~9-bd8=XT1PbtfIu4kkwr&j3qWx=2^7q5M10eK(jY^*c`4 z`;P-|8Hb8?a`+hiuiJ2H1{;C{mP*CX?AXjUlbM3bj>S~E8$Gy(MMS81ed$FyzsP+( zv3bjTfzX2!0+S=lTz=KH9A19C2k%_416N5|Mb}XB7!_7W-c@X#0X`oO{uy_~tdcu+2039$~z&w(op16PnEi08JZXRsn;26&B=~yiyd;?tADs zE*LoLofq)+AKcN;63u9iyZ(4X5v;pmbe7J6y;Y&EbB^R*uSpI?QtrImHuDSXx$o{< zj%&r;SxWrl!M||i@Efy0%0QYra2Z9BqNo(}>%HZdh0+j9rc?SHH+ zq<(hHDuQSBKFXhOyKWVuvw_s=*+rcjhWxTa)Nq2;%t8l)MAOW3`)|L;yWjtDF1X|h zPT9N_spYw+ALoI4ck=Y!M;Om7tOPGiFevcS%W0xbO~@u;2J`%};9U;uW{!PN+|T0= z-_6k@%T%GF9Q=;s>iHZ z9FxjJDWw7~Ovu`2r7sllO7bfZTp8k5zWb*irKC9tOIA!$l$DZ{qSUrYF-g-l=Id6P zw$UlcF-b8d#VEBgU$-f*VUUuPk}X*(7AU#C>DE2J5N;d5zxv5#_0`|s6>h%v(_c>V 
zqV2$-r!#Az2CAa%n2S~wWyF>gl5uAjiV8WCR-U>EwwG?Yb"+d+"",d){case"phone":case"fax":c+=''+b.tags[d]+"";break;case"email":c+=''+b.tags[d]+"";break;case"website":case"url":c+=''+b.tags[d]+"";break;case"wikidata":c+=''+b.tags[d]+"";break;case"wikipedia":if(-1!=b.tags[d].indexOf(":")){c+=''+b.tags[d]+"";break}default:c+=b.tags[d]}c+=""}$(g).html(c),$(g).removeClass("hidden"),$(h).addClass("hidden")}}).fail(function(){$(h).html($(h).html()+'

<p class="text-muted">could not load data!</p>

')})}}$(this).off(a)}),$(".searx_init_map").on("click",function(a){var b=$(this).data("leaflet-target"),c=$(this).data("map-lon"),d=$(this).data("map-lat"),e=$(this).data("map-zoom"),f=$(this).data("map-boundingbox"),g=$(this).data("map-geojson");require(["leaflet-0.7.3.min"],function(){f&&(southWest=L.latLng(f[0],f[2]),northEast=L.latLng(f[1],f[3]),map_bounds=L.latLngBounds(southWest,northEast)),L.Icon.Default.imagePath="/static/oscar/img/map";{var a=L.map(b),h="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",i='Map data © OpenStreetMap contributors',j=new L.TileLayer(h,{minZoom:1,maxZoom:19,attribution:i}),k="http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg",l='Map data © OpenStreetMap contributors | Tiles Courtesy of MapQuest ',m=new L.TileLayer(k,{minZoom:1,maxZoom:18,subdomains:"1234",attribution:l}),n="http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg",o='Map data © OpenStreetMap contributors | Tiles Courtesy of MapQuest | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';new L.TileLayer(n,{minZoom:1,maxZoom:11,subdomains:"1234",attribution:o})}map_bounds?setTimeout(function(){a.fitBounds(map_bounds,{maxZoom:17})},0):c&&d&&(e?a.setView(new L.LatLng(d,c),e):a.setView(new L.LatLng(d,c),8)),a.addLayer(m);var p={"OSM Mapnik":j,MapQuest:m};L.control.layers(p).addTo(a),g&&L.geoJson(g).addTo(a)}),$(this).off(a)})}); \ No newline at end of file +/*! oscar/searx.min.js | 19-12-2014 | https://github.com/asciimoo/searx */ +requirejs.config({baseUrl:"./static/oscar/js",paths:{app:"../app"}}),searx.autocompleter&&(searx.searchResults=new Bloodhound({datumTokenizer:Bloodhound.tokenizers.obj.whitespace("value"),queryTokenizer:Bloodhound.tokenizers.whitespace,remote:"/autocompleter?q=%QUERY"}),searx.searchResults.initialize()),$(document).ready(function(){searx.autocompleter&&$("#q").typeahead(null,{name:"search-results",displayKey:function(a){return a},source:searx.searchResults.ttAdapter()})}),$(document).ready(function(){$("#q.autofocus").focus(),$(".select-all-on-click").click(function(){$(this).select()}),$(".btn-collapse").click(function(){var a=$(this).data("btn-text-collapsed"),b=$(this).data("btn-text-not-collapsed");""!==a&&""!==b&&(new_html=$(this).hasClass("collapsed")?$(this).html().replace(a,b):$(this).html().replace(b,a),$(this).html(new_html))}),$(".btn-toggle .btn").click(function(){var a="btn-"+$(this).data("btn-class"),b=$(this).data("btn-label-default"),c=$(this).data("btn-label-toggled");""!==c&&(new_html=$(this).hasClass("btn-default")?$(this).html().replace(b,c):$(this).html().replace(c,b),$(this).html(new_html)),$(this).toggleClass(a),$(this).toggleClass("btn-default")}),$(".btn-sm").dblclick(function(){var a="btn-"+$(this).data("btn-class");$(this).hasClass("btn-default")?($(".btn-sm > input").attr("checked","checked"),$(".btn-sm > input").prop("checked",!0),$(".btn-sm").addClass(a),$(".btn-sm").addClass("active"),$(".btn-sm").removeClass("btn-default")):($(".btn-sm > input").attr("checked",""),$(".btn-sm > input").removeAttr("checked"),$(".btn-sm > input").checked=!1,$(".btn-sm").removeClass(a),$(".btn-sm").removeClass("active"),$(".btn-sm").addClass("btn-default"))})}),$(document).ready(function(){$(".searx_overpass_request").on("click",function(a){var b="https://overpass-api.de/api/interpreter?data=",c=b+"[out:json][timeout:25];(",d=");out 
meta;",e=$(this).data("osm-id"),f=$(this).data("osm-type"),g=$(this).data("result-table"),h="#"+$(this).data("result-table-loadicon"),i=["addr:city","addr:country","addr:housenumber","addr:postcode","addr:street"];if(e&&f&&g){g="#"+g;var j=null;switch(f){case"node":j=c+"node("+e+");"+d;break;case"way":j=c+"way("+e+");"+d;break;case"relation":j=c+"relation("+e+");"+d}if(j){$.ajax(j).done(function(a){if(a&&a.elements&&a.elements[0]){var b=a.elements[0],c=$(g).html();for(var d in b.tags)if(null===b.tags.name||-1==i.indexOf(d)){switch(c+=""+d+"",d){case"phone":case"fax":c+=''+b.tags[d]+"";break;case"email":c+=''+b.tags[d]+"";break;case"website":case"url":c+=''+b.tags[d]+"";break;case"wikidata":c+=''+b.tags[d]+"";break;case"wikipedia":if(-1!=b.tags[d].indexOf(":")){c+=''+b.tags[d]+"";break}default:c+=b.tags[d]}c+=""}$(g).html(c),$(g).removeClass("hidden"),$(h).addClass("hidden")}}).fail(function(){$(h).html($(h).html()+'

<p class="text-muted">could not load data!</p>

')})}}$(this).off(a)}),$(".searx_init_map").on("click",function(a){var b=$(this).data("leaflet-target"),c=$(this).data("map-lon"),d=$(this).data("map-lat"),e=$(this).data("map-zoom"),f=$(this).data("map-boundingbox"),g=$(this).data("map-geojson");require(["leaflet-0.7.3.min"],function(){f&&(southWest=L.latLng(f[0],f[2]),northEast=L.latLng(f[1],f[3]),map_bounds=L.latLngBounds(southWest,northEast)),L.Icon.Default.imagePath="./static/oscar/img/map";{var a=L.map(b),h="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",i='Map data © OpenStreetMap contributors',j=new L.TileLayer(h,{minZoom:1,maxZoom:19,attribution:i}),k="http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg",l='Map data © OpenStreetMap contributors | Tiles Courtesy of MapQuest ',m=new L.TileLayer(k,{minZoom:1,maxZoom:18,subdomains:"1234",attribution:l}),n="http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg",o='Map data © OpenStreetMap contributors | Tiles Courtesy of MapQuest | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';new L.TileLayer(n,{minZoom:1,maxZoom:11,subdomains:"1234",attribution:o})}map_bounds?setTimeout(function(){a.fitBounds(map_bounds,{maxZoom:17})},0):c&&d&&(e?a.setView(new L.LatLng(d,c),e):a.setView(new L.LatLng(d,c),8)),a.addLayer(m);var p={"OSM Mapnik":j,MapQuest:m};L.control.layers(p).addTo(a),g&&L.geoJson(g).addTo(a)}),$(this).off(a)})}); \ No newline at end of file diff --git a/sources/searx/static/oscar/js/searx_src/00_requirejs_config.js b/sources/searx/static/oscar/js/searx_src/00_requirejs_config.js index 3676784..99ec4b5 100644 --- a/sources/searx/static/oscar/js/searx_src/00_requirejs_config.js +++ b/sources/searx/static/oscar/js/searx_src/00_requirejs_config.js @@ -16,7 +16,7 @@ */ requirejs.config({ - baseUrl: '/static/oscar/js', + baseUrl: './static/oscar/js', paths: { app: '../app' } diff --git a/sources/searx/static/oscar/js/searx_src/element_modifiers.js b/sources/searx/static/oscar/js/searx_src/element_modifiers.js index 088bd7a..dd45b77 100644 --- a/sources/searx/static/oscar/js/searx_src/element_modifiers.js +++ b/sources/searx/static/oscar/js/searx_src/element_modifiers.js @@ -63,4 +63,25 @@ $(document).ready(function(){ $(this).toggleClass(btnClass); $(this).toggleClass('btn-default'); }); + + /** + * Select or deselect every categories on double clic + */ + $(".btn-sm").dblclick(function() { + var btnClass = 'btn-' + $(this).data('btn-class'); // primary + if($(this).hasClass('btn-default')) { + $(".btn-sm > input").attr('checked', 'checked'); + $(".btn-sm > input").prop("checked", true); + $(".btn-sm").addClass(btnClass); + $(".btn-sm").addClass('active'); + $(".btn-sm").removeClass('btn-default'); + } else { + $(".btn-sm > input").attr('checked', ''); + $(".btn-sm > input").removeAttr('checked'); + $(".btn-sm > input").checked = false; + $(".btn-sm").removeClass(btnClass); + $(".btn-sm").removeClass('active'); + $(".btn-sm").addClass('btn-default'); + } + }); }); diff --git a/sources/searx/static/oscar/js/searx_src/leaflet_map.js b/sources/searx/static/oscar/js/searx_src/leaflet_map.js index 88af1e7..b5112ef 100644 --- a/sources/searx/static/oscar/js/searx_src/leaflet_map.js +++ b/sources/searx/static/oscar/js/searx_src/leaflet_map.js @@ -116,7 +116,7 @@ $(document).ready(function(){ // TODO hack // change default imagePath - L.Icon.Default.imagePath = "/static/oscar/img/map"; + L.Icon.Default.imagePath = "./static/oscar/img/map"; // init map var map = L.map(leaflet_target); diff --git 
a/sources/searx/templates/courgette/result_templates/default.html b/sources/searx/templates/courgette/result_templates/default.html index 734f906..3e93eee 100644 --- a/sources/searx/templates/courgette/result_templates/default.html +++ b/sources/searx/templates/courgette/result_templates/default.html @@ -1,7 +1,7 @@
-    {% if result['favicon'] %}
-
+    {% if "icon_"~result.engine~".ico" in favicons %}
+    {{result.engine}}
     {% endif %}
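
The hunk above switches the favicon test from a per-result `favicon` key to membership in a theme-wide `favicons` list. A minimal sketch of how such a list could be collected from the static image directories (the directory layout, function name, and filter rules here are assumptions for illustration, not taken from this patch):

```python
import os

# Hypothetical sketch: collect the favicon files available per theme, so a
# template can cheaply test `"icon_"~result.engine~".ico" in favicons`.
def load_favicons(static_path='searx/static',
                  themes=('default', 'courgette', 'oscar')):
    favicons = {}
    for theme in themes:
        img_dir = os.path.join(static_path, theme, 'img')
        if os.path.isdir(img_dir):
            # e.g. ['icon_kickass.ico', ...], matching the icons added here
            favicons[theme] = [name for name in os.listdir(img_dir)
                               if name.startswith('icon_') or name.endswith('.png')]
        else:
            favicons[theme] = []
    return favicons
```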
diff --git a/sources/searx/templates/courgette/result_templates/images.html b/sources/searx/templates/courgette/result_templates/images.html index 1f15ff2..ebda538 100644 --- a/sources/searx/templates/courgette/result_templates/images.html +++ b/sources/searx/templates/courgette/result_templates/images.html @@ -1,6 +1,6 @@ diff --git a/sources/searx/templates/courgette/result_templates/map.html b/sources/searx/templates/courgette/result_templates/map.html index 734f906..3e93eee 100644 --- a/sources/searx/templates/courgette/result_templates/map.html +++ b/sources/searx/templates/courgette/result_templates/map.html @@ -1,7 +1,7 @@
-    {% if result['favicon'] %}
-
+    {% if "icon_"~result.engine~".ico" in favicons %}
+    {{result.engine}}
     {% endif %}
diff --git a/sources/searx/templates/courgette/result_templates/videos.html b/sources/searx/templates/courgette/result_templates/videos.html index 8ceb0b1..2694a91 100644 --- a/sources/searx/templates/courgette/result_templates/videos.html +++ b/sources/searx/templates/courgette/result_templates/videos.html @@ -1,12 +1,10 @@
-    {% if result['favicon'] %}
-
+    {% if "icon_"~result.engine~".ico" in favicons %}
+    {{result.engine}}
     {% endif %}
-
     {{ result.title|safe }}
     {% if result.publishedDate %}
     {{ result.publishedDate }}
     {% endif %}
-    {{ result.title }}
+    {{ result.title|striptags }}
     {{ result.url }}
-
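
These courgette templates all render plain result dicts produced by the engines' `response()` functions. For reference, a sketch of the dict shape they consume, based on the `results.append(...)` calls earlier in this patch (the concrete values are illustrative):

```python
# Illustrative result dict as consumed by the result templates; only 'url'
# and 'title' are required, 'template' picks the result_templates/*.html
# file, and 'engine' is attached by the search callback (see search.py).
result = {
    'url': 'https://example.com/video',   # example value
    'title': 'Example video',
    'content': 'short description',
    'publishedDate': None,                # a datetime.datetime when known
    'template': 'videos.html',            # default.html is used when absent
    'engine': 'example-engine',           # hypothetical engine name
}
```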
diff --git a/sources/searx/templates/courgette/results.html b/sources/searx/templates/courgette/results.html index d0b53b4..62bef8c 100644 --- a/sources/searx/templates/courgette/results.html +++ b/sources/searx/templates/courgette/results.html @@ -10,7 +10,7 @@
            {{ _('Search URL') }}:
-
+
            {{ _('Download results') }}
@@ -43,9 +43,9 @@
     {% for result in results %}
     {% if result['template'] %}
-    {% include 'default/result_templates/'+result['template'] %}
+    {% include 'courgette/result_templates/'+result['template'] %}
     {% else %}
-    {% include 'default/result_templates/default.html' %}
+    {% include 'courgette/result_templates/default.html' %}
     {% endif %}
     {% endfor %}
diff --git a/sources/searx/templates/default/infobox.html b/sources/searx/templates/default/infobox.html
index d03b008..d3ff8f0 100644
--- a/sources/searx/templates/default/infobox.html
+++ b/sources/searx/templates/default/infobox.html
@@ -1,6 +1,6 @@

     {{ infobox.infobox }}
-    {% if infobox.img_src %}{% endif %}
+    {% if infobox.img_src %}{{ infobox.infobox|striptags }}{% endif %}
     {{ infobox.entity }}
     {{ infobox.content | safe }}

{% if infobox.attributes %} diff --git a/sources/searx/templates/default/result_templates/default.html b/sources/searx/templates/default/result_templates/default.html index d0e725a..0f3e3e6 100644 --- a/sources/searx/templates/default/result_templates/default.html +++ b/sources/searx/templates/default/result_templates/default.html @@ -1,5 +1,5 @@
-    {% if result['favicon'] %}{% endif %}{{ result.title|safe }}
+    {% if "icon_"~result.engine~".ico" in favicons %}{{result.engine}}{% endif %}{{ result.title|safe }}
     {{ result.pretty_url }} cached
     {% if result.publishedDate %}
     {{ result.publishedDate }}
     {% endif %}
     {% if result.img_src %}{% endif %}{% if result.content %}{{ result.content|safe }}{% endif %}

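
The `alt`/`title` attributes in these templates now pass titles through `|striptags`, since `result.title|safe` may carry highlighting markup. Roughly what that filter does, as an illustrative sketch rather than Jinja2's actual implementation:

```python
import re

def striptags_sketch(value):
    # Rough stand-in for Jinja2's |striptags: drop tags, collapse whitespace.
    # Not the real implementation.
    without_tags = re.sub(r'<[^>]*>', '', value)
    return ' '.join(without_tags.split())

assert striptags_sketch('Example <b>title</b>') == 'Example title'
```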
diff --git a/sources/searx/templates/default/result_templates/images.html b/sources/searx/templates/default/result_templates/images.html index bead78c..4c6d59e 100644 --- a/sources/searx/templates/default/result_templates/images.html +++ b/sources/searx/templates/default/result_templates/images.html @@ -1,6 +1,6 @@ diff --git a/sources/searx/templates/default/result_templates/map.html b/sources/searx/templates/default/result_templates/map.html index 78221aa..f9ba7dc 100644 --- a/sources/searx/templates/default/result_templates/map.html +++ b/sources/searx/templates/default/result_templates/map.html @@ -1,7 +1,7 @@
-    {% if result['favicon'] %}
-
+    {% if "icon_"~result.engine~".ico" in favicons %}
+    {{result.engine}}
     {% endif %}
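
The torrent template changed below renders entries like the ones the new kickass engine builds earlier in this patch. A sketch of such a result dict (values made up for illustration):

```python
# Example torrent result as built by the kickass engine in this patch;
# seed/leech are coerced to int so the template can print them directly.
torrent_result = {
    'url': 'https://kickass.so/example',  # illustrative values
    'title': 'Example torrent',
    'content': 'description snippet',
    'seed': 42,
    'leech': 7,
    'magnetlink': 'magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567',
    'template': 'torrent.html',
}
```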
diff --git a/sources/searx/templates/default/result_templates/torrent.html b/sources/searx/templates/default/result_templates/torrent.html index 6c62793..5925f13 100644 --- a/sources/searx/templates/default/result_templates/torrent.html +++ b/sources/searx/templates/default/result_templates/torrent.html @@ -1,7 +1,6 @@

     {{ result.title|safe }}
-    {% if result.content %}{{ result.content|safe }}{% endif %}
-    Seed: {{ result.seed }}, Leech: {{ result.leech }}
-    magnet link
-    {{ result.pretty_url }}
+    {% if result.content %}{{ result.content|safe }}{% endif %}
+    magnet link - Seed: {{ result.seed }}, Leech: {{ result.leech }}

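
Torrent results also arrive pre-ranked: the kickass engine sorts by seeder count with `operator.itemgetter` before returning. A standalone illustration of that pattern:

```python
from operator import itemgetter

results = [{'title': 'a', 'seed': 3},
           {'title': 'b', 'seed': 12},
           {'title': 'c', 'seed': 0}]

# Same pattern the kickass engine uses: most-seeded results first.
ranked = sorted(results, key=itemgetter('seed'), reverse=True)
assert [r['title'] for r in ranked] == ['b', 'a', 'c']
```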
diff --git a/sources/searx/templates/default/result_templates/videos.html b/sources/searx/templates/default/result_templates/videos.html index 233a6c0..16b2d5f 100644 --- a/sources/searx/templates/default/result_templates/videos.html +++ b/sources/searx/templates/default/result_templates/videos.html @@ -1,8 +1,6 @@
-
-    {% if result['favicon'] %}{% endif %}{{ result.title|safe }}
+    {% if "icon_"~result.engine~".ico" in favicons %}{{result.engine}}{% endif %}{{ result.title|safe }}
     {% if result.publishedDate %}
     {{ result.publishedDate }}
     {% endif %}
-    {{ result.title }}
+    {{ result.title|striptags }}
     {{ result.url }}
-

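
Stepping back, the largest behavioral change in this patch is the search.py rework earlier: engine callbacks no longer write into shared dicts but push `(engine_name, results)` tuples into a thread-safe queue that the main thread drains after `threaded_requests()`. A condensed sketch of that pattern (simplified, not the full searx code):

```python
try:
    from queue import Queue   # Python 3
except ImportError:
    from Queue import Queue   # Python 2, as used in this patch

results_queue = Queue()

def make_callback(engine_name, results_queue):
    # Each engine's response hook parses its HTTP response in its own
    # thread and hands the parsed results to the main thread via the queue.
    def callback(parsed_results):
        results_queue.put_nowait((engine_name, parsed_results))
    return callback

def drain(results_queue):
    # Main thread, after all request threads have finished.
    results = {}
    while not results_queue.empty():
        engine_name, engine_results = results_queue.get_nowait()
        results[engine_name] = engine_results
    return results
```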
diff --git a/sources/searx/templates/default/results.html b/sources/searx/templates/default/results.html index 5419835..199eb1d 100644 --- a/sources/searx/templates/default/results.html +++ b/sources/searx/templates/default/results.html @@ -10,7 +10,7 @@
            {{ _('Search URL') }}:
-
+
            {{ _('Download results') }}
diff --git a/sources/searx/templates/oscar/result_templates/default.html b/sources/searx/templates/oscar/result_templates/default.html
index 0f090d8..3c8e625 100644
--- a/sources/searx/templates/oscar/result_templates/default.html
+++ b/sources/searx/templates/oscar/result_templates/default.html
@@ -1,6 +1,6 @@
 {% from 'oscar/macros.html' import icon %}

-    {% if result['favicon'] %}{{ result['favicon'] }} {% endif %}{{ result.title|safe }}
+    {% if result.engine~".png" in favicons %}{{ result.engine }} {% endif %}{{ result.title|safe }}

     {% if result.publishedDate %}{% endif %}
     {{ icon('link') }} {{ _('cached') }}
diff --git a/sources/searx/templates/oscar/result_templates/images.html b/sources/searx/templates/oscar/result_templates/images.html
index 7689f9f..94627c9 100644
--- a/sources/searx/templates/oscar/result_templates/images.html
+++ b/sources/searx/templates/oscar/result_templates/images.html
@@ -7,7 +7,7 @@