Merge pull request #4781 from DoumanAsh/python_align_search_style
[search engine] Align search engine url getting mechanism. Closes #4778
commit 1cfda610cc
10 changed files with 64 additions and 132 deletions
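
Every engine below gets the same treatment: the plugin stops opening its own httplib/http.client connection with a hard-coded host and decoding the response by hand, and instead builds an absolute URL from self.url and fetches it through the retrieve_url helper it now imports from helpers alongside download_file. A minimal sketch of the resulting shape, not a copy of any one plugin (the class name, base URL, and query path below are placeholders, and retrieve_url() is assumed to return the decoded page body as a str, which is how the diffs use it):

# Minimal sketch of the new pattern shared by the diffs in this commit.
from helpers import retrieve_url  # qBt helper; replaces manual httplib/http.client handling

class example_engine(object):
    url = "https://example-tracker.invalid"  # hypothetical base URL

    def search(self, what, cat="all"):
        """ Performs search """
        # Old style (removed below): open an HTTP(S)Connection, send a GET,
        # check response.status, then response.read().decode("utf-8").
        # New style: build an absolute URL from self.url and fetch it in one call.
        query = "".join((self.url, "/search/", what))
        data = retrieve_url(query)
        # ... feed `data` to the plugin's HTML parser, as each engine does below ...
        return data
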
src/searchengine/nova/engines/demonoid.py
@@ -1,4 +1,4 @@
-#VERSION: 1.1
+#VERSION: 1.2
 #AUTHORS: Douman (custparasite@gmx.se)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
 
@@ -27,13 +27,12 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from HTMLParser import HTMLParser
-from httplib import HTTPSConnection as https
 from re import compile as re_compile
 from re import DOTALL
 from itertools import islice
 #qBt
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class demonoid(object):
     """ Search engine class """
@@ -120,18 +119,12 @@ class demonoid(object):
 
     def search(self, what, cat='all'):
         """ Performs search """
-        connection = https("www.demonoid.pw")
-
         #prepare query
         cat = self.supported_categories[cat.lower()]
-        query = "".join(("/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))
+        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        data = retrieve_url(query)
 
-        data = response.read().decode("utf-8")
         add_res_list = re_compile("/files.*page=[0-9]+")
         torrent_list = re_compile("start torrent list -->(.*)<!-- end torrent", DOTALL)
         data = torrent_list.search(data).group(0)
@@ -144,10 +137,8 @@ class demonoid(object):
 
         if list_results:
             for search_query in islice((add_res_list.search(result).group(0) for result in list_results[1].split(" | ")), 0, 5):
-                connection.request("GET", search_query)
-                response = connection.getresponse()
-                parser.feed(torrent_list.search(response.read().decode('utf-8')).group(0))
+                response = retrieve_url(self.url + search_query)
+                parser.feed(torrent_list.search(response).group(0))
                 parser.close()
 
-        connection.close()
         return

src/searchengine/nova/engines/extratorrent.py
@@ -1,4 +1,4 @@
-#VERSION: 2.02
+#VERSION: 2.03
 #AUTHORS: Christophe Dumez (chris@qbittorrent.org)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
 
@@ -27,10 +27,9 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from HTMLParser import HTMLParser
-from httplib import HTTPConnection as http
 #qBt
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class extratorrent(object):
     """ Search engine class """
@@ -140,25 +139,18 @@ class extratorrent(object):
 
     def search(self, what, cat="all"):
         """ Performs search """
-        connection = http("extratorrent.cc")
-
-        query = "".join(("/advanced_search/?with=", what, "&s_cat=", self.supported_categories[cat]))
+        query = "".join((self.url, "/advanced_search/?with=", what, "&s_cat=", self.supported_categories[cat]))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        response = retrieve_url(query)
 
         list_searches = []
         parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
-        parser.feed(response.read().decode('utf-8'))
+        parser.feed(response)
         parser.close()
 
         for search_query in list_searches:
-            connection.request("GET", search_query)
-            response = connection.getresponse()
-            parser.feed(response.read().decode('utf-8'))
+            response = retrieve_url(self.url + search_query)
+            parser.feed(response)
             parser.close()
 
-        connection.close()
         return

src/searchengine/nova/engines/mininova.py
@@ -1,4 +1,4 @@
-#VERSION: 2.00
+#VERSION: 2.01
 #AUTHORS: Christophe Dumez (chris@qbittorrent.org)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
 
@@ -27,9 +27,8 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from HTMLParser import HTMLParser
-from httplib import HTTPConnection as http
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class mininova(object):
     """ Search engine class """
@@ -123,26 +122,19 @@ class mininova(object):
 
     def search(self, what, cat="all"):
         """ Performs search """
-        connection = http("www.mininova.org")
-
-        query = "/".join(("/search", what, self.supported_categories[cat], "seeds"))
+        query = "/".join((self.url, "search", what, self.supported_categories[cat], "seeds"))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        response = retrieve_url(query)
 
         list_searches = []
         parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
-        parser.feed(response.read().decode('utf-8'))
+        parser.feed(response)
         parser.close()
 
         parser.next_queries = False
         for search_query in list_searches:
-            connection.request("GET", search_query)
-            response = connection.getresponse()
-            parser.feed(response.read().decode('utf-8'))
+            response = retrieve_url(self.url + search_query)
+            parser.feed(response)
             parser.close()
 
-        connection.close()
         return

src/searchengine/nova/engines/piratebay.py
@@ -1,4 +1,4 @@
-#VERSION: 2.13
+#VERSION: 2.14
 #AUTHORS: Fabien Devaux (fab@gnux.info)
 #CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
 #              Arthur (custparasite@gmx.se)
 
@@ -29,10 +29,9 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from HTMLParser import HTMLParser
-from httplib import HTTPSConnection as https
 #qBt
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class piratebay(object):
     """ Search engine class """
@@ -141,10 +140,8 @@ class piratebay(object):
             if self.save_item == "size":
                 temp_data = data.split()
                 if "Size" in temp_data:
-                    self.current_item[self.save_item] = temp_data[2]
-                elif "ULed" in temp_data:
-                    temp_string = self.current_item[self.save_item]
-                    self.current_item[self.save_item] = " ".join((temp_string, temp_data[0][:-1]))
+                    indx = temp_data.index("Size")
+                    self.current_item[self.save_item] = temp_data[indx + 1] + " " + temp_data[indx + 2]
 
             elif self.save_item == "name":
                 # names with special characters like '&' are splitted in several pieces
@@ -159,28 +156,21 @@ class piratebay(object):
 
     def search(self, what, cat='all'):
         """ Performs search """
-        connection = https("thepiratebay.se")
-
         #prepare query. 7 is filtering by seeders
         cat = cat.lower()
-        query = "/".join(("/search", what, "0", "7", self.supported_categories[cat]))
+        query = "/".join((self.url, "search", what, "0", "7", self.supported_categories[cat]))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        response = retrieve_url(query)
 
         list_searches = []
         parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
-        parser.feed(response.read().decode('utf-8'))
+        parser.feed(response)
        parser.close()
 
         parser.add_query = False
         for search_query in list_searches:
-            connection.request("GET", search_query)
-            response = connection.getresponse()
-            parser.feed(response.read().decode('utf-8'))
+            response = retrieve_url(self.url + search_query)
+            parser.feed(response)
             parser.close()
 
-        connection.close()
         return

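Besides the retrieve_url migration, the piratebay hunk at line 141 above also reworks how the result size is read from the row text: instead of taking temp_data[2] and patching the value up in a separate "ULed" branch, the new code locates the "Size" token and takes the value and unit that follow it. A small illustration of that lookup with a made-up detail string (the string itself is an assumption, for demonstration only):

# Hypothetical text of the kind handle_data() receives for a result row:
data = "Uploaded 03-14 2016, Size 1.37 GiB ULed by someone"
temp_data = data.split()
if "Size" in temp_data:
    indx = temp_data.index("Size")
    size = temp_data[indx + 1] + " " + temp_data[indx + 2]  # size == "1.37 GiB"
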
src/searchengine/nova/engines/versions.txt
@@ -1,9 +1,9 @@
 btdigg: 1.30
-demonoid: 1.1
-extratorrent: 2.02
+demonoid: 1.2
+extratorrent: 2.03
 kickasstorrents: 1.28
 legittorrents: 2.00
-mininova: 2.00
-piratebay: 2.13
+mininova: 2.01
+piratebay: 2.14
 torrentreactor: 1.40
 torrentz: 2.17

src/searchengine/nova3/engines/demonoid.py
@@ -1,4 +1,4 @@
-#VERSION: 1.1
+#VERSION: 1.2
 #AUTHORS: Douman (custparasite@gmx.se)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
 
@@ -27,13 +27,12 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from html.parser import HTMLParser
-from http.client import HTTPSConnection as https
 from re import compile as re_compile
 from re import DOTALL
 from itertools import islice
 #qBt
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class demonoid(object):
     """ Search engine class """
@@ -120,18 +119,12 @@ class demonoid(object):
 
     def search(self, what, cat='all'):
         """ Performs search """
-        connection = https("www.demonoid.pw")
-
         #prepare query
         cat = self.supported_categories[cat.lower()]
-        query = "".join(("/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))
+        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        data = retrieve_url(query)
 
-        data = response.read().decode("utf-8")
         add_res_list = re_compile("/files.*page=[0-9]+")
         torrent_list = re_compile("start torrent list -->(.*)<!-- end torrent", DOTALL)
         data = torrent_list.search(data).group(0)
@@ -144,10 +137,8 @@ class demonoid(object):
 
         if list_results:
             for search_query in islice((add_res_list.search(result).group(0) for result in list_results[1].split(" | ")), 0, 5):
-                connection.request("GET", search_query)
-                response = connection.getresponse()
-                parser.feed(torrent_list.search(response.read().decode('utf-8')).group(0))
+                response = retrieve_url(self.url + search_query)
+                parser.feed(torrent_list.search(response).group(0))
                 parser.close()
 
-        connection.close()
         return

src/searchengine/nova3/engines/extratorrent.py
@@ -1,4 +1,4 @@
-#VERSION: 2.02
+#VERSION: 2.03
 #AUTHORS: Christophe Dumez (chris@qbittorrent.org)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
 
@@ -27,10 +27,9 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from html.parser import HTMLParser
-from http.client import HTTPConnection as http
 #qBt
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class extratorrent(object):
     """ Search engine class """
@@ -140,25 +139,18 @@ class extratorrent(object):
 
     def search(self, what, cat="all"):
         """ Performs search """
-        connection = http("extratorrent.cc")
-
-        query = "".join(("/advanced_search/?with=", what, "&s_cat=", self.supported_categories[cat]))
+        query = "".join((self.url, "/advanced_search/?with=", what, "&s_cat=", self.supported_categories[cat]))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        response = retrieve_url(query)
 
         list_searches = []
         parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
-        parser.feed(response.read().decode('utf-8'))
+        parser.feed(response)
         parser.close()
 
         for search_query in list_searches:
-            connection.request("GET", search_query)
-            response = connection.getresponse()
-            parser.feed(response.read().decode('utf-8'))
+            response = retrieve_url(self.url + search_query)
+            parser.feed(response)
             parser.close()
 
-        connection.close()
         return

src/searchengine/nova3/engines/mininova.py
@@ -1,4 +1,4 @@
-#VERSION: 2.00
+#VERSION: 2.01
 #AUTHORS: Christophe Dumez (chris@qbittorrent.org)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
 
@@ -27,9 +27,8 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from html.parser import HTMLParser
-from http.client import HTTPConnection as http
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class mininova(object):
     """ Search engine class """
@@ -123,26 +122,19 @@ class mininova(object):
 
     def search(self, what, cat="all"):
         """ Performs search """
-        connection = http("www.mininova.org")
-
-        query = "/".join(("/search", what, self.supported_categories[cat], "seeds"))
+        query = "/".join((self.url, "search", what, self.supported_categories[cat], "seeds"))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        response = retrieve_url(query)
 
         list_searches = []
         parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
-        parser.feed(response.read().decode('utf-8'))
+        parser.feed(response)
         parser.close()
 
         parser.next_queries = False
         for search_query in list_searches:
-            connection.request("GET", search_query)
-            response = connection.getresponse()
-            parser.feed(response.read().decode('utf-8'))
+            response = retrieve_url(self.url + search_query)
+            parser.feed(response)
             parser.close()
 
-        connection.close()
         return

src/searchengine/nova3/engines/piratebay.py
@@ -1,4 +1,4 @@
-#VERSION: 2.13
+#VERSION: 2.14
 #AUTHORS: Fabien Devaux (fab@gnux.info)
 #CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
 #              Arthur (custparasite@gmx.se)
 
@@ -29,10 +29,9 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 from html.parser import HTMLParser
-from http.client import HTTPSConnection as https
 #qBt
 from novaprinter import prettyPrinter
-from helpers import download_file
+from helpers import download_file, retrieve_url
 
 class piratebay(object):
     """ Search engine class """
@@ -157,28 +156,21 @@ class piratebay(object):
 
     def search(self, what, cat='all'):
         """ Performs search """
-        connection = https("thepiratebay.se")
-
         #prepare query. 7 is filtering by seeders
         cat = cat.lower()
-        query = "/".join(("/search", what, "0", "7", self.supported_categories[cat]))
+        query = "/".join((self.url, "search", what, "0", "7", self.supported_categories[cat]))
 
-        connection.request("GET", query)
-        response = connection.getresponse()
-        if response.status != 200:
-            return
+        response = retrieve_url(query)
 
         list_searches = []
         parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
-        parser.feed(response.read().decode('utf-8'))
+        parser.feed(response)
         parser.close()
 
         parser.add_query = False
         for search_query in list_searches:
-            connection.request("GET", search_query)
-            response = connection.getresponse()
-            parser.feed(response.read().decode('utf-8'))
+            response = retrieve_url(self.url + search_query)
+            parser.feed(response)
             parser.close()
 
-        connection.close()
         return

src/searchengine/nova3/engines/versions.txt
@@ -1,9 +1,9 @@
 btdigg: 1.30
-demonoid: 1.1
-extratorrent: 2.02
+demonoid: 1.2
+extratorrent: 2.03
 kickasstorrents: 1.28
 legittorrents: 2.00
-mininova: 2.00
-piratebay: 2.13
+mininova: 2.01
+piratebay: 2.14
 torrentreactor: 1.40
 torrentz: 2.17