openlp/openlp/plugins/bibles/lib/http.py

621 lines
26 KiB
Python
Raw Normal View History

# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection                                      #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2012 Raoul Snyman                                        #
# Portions copyright (c) 2008-2012 Tim Bentley, Gerald Britton, Jonathan      #
# Corwin, Michael Gorven, Scott Guerrieri, Matthias Hub, Meinert Jordan,      #
# Armin Köhler, Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias     #
# Põldaru, Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith,    #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Frode Woldsund             #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it     #
# under the terms of the GNU General Public License as published by the Free  #
# Software Foundation; version 2 of the License.                              #
#                                                                             #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or       #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for    #
# more details.                                                               #
#                                                                             #
# You should have received a copy of the GNU General Public License along     #
# with this program; if not, write to the Free Software Foundation, Inc., 59  #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA                          #
###############################################################################
"""
The :mod:`http` module enables OpenLP to retrieve scripture from bible
websites.
"""
2009-09-25 00:43:42 +00:00
import logging
2010-03-26 20:50:55 +00:00
import re
import socket
2010-07-24 14:03:04 +00:00
import urllib
2010-07-27 12:54:26 +00:00
from HTMLParser import HTMLParseError
2011-02-26 00:34:46 +00:00
from BeautifulSoup import BeautifulSoup, NavigableString, Tag
2011-01-01 10:33:14 +00:00
from openlp.core.lib import Receiver, translate
from openlp.core.lib.ui import critical_error_message_box
2011-07-07 18:03:12 +00:00
from openlp.core.utils import get_web_page
2010-10-27 17:42:10 +00:00
from openlp.plugins.bibles.lib import SearchResults
from openlp.plugins.bibles.lib.db import BibleDB, BiblesResourcesDB, \
2011-04-14 20:18:23 +00:00
Book
log = logging.getLogger(__name__)
2010-07-29 14:36:02 +00:00
class BGExtract(object):
    """
    Extract verses and book lists from the BibleGateway website.
    """
    def __init__(self, proxyurl=None):
        """
        ``proxyurl``
            An optional proxy server URL. It is stored on the instance but
            not otherwise used by this class.
        """
        log.debug(u'BGExtract.init("%s")', proxyurl)
        self.proxyurl = proxyurl
        # Stop a hung web server from blocking the whole application.
        socket.setdefaulttimeout(30)

    def get_bible_chapter(self, version, book_name, chapter):
        """
        Access and decode Bibles via the BibleGateway website.

        ``version``
            The version of the Bible like 31 for New International version.

        ``book_name``
            Name of the Book.

        ``chapter``
            Chapter number.

        Returns a ``SearchResults`` object on success or ``None`` on failure.
        """
        log.debug(u'BGExtract.get_bible_chapter("%s", "%s", "%s")', version,
            book_name, chapter)
        url_book_name = urllib.quote(book_name.encode("utf-8"))
        url_params = u'search=%s+%s&version=%s' % (url_book_name, chapter,
            version)
        cleaner = [(re.compile('&nbsp;|<br />|\'\+\''), lambda match: '')]
        soup = get_soup_for_bible_ref(
            u'http://www.biblegateway.com/passage/?%s' % url_params,
            pre_parse_regex=r'<meta name.*?/>', pre_parse_substitute='',
            cleaner=cleaner)
        if not soup:
            return None
        Receiver.send_message(u'openlp_process_events')
        # Strip all the mark up which is not verse text before extraction.
        footnotes = soup.findAll(u'sup', u'footnote')
        if footnotes:
            for footnote in footnotes:
                footnote.extract()
        crossrefs = soup.findAll(u'sup', u'xref')
        if crossrefs:
            for crossref in crossrefs:
                crossref.extract()
        headings = soup.findAll(u'h5')
        if headings:
            for heading in headings:
                heading.extract()
        chapter_notes = soup.findAll('div', 'footnotes')
        if chapter_notes:
            log.debug('Found chapter notes')
            for note in chapter_notes:
                note.extract()
        note_comments = soup.findAll(text=u'end of footnotes')
        if note_comments:
            for comment in note_comments:
                comment.extract()
        # Re-parse with runs of whitespace collapsed to a single space.
        cleanup = [(re.compile('\s+'), lambda match: ' ')]
        verses = BeautifulSoup(str(soup), markupMassage=cleanup)
        verse_list = {}
        # Cater for inconsistent mark up in the first verse of a chapter.
        first_verse = verses.find(u'versenum')
        if first_verse and len(first_verse.contents):
            verse_list[1] = unicode(first_verse.contents[0])
        for verse in verses(u'sup', u'versenum'):
            raw_verse_num = verse.next
            clean_verse_num = 0
            # Not all verses exist in all translations and may or may not be
            # represented by a verse number. If they are not fine, if they are
            # it will probably be in a format that breaks int(). We will then
            # have no idea what garbage may be sucked in to the verse text so
            # if we do not get a clean int() then ignore the verse completely.
            try:
                clean_verse_num = int(str(raw_verse_num))
            except ValueError:
                log.warn(u'Illegal verse number in %s %s %s:%s',
                    version, book_name, chapter, unicode(raw_verse_num))
            if clean_verse_num:
                verse_text = raw_verse_num.next
                part = raw_verse_num.next.next
                # Guard ``part`` against running off the end of the document
                # (``None``), which previously raised an AttributeError on
                # ``part.next`` below.
                while part and not (isinstance(part, Tag) and part.attrMap and
                    part.attrMap[u'class'] == u'versenum'):
                    # While we are still in the same verse grab all the text.
                    if isinstance(part, NavigableString):
                        verse_text = verse_text + part
                    if isinstance(part.next, Tag) and part.next.name == u'div':
                        # Run out of verses so stop.
                        break
                    part = part.next
                verse_list[clean_verse_num] = unicode(verse_text)
        if not verse_list:
            log.debug(u'No content found in the BibleGateway response.')
            send_error_message(u'parse')
            return None
        return SearchResults(book_name, chapter, verse_list)

    def get_books_from_http(self, version):
        """
        Load a list of all books a Bible contains from the BibleGateway
        website.

        ``version``
            The version of the Bible like NIV for New International Version

        Returns a list of book names or ``None`` on failure.
        """
        log.debug(u'BGExtract.get_books_from_http("%s")', version)
        url_params = urllib.urlencode(
            {u'action': 'getVersionInfo', u'vid': u'%s' % version})
        reference_url = u'http://www.biblegateway.com/versions/?%s#books' % \
            url_params
        page = get_web_page(reference_url)
        if not page:
            send_error_message(u'download')
            return None
        page_source = page.read()
        try:
            page_source = unicode(page_source, u'utf8')
        except UnicodeDecodeError:
            # Some pages are not valid UTF-8; fall back to Windows-1251.
            page_source = unicode(page_source, u'cp1251')
        page_source_temp = re.search(u'<table .*?class="infotable".*?>.*?'\
            u'</table>', page_source, re.DOTALL)
        # Bail out early instead of feeding ``None`` to BeautifulSoup, which
        # previously raised a TypeError rather than reporting a parse error.
        if not page_source_temp:
            send_error_message(u'parse')
            return None
        try:
            soup = BeautifulSoup(page_source_temp.group(0))
        except HTMLParseError:
            log.error(u'BeautifulSoup could not parse the Bible page.')
            send_error_message(u'parse')
            return None
        if not soup:
            send_error_message(u'parse')
            return None
        Receiver.send_message(u'openlp_process_events')
        content = soup.find(u'table', {u'class': u'infotable'})
        # ``find`` returns None when the table is missing; guard before
        # calling ``findAll`` on it.
        if content:
            content = content.findAll(u'tr')
        if not content:
            log.error(u'No books found in the Biblegateway response.')
            send_error_message(u'parse')
            return None
        books = []
        for book in content:
            book = book.find(u'td')
            if book:
                books.append(book.contents[0])
        return books
2010-07-29 14:36:02 +00:00
class BSExtract(object):
    """
    Extract verses from Bibleserver.com
    """
    def __init__(self, proxyurl=None):
        """
        ``proxyurl``
            Optional proxy server URL; stored on the instance.
        """
        log.debug(u'BSExtract.init("%s")', proxyurl)
        self.proxyurl = proxyurl
        # A hung web server must not block the application indefinitely.
        socket.setdefaulttimeout(30)

    def get_bible_chapter(self, version, book_name, chapter):
        """
        Access and decode bibles via Bibleserver mobile website

        ``version``
            The version of the bible like NIV for New International Version

        ``book_name``
            Text name of bible book e.g. Genesis, 1. John, 1John or Offenbarung

        ``chapter``
            Chapter number
        """
        log.debug(u'BSExtract.get_bible_chapter("%s", "%s", "%s")', version,
            book_name, chapter)
        version_escaped = urllib.quote(version.encode("utf-8"))
        book_escaped = urllib.quote(book_name.encode("utf-8"))
        chapter_url = u'http://m.bibleserver.com/text/%s/%s%d' % \
            (version_escaped, book_escaped, chapter)
        header = (u'Accept-Language', u'en')
        soup = get_soup_for_bible_ref(chapter_url, header)
        if not soup:
            return None
        Receiver.send_message(u'openlp_process_events')
        content = soup.find(u'div', u'content')
        if not content:
            log.error(u'No verses found in the Bibleserver response.')
            send_error_message(u'parse')
            return None
        content = content.find(u'div').findAll(u'div')
        # The CSS class encodes book/chapter/verse; group 3 is the verse.
        class_pattern = re.compile(r'v(\d{1,2})(\d{3})(\d{3}) verse.*')
        verses = {}
        for verse_div in content:
            Receiver.send_message(u'openlp_process_events')
            number = int(class_pattern.sub(r'\3', verse_div[u'class']))
            verses[number] = verse_div.contents[1].rstrip(u'\n')
        return SearchResults(book_name, chapter, verses)

    def get_books_from_http(self, version):
        """
        Load a list of all books a Bible contains from Bibleserver mobile
        website.

        ``version``
            The version of the Bible like NIV for New International Version
        """
        log.debug(u'BSExtract.get_books_from_http("%s")', version)
        urlversion = urllib.quote(version.encode("utf-8"))
        chapter_url = u'http://m.bibleserver.com/overlay/selectBook?'\
            'translation=%s' % (urlversion)
        soup = get_soup_for_bible_ref(chapter_url)
        if not soup:
            return None
        content = soup.find(u'ul')
        if not content:
            log.error(u'No books found in the Bibleserver response.')
            send_error_message(u'parse')
            return None
        # Each list item wraps a link whose first child is the book name.
        books = []
        for item in content.findAll(u'li'):
            books.append(item.contents[0].contents[0])
        return books
2010-07-29 14:36:02 +00:00
class CWExtract(object):
    """
    Extract verses and book lists from the CrossWalk/BibleStudyTools website.
    """
    def __init__(self, proxyurl=None):
        """
        ``proxyurl``
            An optional proxy server URL. It is stored on the instance but
            not otherwise used by this class.
        """
        log.debug(u'CWExtract.init("%s")', proxyurl)
        self.proxyurl = proxyurl
        # Stop a hung web server from blocking the whole application.
        socket.setdefaulttimeout(30)

    def get_bible_chapter(self, version, book_name, chapter):
        """
        Access and decode bibles via the Crosswalk website

        ``version``
            The version of the Bible like niv for New International Version

        ``book_name``
            Text name of in english e.g. 'gen' for Genesis

        ``chapter``
            Chapter number

        Returns a ``SearchResults`` object on success or ``None`` on failure.
        """
        log.debug(u'CWExtract.get_bible_chapter("%s", "%s", "%s")', version,
            book_name, chapter)
        url_book_name = book_name.replace(u' ', u'-')
        url_book_name = url_book_name.lower()
        url_book_name = urllib.quote(url_book_name.encode("utf-8"))
        chapter_url = u'http://www.biblestudytools.com/%s/%s/%s.html' % \
            (version, url_book_name, chapter)
        soup = get_soup_for_bible_ref(chapter_url)
        if not soup:
            return None
        Receiver.send_message(u'openlp_process_events')
        html_verses = soup.findAll(u'span', u'versetext')
        if not html_verses:
            log.error(u'No verses found in the CrossWalk response.')
            send_error_message(u'parse')
            return None
        verses = {}
        reduce_spaces = re.compile(r'[ ]{2,}')
        fix_punctuation = re.compile(r'[ ]+([.,;])')
        for verse in html_verses:
            Receiver.send_message(u'openlp_process_events')
            # Skip a verse whose number cannot be parsed instead of aborting
            # the whole chapter (mirrors BGExtract's tolerant behaviour).
            try:
                verse_number = int(verse.contents[0].contents[0])
            except (ValueError, IndexError, AttributeError):
                log.warn(u'Illegal verse number in %s %s %s',
                    version, book_name, chapter)
                continue
            verse_text = u''
            for part in verse.contents:
                Receiver.send_message(u'openlp_process_events')
                if isinstance(part, NavigableString):
                    verse_text = verse_text + part
                elif part and part.attrMap and \
                    (part.attrMap[u'class'] == u'WordsOfChrist' or \
                    part.attrMap[u'class'] == u'strongs'):
                    for subpart in part.contents:
                        Receiver.send_message(u'openlp_process_events')
                        if isinstance(subpart, NavigableString):
                            verse_text = verse_text + subpart
                        elif subpart and subpart.attrMap and \
                            subpart.attrMap[u'class'] == u'strongs':
                            for subsub in subpart.contents:
                                Receiver.send_message(u'openlp_process_events')
                                if isinstance(subsub, NavigableString):
                                    verse_text = verse_text + subsub
            Receiver.send_message(u'openlp_process_events')
            # Fix up leading and trailing spaces, multiple spaces, and spaces
            # between text and , and .
            verse_text = verse_text.strip(u'\n\r\t ')
            verse_text = reduce_spaces.sub(u' ', verse_text)
            verse_text = fix_punctuation.sub(r'\1', verse_text)
            verses[verse_number] = verse_text
        return SearchResults(book_name, chapter, verses)

    def get_books_from_http(self, version):
        """
        Load a list of all books a Bible contain from the Crosswalk website.

        ``version``
            The version of the bible like NIV for New International Version

        Returns a list of book names or ``None`` on failure.
        """
        log.debug(u'CWExtract.get_books_from_http("%s")', version)
        chapter_url = u'http://www.biblestudytools.com/%s/'\
            % (version)
        soup = get_soup_for_bible_ref(chapter_url)
        if not soup:
            return None
        content = soup.find(u'div', {u'class': u'Body'})
        # Either lookup may return None when the page layout changes;
        # previously a missing "Body" div raised an AttributeError instead
        # of reporting a parse error like the other extractors do.
        if content:
            content = content.find(u'ul', {u'class': u'parent'})
        if not content:
            log.error(u'No books found in the Crosswalk response.')
            send_error_message(u'parse')
            return None
        content = content.findAll(u'li')
        books = []
        for book in content:
            book = book.find(u'a')
            # Guard against list items without a link (consistent with
            # BGExtract.get_books_from_http).
            if book:
                books.append(book.contents[0])
        return books
class HTTPBible(BibleDB):
    """
    A web based Bible which downloads books and chapters on demand from one
    of the supported providers (Crosswalk, BibleGateway or Bibleserver) and
    caches them in the local database.
    """
    log.info(u'%s HTTPBible loaded', __name__)

    def __init__(self, parent, **kwargs):
        """
        Finds all the bibles defined for the system
        Creates an Interface Object for each bible containing connection
        information

        Throws Exception if no Bibles are found.

        Init confirms the bible exists and stores the database path.
        """
        BibleDB.__init__(self, parent, **kwargs)
        self.download_source = kwargs[u'download_source']
        self.download_name = kwargs[u'download_name']
        # TODO: Clean up proxy stuff. We probably want one global proxy per
        # connection type (HTTP and HTTPS) at most.
        self.proxy_server = None
        self.proxy_username = None
        self.proxy_password = None
        if u'path' in kwargs:
            self.path = kwargs[u'path']
        if u'proxy_server' in kwargs:
            self.proxy_server = kwargs[u'proxy_server']
        if u'proxy_username' in kwargs:
            self.proxy_username = kwargs[u'proxy_username']
        if u'proxy_password' in kwargs:
            self.proxy_password = kwargs[u'proxy_password']

    def _get_handler(self):
        """
        Return the extractor matching ``self.download_source``, or ``None``
        when the source is unknown. Previously an unknown source left the
        ``handler`` name unbound and raised a NameError.
        """
        source = self.download_source.lower()
        if source == u'crosswalk':
            return CWExtract(self.proxy_server)
        elif source == u'biblegateway':
            return BGExtract(self.proxy_server)
        elif source == u'bibleserver':
            return BSExtract(self.proxy_server)
        log.error(u'Unknown download source: "%s"', self.download_source)
        return None

    def do_import(self, bible_name=None):
        """
        Run the import. This method overrides the parent class method. Returns
        ``True`` on success, ``False`` on failure.
        """
        self.wizard.progressBar.setMaximum(68)
        self.wizard.incrementProgressBar(unicode(translate(
            'BiblesPlugin.HTTPBible',
            'Registering Bible and loading books...')))
        self.save_meta(u'download source', self.download_source)
        self.save_meta(u'download name', self.download_name)
        if self.proxy_server:
            self.save_meta(u'proxy server', self.proxy_server)
        if self.proxy_username:
            # Store the proxy userid.
            self.save_meta(u'proxy username', self.proxy_username)
        if self.proxy_password:
            # Store the proxy password.
            self.save_meta(u'proxy password', self.proxy_password)
        handler = self._get_handler()
        if not handler:
            return False
        books = handler.get_books_from_http(self.download_name)
        if not books:
            # log.error, not log.exception: there is no active exception here.
            log.error(u'Importing books from %s - download name: "%s" '\
                'failed' % (self.download_source, self.download_name))
            return False
        self.wizard.progressBar.setMaximum(len(books) + 2)
        self.wizard.incrementProgressBar(unicode(translate(
            'BiblesPlugin.HTTPBible', 'Registering Language...')))
        bible = BiblesResourcesDB.get_webbible(self.download_name,
            self.download_source.lower())
        if bible[u'language_id']:
            language_id = bible[u'language_id']
            self.save_meta(u'language_id', language_id)
        else:
            language_id = self.get_language(bible_name)
        if not language_id:
            # Previously referenced self.filename, which this class never
            # sets; use the download name the rest of this method logs.
            log.error(u'Importing books from %s failed' % self.download_name)
            return False
        for book in books:
            if self.stop_import_flag:
                break
            self.wizard.incrementProgressBar(unicode(translate(
                'BiblesPlugin.HTTPBible', 'Importing %s...',
                'Importing <book name>...')) % book)
            book_ref_id = self.get_book_ref_id_by_name(book, len(books),
                language_id)
            if not book_ref_id:
                log.error(u'Importing books from %s - download name: "%s" '\
                    'failed' % (self.download_source, self.download_name))
                return False
            book_details = BiblesResourcesDB.get_book_by_id(book_ref_id)
            log.debug(u'Book details: Name:%s; id:%s; testament_id:%s',
                book, book_ref_id, book_details[u'testament_id'])
            self.create_book(book, book_ref_id, book_details[u'testament_id'])
        if self.stop_import_flag:
            return False
        else:
            return True

    def get_verses(self, reference_list, show_error=True):
        """
        A reimplementation of the ``BibleDB.get_verses`` method, this one is
        specifically for web Bibles. It first checks to see if the particular
        chapter exists in the DB, and if not it pulls it from the web. If the
        chapter DOES exist, it simply pulls the verses from the DB using the
        ancestor method.

        ``reference_list``
            This is the list of references the media manager item wants. It is
            a list of tuples, with the following format::

                (book_reference_id, chapter, start_verse, end_verse)

            Therefore, when you are looking for multiple items, simply break
            them up into references like this, bundle them into a list. This
            function then runs through the list, and returns an amalgamated
            list of ``Verse`` objects. For example::

                [(u'35', 1, 1, 1), (u'35', 2, 2, 3)]
        """
        log.debug(u'HTTPBible.get_verses("%s")', reference_list)
        for reference in reference_list:
            book_id = reference[0]
            db_book = self.get_book_by_book_ref_id(book_id)
            if not db_book:
                if show_error:
                    critical_error_message_box(
                        translate('BiblesPlugin', 'No Book Found'),
                        translate('BiblesPlugin', 'No matching '
                        'book could be found in this Bible. Check that you '
                        'have spelled the name of the book correctly.'))
                return []
            book = db_book.name
            if BibleDB.get_verse_count(self, book_id, reference[1]) == 0:
                Receiver.send_message(u'cursor_busy')
                search_results = self.get_chapter(book, reference[1])
                if search_results and search_results.has_verselist():
                    ## We have found a book of the bible lets check to see
                    ## if it was there. By reusing the returned book name
                    ## we get a correct book. For example it is possible
                    ## to request ac and get Acts back.
                    book_name = search_results.book
                    Receiver.send_message(u'openlp_process_events')
                    # Check to see if book/chapter exists.
                    db_book = self.get_book(book_name)
                    self.create_chapter(db_book.id, search_results.chapter,
                        search_results.verselist)
                    Receiver.send_message(u'openlp_process_events')
                Receiver.send_message(u'cursor_normal')
            Receiver.send_message(u'openlp_process_events')
        return BibleDB.get_verses(self, reference_list, show_error)

    def get_chapter(self, book, chapter):
        """
        Receive the request and call the relevant handler methods.
        """
        log.debug(u'HTTPBible.get_chapter("%s", "%s")', book, chapter)
        log.debug(u'source = %s', self.download_source)
        handler = self._get_handler()
        if not handler:
            return None
        return handler.get_bible_chapter(self.download_name, book, chapter)

    def get_books(self):
        """
        Return the list of books.
        """
        log.debug(u'HTTPBible.get_books("%s")', Book.name)
        return self.get_all_objects(Book, order_by_ref=Book.id)

    def get_chapter_count(self, book):
        """
        Return the number of chapters in a particular book.

        ``book``
            The book object to get the chapter count for.
        """
        log.debug(u'HTTPBible.get_chapter_count("%s")', book.name)
        return BiblesResourcesDB.get_chapter_count(book.book_reference_id)

    def get_verse_count(self, book_id, chapter):
        """
        Return the number of verses for the specified chapter and book.

        ``book_id``
            The reference id of the book.

        ``chapter``
            The chapter whose verses are being counted.
        """
        log.debug(u'HTTPBible.get_verse_count("%s", %s)', book_id, chapter)
        return BiblesResourcesDB.get_verse_count(book_id, chapter)
2011-01-05 19:48:01 +00:00
2011-01-31 01:55:25 +00:00
def get_soup_for_bible_ref(reference_url, header=None, pre_parse_regex=None,
    pre_parse_substitute=None, cleaner=None):
    """
    Gets a webpage and returns a parsed and optionally cleaned soup or None.

    ``reference_url``
        The URL to obtain the soup from.

    ``header``
        An optional HTTP header to pass to the bible web server.

    ``pre_parse_regex``
        A regular expression to run on the webpage. Allows manipulation of the
        webpage before passing to BeautifulSoup for parsing.

    ``pre_parse_substitute``
        The text to replace any matches to the regular expression with.

    ``cleaner``
        An optional regex to use during webpage parsing.
    """
    if not reference_url:
        return None
    page = get_web_page(reference_url, header, True)
    if not page:
        send_error_message(u'download')
        return None
    markup = page.read()
    # Optionally mangle the raw markup before parsing.
    if pre_parse_regex and pre_parse_substitute is not None:
        markup = re.sub(pre_parse_regex, pre_parse_substitute, markup)
    soup = None
    try:
        soup = BeautifulSoup(markup, markupMassage=cleaner) if cleaner \
            else BeautifulSoup(markup)
    except HTMLParseError:
        log.exception(u'BeautifulSoup could not parse the bible page.')
    if not soup:
        send_error_message(u'parse')
        return None
    Receiver.send_message(u'openlp_process_events')
    return soup
2011-01-13 17:55:29 +00:00
def send_error_message(error_type):
    """
    Send a standard error message informing the user of an issue.

    ``error_type``
        The type of error that occured for the issue, either u'download' or
        u'parse'. Any other value is silently ignored.
    """
    messages = {
        u'download': (
            translate('BiblesPlugin.HTTPBible', 'Download Error'),
            translate('BiblesPlugin.HTTPBible', 'There was a '
            'problem downloading your verse selection. Please check your '
            'Internet connection, and if this error continues to occur '
            'please consider reporting a bug.')),
        u'parse': (
            translate('BiblesPlugin.HTTPBible', 'Parse Error'),
            translate('BiblesPlugin.HTTPBible', 'There was a '
            'problem extracting your verse selection. If this error continues '
            'to occur please consider reporting a bug.')),
    }
    if error_type in messages:
        title, message = messages[error_type]
        critical_error_message_box(title, message)